# lollms-webui/configs/config.yaml
# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 88
binding_name: null
model_name: null
model_variant: null
model_type: null
show_news_panel: true
# Security measures
turn_on_setting_update_validation: true
turn_on_code_execution: true
turn_on_code_validation: true
turn_on_open_file_validation: true
turn_on_send_file_validation: true
turn_on_language_validation: true
force_accept_remote_access: false
# Server information
headless_server_mode: false
allowed_origins: []
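# Example (hypothetical origins): to allow cross-origin requests from a separate frontend,
# list the origins explicitly, e.g.:
# allowed_origins: ["http://localhost:5173", "https://my-lollms-frontend.example.com"]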
# Host information
host: localhost
port: 9600
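# Example (assumption): to make the server reachable from other machines on the network,
# bind to all interfaces instead of localhost (see also force_accept_remote_access above):
# host: 0.0.0.0
# port: 9600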
app_custom_logo: ""
# Generation parameters
discussion_prompt_separator: "!@>"
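# Example (assumption about how the separator is used): each turn of the discussion
# prompt is prefixed with the separator and the speaker's name, e.g.
# !@>user: Hello
# !@>lollms: Hi! How can I help you today?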
seed: -1
ctx_size: 4084
max_n_predict: 4096
min_n_predict: 512
temperature: 0.9
top_k: 50
top_p: 0.95
repeat_last_n: 40
repeat_penalty: 1.2
num_experts_per_token: 2
n_threads: 8
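# Worked example (assumption: the reply is clipped to the remaining context): with
# ctx_size 4084, a 3000-token discussion leaves at most ~1084 tokens for the answer,
# even though max_n_predict allows up to 4096.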
# Personality parameters
personalities: ["generic/lollms"]
active_personality_id: 0
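# Example (the second entry is hypothetical): several personalities can be mounted at
# once, and active_personality_id is the zero-based index of the one currently in use:
# personalities: ["generic/lollms", "generic/my_custom_personality"]
# active_personality_id: 0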
override_personality_model_parameters: false # if true, the personality parameters are overridden by those of the configuration (may affect personality behaviour)
extensions: []
user_name: user
user_description: ""
use_user_name_in_discussions: false
use_model_name_in_discussions: false
user_avatar: default_user.svg
use_user_informations_in_discussion: false
# UI parameters
discussion_db_name: default
# Automatic updates
debug: false
debug_log_file_path: ""
auto_update: true
auto_sync_personalities: true
auto_sync_extensions: true
auto_sync_bindings: true
auto_sync_models: true
auto_save: true
auto_title: false
# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores
# Automatically open the browser
auto_show_browser: true
# copy to clipboard
copy_to_clipboard_add_all_details: false
# Voice service
enable_voice_service: false
xtts_base_url: http://localhost:8020
xtts_use_deepspeed: false
xtts_use_streaming_mode: true
auto_read: false
current_voice: null
current_language: en
# Image generation service
enable_sd_service: false
sd_base_url: http://localhost:7860
# Image generation service comfyui
enable_comfyui_service: false
comfyui_base_url: http://127.0.0.1:8188/
# Motion control service
enable_motion_ctrl_service: false
motion_ctrl_base_url: http://localhost:7861
# ollama service
enable_ollama_service: false
ollama_base_url: http://localhost:11434
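# Example (Ollama's standard REST API): to check that the Ollama daemon is reachable
# at this URL, list the locally installed models:
# curl http://localhost:11434/api/tags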
# petals service
enable_petals_service: false
petals_base_url: http://localhost:8064
petals_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
petals_device: cuda
# lollms service
enable_lollms_service: false
lollms_base_url: http://localhost:1234
lollms_access_keys: "" # set a list of keys separated by commas to restrict access
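# Example (hypothetical keys): restrict access to clients that present one of these keys:
# lollms_access_keys: "sk-alpha-0000,sk-beta-1111"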
activate_lollms_server: true
activate_ollama_emulator: true
activate_openai_emulator: true
activate_mistralai_emulator: true
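# Example (assumption: the OpenAI emulator exposes an OpenAI-compatible
# /v1/chat/completions route on the webui host/port configured above):
# curl http://localhost:9600/v1/chat/completions \
#   -H "Content-Type: application/json" \
#   -d '{"model": "lollms", "messages": [{"role": "user", "content": "Hello"}]}'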
# elastic search service
elastic_search_service: false
elastic_search_url: http://localhost:9200
# vllm service
enable_vllm_service: false
vllm_url: http://localhost:8000
vllm_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
vllm_gpu_memory_utilization: 0.9
vllm_max_model_len: 4096
vllm_max_num_seqs: 256
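# Example (assumption: these values map onto the standard vLLM OpenAI-compatible server
# flags; shown only to illustrate what the parameters mean):
# python -m vllm.entrypoints.openai.api_server \
#   --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 \
#   --gpu-memory-utilization 0.9 --max-model-len 4096 --max-num-seqs 256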
# Audio
media_on: false
audio_in_language: 'en-US'
auto_speak: false
audio_out_voice: null
audio_pitch: 1
audio_auto_send_input: true
audio_silenceTimer: 5000
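# Note (assumption): audio_silenceTimer is in milliseconds, so 5000 means recording
# stops after about 5 seconds of silence.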
# Data vectorization
activate_skills_lib: false # Activate vectorizing previous conversations
skills_lib_database_name: "default" # Default skills database
summerize_discussion: false # activate discussion summary (better but adds computation time)
max_summary_size: 512 # in tokens
data_vectorization_visualize_on_vectorization: false
use_files: true # Activate using files
data_vectorization_activate: true # To activate/deactivate data vectorization
data_vectorization_method: "tfidf_vectorizer" #"model_embedding" or "tfidf_vectorizer"
data_visualization_method: "PCA" #"PCA" or "TSNE"
data_vectorization_sentense_transformer_model: "all-MiniLM-L6-v2" # you can use another model by setting its name here or its path
data_vectorization_save_db: true # For each new session, new files
data_vectorization_chunk_size: 512 # chunk size
data_vectorization_overlap_size: 128 # overlap between chunks size
data_vectorization_nb_chunks: 2 # number of chunks to use
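# Worked example (assuming a simple sliding window): with chunk size 512 and overlap 128,
# the window advances 384 tokens per chunk, so a 2000-token document yields 5 chunks;
# only the data_vectorization_nb_chunks best-matching chunks are injected into the context.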
data_vectorization_put_chunk_informations_into_context: false # if true, each chunk will be preceded by its metadata, which may waste some context space but allows the AI to point to where it found the information
data_vectorization_build_keys_words: true # If true, when querying the database, keywords generated from the user prompt are used instead of the prompt itself.
data_vectorization_force_first_chunk: false # If true, the first chunk of the document will systematically be used
data_vectorization_make_persistance: false # If true, the data will be persistent between runs
# Activate internet search
activate_internet_search: false
internet_vectorization_chunk_size: 512 # chunk size
internet_vectorization_overlap_size: 128 # overlap between chunks size
internet_vectorization_nb_chunks: 2 # number of chunks to use
internet_nb_search_pages: 3 # number of pages to select
internet_quick_search: false # If active, the search engine will not load and read the webpages
internet_activate_search_decision: false # If active, the AI decides by itself whether it needs to perform a search
# Helpers
pdf_latex_path: null
# boosting information
positive_boost: null
negative_boost: null
current_language: english
fun_mode: false
# webui configurations
show_code_of_conduct: true
activate_audio_infos: true
# whisper configuration
whisper_model: base
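# Other Whisper checkpoints (from the upstream openai/whisper releases) include
# tiny, small, medium and large; larger models transcribe better but are slower.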