# =================== Lord Of Large Language Multimodal Systems Configuration file ===========================
version: 140
binding_name: null
model_name: null
model_variant: null
model_type: null

show_news_panel: true

# Security measures
turn_on_setting_update_validation: true
turn_on_code_execution: true
turn_on_code_validation: true
turn_on_open_file_validation: true
turn_on_send_file_validation: true
turn_on_language_validation: true

force_accept_remote_access: false

# Server information
headless_server_mode: false
allowed_origins: []
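# Example (illustrative only, placeholder URLs): allow a separately hosted frontend or a
# LAN client to call this server by listing its origins here.
# allowed_origins: ["http://localhost:5173", "http://192.168.1.10:9600"]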

# Host information
host: localhost
port: 9600

app_custom_logo: ""

# Generation parameters
discussion_prompt_separator: "!@>"
start_header_id_template: "!@>"
end_header_id_template: ": "
separator_template: "\n"
start_user_header_id_template: "!@>"
end_user_header_id_template: ": "
end_user_message_id_template: ""
start_ai_header_id_template: "!@>"
end_ai_header_id_template: ": "
end_ai_message_id_template: ""
system_message_template: "system"
use_continue_message: true
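# Illustrative sketch of how the templates above combine (assuming the default values in
# this file): each message header is start_*_header_id_template + name + end_*_header_id_template,
# with separator_template ("\n") between messages, so a discussion renders roughly as:
#   !@>system: You are a helpful assistant.
#   !@>user: Hello
#   !@>lollms: Hi! How can I help?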

seed: -1
ctx_size: 4084
max_n_predict: null
min_n_predict: 1024
temperature: 0.9
top_k: 50
top_p: 0.95
repeat_last_n: 40
repeat_penalty: 1.2
num_experts_per_token: 2
n_threads: 8

# Personality parameters
personalities: ["generic/lollms"]
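# Example (illustrative only; the second entry is a hypothetical category/name pair):
# mount several personalities at once and switch between them with active_personality_id.
# personalities: ["generic/lollms", "coding/python_expert"]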
active_personality_id: 0
override_personality_model_parameters: false # if true, the personality parameters are overridden by those of this configuration (may affect personality behaviour)

extensions: []

user_name: user
user_description: ""
use_user_name_in_discussions: false
use_model_name_in_discussions: false
user_avatar: null
use_user_informations_in_discussion: false

# UI parameters
discussion_db_name: default

# Automatic updates
debug: false
debug_show_final_full_prompt: false
debug_show_chunks: false
debug_log_file_path: ""
auto_update: true
auto_sync_personalities: true
auto_sync_extensions: true
auto_sync_bindings: true
auto_sync_models: true
auto_save: true
auto_title: false

# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores

# Automatically open the browser
auto_show_browser: true

# Copy to clipboard
copy_to_clipboard_add_all_details: false

# -------------------- Services global configurations --------------------------
# Select the active text to speech, text to image, and speech to text services
active_tts_service: "None" # xtts (offline), openai_tts (API key required), elevenlabs_tts, fish_tts (API key required)
active_tti_service: "None" # autosd (offline), diffusers (offline), diffusers_client (online), dall-e (online), midjourney (online)
active_stt_service: "None" # whisper (offline), asr (offline or online), openai_whisper (API key required)
active_ttm_service: "None" # musicgen (offline)
active_ttv_service: "None" # cog_video_x (offline), diffusers (offline), lumalabs (API key required)
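# Example (illustrative only): enable the local XTTS engine for speech synthesis and local
# Whisper for transcription, leaving the other services disabled.
# active_tts_service: "xtts"
# active_stt_service: "whisper"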

# -------------------- Services --------------------------
# ***************** STT *****************
stt_input_device: 0

# STT service
stt_listening_threshold: 1000
stt_silence_duration: 2
stt_sound_threshold_percentage: 10
stt_gain: 1.0
stt_rate: 44100
stt_channels: 1
stt_buffer_size: 10
stt_activate_word_detection: false
stt_word_detection_file: null

# ASR STT service
asr_enable: false
asr_base_url: http://localhost:9000

# openai_whisper configuration
openai_whisper_key: ""
openai_whisper_model: "whisper-1"
# whisper configuration
whisper_activate: false
whisper_model: base
# ***************** TTS *****************
tts_output_device: 0

# Voice service
auto_read: false
xtts_current_voice: null
xtts_current_language: en
xtts_stream_chunk_size: 100
xtts_temperature: 0.75
xtts_length_penalty: 1.0
xtts_repetition_penalty: 5.0
xtts_top_k: 50
xtts_top_p: 0.85
xtts_speed: 1
xtts_enable_text_splitting: true
xtts_freq: 22050

# openai_tts configuration
openai_tts_key: ""
openai_tts_model: "tts-1"
openai_tts_voice: "alloy"

# elevenlabs_tts configuration
elevenlabs_tts_key: ""
elevenlabs_tts_model_id: "eleven_turbo_v2_5"
elevenlabs_tts_voice_stability: 0.5
elevenlabs_tts_voice_boost: 0.5
elevenlabs_tts_voice_id: EXAVITQu4vr4xnSDxMaL

# fish_tts configuration
fish_tts_key: ""
fish_tts_voice: "default"

# ***************** TTI *****************
use_negative_prompt: true
use_ai_generated_negative_prompt: false
negative_prompt_generation_prompt: Generate a negative prompt for the following prompt. A negative prompt is a set of words that describe things we do not want to appear in the generated image.
default_negative_prompt: (((text))), (((ugly))), (((duplicate))), ((morbid)), ((mutilated)), out of frame, extra fingers, mutated hands, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), (((deformed))), blurry, ((bad anatomy)), (((bad proportions))), ((extra limbs)), cloned face, (((disfigured))), ((extra arms)), (((extra legs))), mutated hands, (fused fingers), (too many fingers), (((long neck))), ((watermark)), ((robot eyes))

# Image generation service (autosd)
enable_sd_service: false
sd_base_url: http://localhost:7860

# Image generation service (fooocus)
enable_fooocus_service: false
fooocus_base_url: http://localhost:7860

# diffusers
diffusers_offloading_mode: sequential_cpu_offload # offloading mode; sequential_cpu_offload lowers VRAM usage at the cost of speed
diffusers_model: v2ray/stable-diffusion-3-medium-diffusers
# diffusers client
diffusers_client_base_url: http://localhost:8593

# DALL-E service key
dall_e_key: ""
dall_e_generation_engine: "dall-e-3"

# Midjourney service key
midjourney_key: ""
midjourney_timeout: 300
midjourney_retries: 1

# Image generation service (comfyui)
enable_comfyui_service: false
comfyui_base_url: http://127.0.0.1:8188/
comfyui_model: v1-5-pruned-emaonly.ckpt

# Motion control service
enable_motion_ctrl_service: false
motion_ctrl_base_url: http://localhost:7861

# ***************** TTV *****************
cog_video_x_model: "THUDM/CogVideoX-5b"
# lumalabs configuration
lumalabs_key: ""

# ***************** TTT *****************
# ollama service
enable_ollama_service: false
ollama_base_url: http://localhost:11434

# petals service
enable_petals_service: false
petals_base_url: http://localhost:8064
petals_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
petals_device: cuda

# lollms service
enable_lollms_service: false
lollms_access_keys: [] # set a list of keys (comma separated) to restrict access
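# Example (illustrative only, placeholder keys): clients must present one of these keys
# when calling the lollms service endpoints.
# lollms_access_keys: ["my_first_key", "my_second_key"]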
activate_lollms_server: true
activate_lollms_rag_server: true
activate_lollms_tts_server: true
activate_lollms_stt_server: true
activate_lollms_tti_server: true
activate_lollms_itt_server: true
activate_lollms_ttm_server: true
activate_ollama_emulator: true
activate_openai_emulator: true
activate_mistralai_emulator: true

use_smart_routing: false
smart_routing_router_model: ""
smart_routing_models_description: {}
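# Example (illustrative only, hypothetical model names): describe each candidate model so
# the router model can pick the lightest one able to handle the request.
# smart_routing_models_description: {
#   "small_local_model": "fast and cheap, good for simple chat and summarization",
#   "large_remote_model": "slow and expensive, reserve for complex reasoning or coding"
# }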
restore_model_after_smart_routing: false

# Elasticsearch service
elastic_search_service: false
elastic_search_url: http://localhost:9200

# vllm service
enable_vllm_service: false
vllm_url: http://localhost:8000
vllm_model_path: TinyLlama/TinyLlama-1.1B-Chat-v1.0
vllm_gpu_memory_utilization: 0.9
vllm_max_model_len: 4096
vllm_max_num_seqs: 256

# Audio
media_on: false
audio_in_language: 'en-US'
auto_speak: false
audio_out_voice: null
audio_pitch: 1
audio_auto_send_input: true
audio_silenceTimer: 5000

# Data vectorization
rag_databases: [] # This is the list of paths to database sources. Each database is a folder containing data
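# Example (illustrative only, placeholder paths; the exact entry format may differ between
# lollms versions): each entry points to a folder whose files will be vectorized.
# rag_databases: ["C:/Users/me/Documents/my_knowledge_base", "/home/me/notes"]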
rag_vectorizer: tfidf # possible values: semantic, tfidf, openai
rag_vectorizer_model: sentence-transformers/bert-base-nli-mean-tokens # The model name if applicable
rag_vectorizer_parameters: null # Parameters of the model in JSON format
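# Example (illustrative only, hypothetical parameters): pass vectorizer-specific options as
# a mapping, e.g. for a GPU-backed semantic vectorizer.
# rag_vectorizer_parameters: {"device": "cuda", "batch_size": 32}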
rag_chunk_size: 512 # number of tokens per chunk
rag_overlap: 0 # number of tokens of overlap
rag_n_chunks: 4 # number of chunks to retrieve from the database
rag_clean_chunks: true # remove all unnecessary spaces and line returns from the chunks
rag_follow_subfolders: true # if true, the vectorizer also vectorizes the content of subfolders
rag_check_new_files_at_startup: false # if true, the vectorizer automatically checks for new files in the folder and adds them to the database
rag_preprocess_chunks: false # if true, an LLM preprocesses the content of each chunk and rewrites it in a simpler format
rag_activate_multi_hops: false # if true, use a multi-hop algorithm that performs multiple searches until the AI has enough data
rag_min_nb_tokens_in_chunk: 10 # discard chunks with fewer than this number of tokens
rag_max_n_hops: 3 # maximum number of hops in multi-hop RAG
rag_deactivate: false # if you have a large-context model, you can activate this to use your documents as a whole instead of RAG
rag_vectorizer_openai_key: "" # The OpenAI key (if not provided, the environment variable OPENAI_API_KEY is used)

contextual_summary: false # if activated, this completely replaces RAG and uses a contextual summary instead

activate_skills_lib: false # Activate vectorizing previous conversations
skills_lib_database_name: "default" # Default skills database

max_summary_size: 512 # in tokens
data_vectorization_visualize_on_vectorization: false
data_vectorization_activate: true # To activate/deactivate data vectorization
data_vectorization_method: "tfidf_vectorizer" #"model_embedding" or "tfidf_vectorizer"
data_visualization_method: "PCA" #"PCA" or "TSNE"
data_vectorization_sentense_transformer_model: "all-MiniLM-L6-v2" # you can use another model by setting its name here or its path
data_vectorization_save_db: true # persist the vectorization database between sessions instead of rebuilding it each time
data_vectorization_chunk_size: 512 # chunk size
data_vectorization_overlap_size: 128 # overlap size between chunks
data_vectorization_nb_chunks: 2 # number of chunks to use
data_vectorization_put_chunk_informations_into_context: false # if true, each chunk is preceded by its metadata, which may use some context space but allows the AI to point to where it found the information
data_vectorization_build_keys_words: true # If true, when querying the database, we use keywords generated from the user prompt instead of the prompt itself.
data_vectorization_force_first_chunk: false # If true, the first chunk of the document will systematically be used
data_vectorization_make_persistance: false # If true, the data will be persistent between runs

# Activate internet search
activate_internet_search: false
activate_internet_pages_judgement: true
internet_vectorization_chunk_size: 512 # chunk size
internet_vectorization_overlap_size: 0 # overlap size between chunks
internet_vectorization_nb_chunks: 4 # number of chunks to use
internet_nb_search_pages: 8 # number of pages to select
internet_quick_search: false # If active, the search engine will not load and read the web pages
internet_activate_search_decision: false # If active, the AI decides by itself whether it needs to search

# Helpers
pdf_latex_path: null
# boosting information
positive_boost: null
negative_boost: null
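# Example (illustrative only, placeholder wording): boosts are extra text injected into the
# context to nudge the model's style toward or away from certain behaviours.
# positive_boost: "Be concise and answer in markdown."
# negative_boost: "Do not make up facts; say you don't know when unsure."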

current_language: english
fun_mode: false

# webui configurations
show_code_of_conduct: true
activate_audio_infos: true