# lollms-webui/configs/config.yaml

# =================== Lord Of Large Language Models Configuration file ===========================
version: 45
binding_name: null
model_name: null
# Host information
host: localhost
port: 9600
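# Example (illustrative, not the shipped default): to expose the web UI to
# other machines on the network, bind to all interfaces instead of localhost:
# host: 0.0.0.0
# port: 9600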
# Generation parameters
discussion_prompt_separator: "!@>"
seed: -1
n_predict: 1024
ctx_size: 4084
min_n_predict: 512
temperature: 0.9
top_k: 50
top_p: 0.95
repeat_last_n: 40
repeat_penalty: 1.2
n_threads: 8
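# Example (illustrative): a more deterministic sampling setup with a fixed
# seed, lower temperature and tighter sampling. These values are assumptions,
# not recommended defaults.
# seed: 42
# temperature: 0.3
# top_k: 10
# top_p: 0.9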
# Personality parameters
personalities: ["generic/lollms"]
active_personality_id: 0
override_personality_model_parameters: false # if true, the personality parameters are overridden by those of this configuration (may affect personality behaviour)
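# Example (illustrative): mounting several personalities and selecting the
# second one by index. The second path is hypothetical and must exist in the
# installed personalities zoo.
# personalities: ["generic/lollms", "generic/python_specialist"]
# active_personality_id: 1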
extensions: []
user_name: user
user_description: ""
use_user_name_in_discussions: false
user_avatar: default_user.svg
use_user_informations_in_discussion: false
# UI parameters
db_path: database.db
# Automatic updates
debug: false
auto_update: true
auto_save: true
auto_title: false
# Install mode (cpu, cpu-noavx, nvidia-tensorcores, nvidia, amd-noavx, amd, apple-intel, apple-silicon)
hardware_mode: nvidia-tensorcores
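# Example (illustrative): on an Apple Silicon Mac, pick the matching mode from
# the list above:
# hardware_mode: apple-silicon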
# Automatically open the browser
auto_show_browser: true
# Voice service
enable_voice_service: false
xtts_base_url: http://127.0.0.1:8020
auto_read: false
current_voice: null
current_language: en
# Image generation service
enable_sd_service: false
sd_base_url: http://127.0.0.1:7860
# ollama service
enable_ollama_service: false
ollama_base_url: http://0.0.0.0:11434
# petals service
enable_petals_service: false
petals_base_url: http://0.0.0.0:8010
# lollms service
enable_lollms_service: false
lollms_base_url: http://0.0.0.0:1234
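# Example (illustrative): enabling the voice and image generation services
# against the default local endpoints listed above. This assumes the XTTS and
# Stable Diffusion servers are already installed and running.
# enable_voice_service: true
# xtts_base_url: http://127.0.0.1:8020
# enable_sd_service: true
# sd_base_url: http://127.0.0.1:7860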
# Audio
media_on: false
audio_in_language: 'en-US'
auto_speak: false
audio_out_voice: null
audio_pitch: 1
audio_auto_send_input: true
audio_silenceTimer: 5000
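# Example (illustrative): enabling audio input in French with automatic
# speech output; the locale code is an assumption following the en-US format.
# media_on: true
# audio_in_language: 'fr-FR'
# auto_speak: true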
# Data vectorization
use_discussions_history: false # Activate vectorizing previous conversations
summerize_discussion: false # activate discussion summary (better but adds computation time)
max_summary_size: 512 # in tokens
data_vectorization_visualize_on_vectorization: false
use_files: true # Activate using files
data_vectorization_activate: true # To activate/deactivate data vectorization
data_vectorization_method: "tfidf_vectorizer" #"model_embedding" or "tfidf_vectorizer"
data_visualization_method: "PCA" #"PCA" or "TSNE"
data_vectorization_save_db: false # if false, a fresh vectorization database is built for each new session
data_vectorization_chunk_size: 512 # chunk size
data_vectorization_overlap_size: 128 # overlap size between chunks
data_vectorization_nb_chunks: 2 # number of chunks to use
data_vectorization_build_keys_words: false # if true, when querying the database, keywords generated from the user prompt are used instead of the prompt itself
data_vectorization_force_first_chunk: false # if true, the first chunk of the document is always included
data_vectorization_make_persistance: false # if true, the vectorized data persists between runs
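# Example (illustrative): switching to model-based embeddings with TSNE
# visualization and larger chunks. The method and visualization names come
# from the options documented above; the sizes are assumptions.
# data_vectorization_method: "model_embedding"
# data_visualization_method: "TSNE"
# data_vectorization_chunk_size: 1024
# data_vectorization_overlap_size: 256
# data_vectorization_nb_chunks: 4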
# Helpers
pdf_latex_path: null
# boosting information
positive_boost: null
negative_boost: null
force_output_language_to_be: null
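# Example (illustrative): the boost fields presumably hold free-text guidance
# added to the prompt (inferred from the key names); the strings and the
# language value below are hypothetical.
# positive_boost: "Always answer concisely and stay on topic."
# negative_boost: "Do not speculate or invent facts."
# force_output_language_to_be: "french"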
fun_mode: false