# lollms-webui/configs/default.yaml
# 2023-05-22 08:56:50 +02:00
#
# 32 lines
# 852 B
# YAML
#
# lollms-webui default configuration — a single flat YAML mapping read at startup.
version: 4
user_name: user
config: default
ctx_size: 2048  # presumably the model context window size in tokens — TODO confirm
n_gpu_layers: 20  # Depends on your GPU size
db_path: databases/database.db
debug: false
n_threads: 8
host: localhost
language: en-US
# NOTE(review): this comment originally said "Supported backends are llamacpp
# and gpt-j", but the default value below is gpt_4all — the supported-backend
# list looks stale; verify against the backend loader.
backend: gpt_4all
model: null  # no model selected by default
n_predict: 1024  # presumably max tokens generated per response — TODO confirm
nb_messages_to_remember: 5
personality_language: english
personality_category: default
personality: gpt4all
port: 9600
# Sampling parameters passed to the backend.
repeat_last_n: 40
repeat_penalty: 1.2
seed: -1  # -1 presumably requests a random seed — TODO confirm
temperature: 0.9
top_k: 50
top_p: 0.95
voice: ""  # empty string: no voice selected
use_gpu: false  # Not active yet
auto_read: false
use_avx2: true  # By default we require using avx2 but if not supported, make sure you remove it from here
use_new_ui: true  # NOTE(review): original comment said "By default use old ui", which contradicts the value true — stale note, confirm intent
override_personality_model_parameters: false  # if true the personality parameters are overridden by those of the configuration (may affect personality behaviour)