---
# lollms-webui default configuration.
# NOTE(review): this file had git-blame timestamps and GitHub page chrome
# scraped into it, which made it unparseable as YAML; only those junk lines
# were removed — every key/value below is unchanged from the original.

config: default
ctx_size: 512
db_path: databases/database.db
debug: false
n_threads: 8
host: localhost
language: en-US

# Supported backends are llamacpp and gpt-j
backend: llama_cpp
model: gpt4all-lora-quantized-ggml.bin
n_predict: 1024
nb_messages_to_remember: 5

# Personality selection (language/category/name presumably index a
# personalities directory tree — confirm against the loader).
personality_language: english
personality_category: general
personality: gpt4all

port: 9600

# Sampling / generation parameters
repeat_last_n: 40
repeat_penalty: 1.2
seed: 0
temp: 0.9
top_k: 50
top_p: 0.95

voice: ""
use_gpu: false  # Not active yet
auto_read: false
use_avx2: true  # By default we require using avx2 but if not supported, make sure you remove it from here