config: default
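# Size of the model's context window, in tokens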
ctx_size: 512
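# Path to the local database file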
db_path: databases/database.db
debug: false
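# Number of CPU threads used for inference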
n_threads: 8
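# Address the server binds to (the port is set below)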
host: localhost
language: en-US
# Supported backends are llamacpp and gpt-j
backend: llama_cpp
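# Model weights file to load (ggml format)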
model: gpt4all-lora-quantized-ggml.bin
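# Maximum number of tokens to generate per response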
n_predict: 1024
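# How many past messages to keep in the prompt context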
nb_messages_to_remember: 5
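# Active personality: language, category and name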
personality_language: english
personality_category: general
personality: gpt4all
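# Port the server listens on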
port: 9600
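# Repetition penalty, applied over the last repeat_last_n tokens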
repeat_last_n: 40
repeat_penalty: 1.2
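# Random seed used for generation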
seed: 0
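# Sampling parameters: temperature, top-k and top-p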
temp: 0.9
top_k: 50
top_p: 0.95
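# Voice setting, presumably for text-to-speech (empty = unset)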
voice: ""
use_gpu: false # Not active yet
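# Whether responses are read aloud automatically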
auto_read: false
use_avx2: true # AVX2 is required by default; if your CPU does not support it, remove this entry