mirror of https://github.com/mudler/LocalAI.git (synced 2024-12-25 23:31:04 +00:00)
commit dc8b8640d5
parent 2457932748

    cleanup
@@ -1,34 +1,20 @@
 context_size: 4096
-#mirostat: 2
-#mirostat_tau: 5.0
-#mirostat_eta: 0.1
 f16: true
-#low_vram: true
 threads: 11
 gpu_layers: 90
-
 name: llava
 mmap: true
 backend: llama-cpp
 roles:
   user: "USER:"
   assistant: "ASSISTANT:"
-
   system: "SYSTEM:"
 parameters:
-  #model: openbuddy-llama2-34b-v11.1-bf16.Q3_K_S.gguf
-  #model: openbuddy-mistral-7b-v13.Q6_K.gguf
   model: ggml-model-q4_k.gguf
-  #model: openbuddy-llama2-13b-v11.1.Q6_K.gguf
-  #model: openbuddy-llama2-34b-v11.1-bf16.Q4_K_S.gguf
-  #model: llama2-22b-daydreamer-v3.ggmlv3.q6_K.bin
-
   temperature: 0.2
-
   top_k: 40
   top_p: 0.95
-  #ngqa: 8
 template:
   chat: chat-simple
 mmproj: mmproj-model-f16.gguf