mirror of https://github.com/mudler/LocalAI.git
fix(llama.cpp-ggml): fixup max_tokens for old backend (#2094)

fix(llama.cpp-ggml): set 0 as default for `max_tokens`

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent 284ad026b1
commit 180cd4ccda
@@ -210,7 +210,7 @@ func (cfg *BackendConfig) SetDefaults(opts ...ConfigLoaderOption) {
 	defaultMirostatETA := 0.1
 	defaultTypicalP := 1.0
 	defaultTFZ := 1.0
-	defaultInfinity := -1
+	defaultZero := 0
 
 	// Try to offload all GPU layers (if GPU is found)
 	defaultHigh := 99999999
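
An aside on the shape of this hunk (a minimal standalone sketch, not LocalAI code): each default is bound to a named local such as `defaultZero` because Go cannot take the address of an untyped constant, and `SetDefaults` needs an addressable value to store in the pointer-typed config field.

package main

import "fmt"

func main() {
	// p := &0 // does not compile: cannot take the address of a constant
	defaultZero := 0 // bind the default to a variable first...
	p := &defaultZero
	fmt.Println(*p) // ...then its address can be taken; prints 0
}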
@@ -254,7 +254,7 @@ func (cfg *BackendConfig) SetDefaults(opts ...ConfigLoaderOption) {
 	}
 
 	if cfg.Maxtokens == nil {
-		cfg.Maxtokens = &defaultInfinity
+		cfg.Maxtokens = &defaultZero
	}
 
 	if cfg.Mirostat == nil {
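
Taken together, the two hunks change only what an unset `max_tokens` becomes: because `Maxtokens` is a pointer, `SetDefaults` can distinguish "not set by the user" (nil) from an explicit value, and after this commit an unset value defaults to 0 instead of -1. A minimal runnable sketch of that pattern (the `BackendConfig` here is a stand-in reduced to the one field this commit touches; the real struct and the `opts` parameter are omitted):

package main

import "fmt"

// BackendConfig is a stand-in for LocalAI's config struct, reduced to the
// one field this commit touches.
type BackendConfig struct {
	Maxtokens *int // nil means the user did not set max_tokens
}

// SetDefaults mirrors the nil-check pattern from the diff: only unset
// fields receive a default, so explicit user values survive.
func (cfg *BackendConfig) SetDefaults() {
	defaultZero := 0 // previously defaultInfinity := -1

	if cfg.Maxtokens == nil {
		cfg.Maxtokens = &defaultZero
	}
}

func main() {
	var unset BackendConfig
	unset.SetDefaults()
	fmt.Println(*unset.Maxtokens) // 0: the new default after this commit

	five := 5
	set := BackendConfig{Maxtokens: &five}
	set.SetDefaults()
	fmt.Println(*set.Maxtokens) // 5: an explicit value is left untouched
}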