chore(deps): bump llama.cpp to 47f931c8f9a26c072d71224bc8013cc66ea9e445 (#4263)

chore(deps): bump llama.cpp to '47f931c8f9a26c072d71224bc8013cc66ea9e445'

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent 7492179c67
commit 404ca3cc23
Makefile (2 changed lines)
@@ -8,7 +8,7 @@ DETECT_LIBS?=true
 # llama.cpp versions
 GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
 GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=cce5a9007572c6e9fa522296b77571d2e5071357
+CPPLLAMA_VERSION?=47f931c8f9a26c072d71224bc8013cc66ea9e445
 
 # go-rwkv version
 RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
@@ -203,7 +203,7 @@ struct llama_client_slot
     std::string stopping_word;
 
     // sampling
-    struct common_sampler_params sparams;
+    struct common_params_sampling sparams;
     common_sampler *ctx_sampling = nullptr;
 
     int32_t ga_i = 0; // group-attention state
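Note: the llama.cpp revision pinned in the Makefile hunk above renames the shared sampling-parameters struct from common_sampler_params to common_params_sampling, which is why the server glue changes in lockstep with the version bump. Below is a minimal sketch, not part of this commit, of a slot embedding the renamed type; the stand-in fields temp/top_k/top_p are assumptions about common_params_sampling, shown only for illustration.

    // Sketch only: stand-ins for the llama.cpp common types referenced in the
    // diff above. The real definitions live in llama.cpp's common headers; the
    // field names below (temp, top_k, top_p) are illustrative assumptions.
    #include <cstdint>
    #include <string>

    struct common_params_sampling {      // formerly common_sampler_params
        float   temp  = 0.8f;
        int32_t top_k = 40;
        float   top_p = 0.95f;
    };
    struct common_sampler;               // opaque per-slot sampler state

    struct demo_client_slot {            // hypothetical, mirrors llama_client_slot
        std::string stopping_word;

        // sampling: each slot owns its parameters and an opaque sampler handle
        struct common_params_sampling sparams;
        common_sampler *ctx_sampling = nullptr;
    };

Code that still spells the old name could be kept compiling with a transitional alias such as `using common_sampler_params = common_params_sampling;`, though this commit simply adopts the new name everywhere.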
@@ -662,7 +662,7 @@ struct llama_server_context
 
     bool launch_slot_with_data(llama_client_slot* &slot, json data) {
         slot_params default_params;
-        common_sampler_params default_sparams;
+        common_params_sampling default_sparams;
 
         slot->params.stream = json_value(data, "stream", false);
         slot->params.cache_prompt = json_value(data, "cache_prompt", false);
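For context, launch_slot_with_data fills each slot by overlaying the request JSON onto compiled-in defaults. The sketch below reproduces that fallback pattern with a hypothetical json_value helper; it assumes nlohmann::json (aliased as json in the server sources) and is illustrative rather than the actual server implementation.

    // Fallback pattern from the hunk above: take the value from the request
    // JSON when present, otherwise keep the default. Assumes nlohmann::json;
    // json_value here is a hypothetical helper, not the server's exact one.
    #include <iostream>
    #include <string>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    template <typename T>
    static T json_value(const json &body, const std::string &key, const T &default_value) {
        return body.contains(key) && !body.at(key).is_null()
                   ? body.at(key).get<T>()
                   : default_value;
    }

    int main() {
        json data = json::parse(R"({"stream": true})");

        bool stream       = json_value(data, "stream", false);        // true, from the request
        bool cache_prompt = json_value(data, "cache_prompt", false);  // false, default kept

        std::cout << std::boolalpha << stream << " " << cache_prompt << "\n";
        return 0;
    }

The renamed common_params_sampling defaults (default_sparams) would be overlaid the same way for the sampling fields carried in the request.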