Mirror of https://github.com/mudler/LocalAI.git
chore: ⬆️ Update ggerganov/llama.cpp (#3166)
* arrow_up: Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* fix(llama.cpp): adapt init function call

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
parent 52ba230d31
commit 4e11ca55fd
Makefile: 2 changed lines (1 addition, 1 deletion)
@@ -8,7 +8,7 @@ DETECT_LIBS?=true
 # llama.cpp versions
 GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
 GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=0d6fb52be0c1b7e77eb855f3adc4952771c8ce4c
+CPPLLAMA_VERSION?=0a4ce786814b123096d18aadca89cd352b9e590b

 # gpt4all version
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
@@ -458,7 +458,9 @@ struct llama_server_context
         }
     }

-    std::tie(model, ctx) = llama_init_from_gpt_params(params);
+    llama_init_result llama_init = llama_init_from_gpt_params(params);
+    model = llama_init.model;
+    ctx = llama_init.context;
     if (model == nullptr)
     {
         LOG_ERROR("unable to load model", {{"model", params.model}});
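For readers adapting their own code to this llama.cpp bump: the hunk above (from the LocalAI llama.cpp backend server source; its file name is not preserved in the extracted file list) replaces the tuple-returning form of llama_init_from_gpt_params with the new llama_init_result struct. Below is a minimal C++ sketch of the same adaptation, assuming only what the hunk shows; the context struct and error handling are simplified placeholders, not the actual LocalAI backend code.

// Sketch: adapting a caller to the new llama_init_from_gpt_params() return type.
// Assumes common.h provides gpt_params, llama_init_result, and the init helper,
// as implied by the diff above.
#include "common.h"   // gpt_params, llama_init_result, llama_init_from_gpt_params
#include "llama.h"    // llama_model, llama_context

struct minimal_server_context {
    llama_model   *model = nullptr;
    llama_context *ctx   = nullptr;

    bool load_model(gpt_params &params) {
        // Old upstream API (the removed line in the hunk):
        //   std::tie(model, ctx) = llama_init_from_gpt_params(params);
        // New upstream API: the helper returns a struct instead of a tuple.
        llama_init_result llama_init = llama_init_from_gpt_params(params);
        model = llama_init.model;
        ctx   = llama_init.context;
        return model != nullptr;  // mirrors the nullptr check that follows in the diff
    }
};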