Mirror of https://github.com/mudler/LocalAI.git, synced 2025-02-14 14:41:56 +00:00
chore(deps): bump llama.cpp to 4b0c638b9 (#4532)
deps(llama.cpp): bump to 4b0c638b9

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
parent 1006e8a2ed
commit c553d73748
Makefile (2 lines changed)
@@ -8,7 +8,7 @@ DETECT_LIBS?=true
 # llama.cpp versions
 GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
 GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=2f0ee84b9b02d2a98742308026f060ebdc2423f1
+CPPLLAMA_VERSION?=4b0c638b9a68f577cb2066b638c9f622d91ee661
 
 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp
@@ -492,8 +492,8 @@ struct llama_server_context
     }
 
     common_init_result common_init = common_init_from_params(params);
-    model = common_init.model;
-    ctx = common_init.context;
+    model = common_init.model.release();
+    ctx = common_init.context.release();
     if (model == nullptr)
     {
         LOG_ERR("unable to load model: %s", params.model.c_str());
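The second hunk tracks an API change in upstream llama.cpp: common_init_from_params now hands back the model and context as owning smart pointers rather than raw pointers, so a caller that keeps plain llama_model* / llama_context* members (as llama_server_context does) must call .release() to take over ownership. Below is a minimal sketch of that adaptation, assuming the upstream common_init_result layout; the Server struct and its load() method are hypothetical stand-ins for the real server code, not LocalAI's actual implementation.

// Sketch of adapting to the new common_init_from_params() return type.
// Assumes llama.cpp's "common" helpers; Server is a hypothetical stand-in.
#include "common.h"
#include "llama.h"

struct Server {
    llama_model   * model = nullptr;
    llama_context * ctx   = nullptr;

    bool load(common_params & params) {
        common_init_result init = common_init_from_params(params);

        // Old API: the members were raw pointers, assigned directly:
        //   model = init.model;
        //   ctx   = init.context;
        //
        // New API: the members are unique_ptr-style owners, so take the
        // raw pointers back explicitly when managing lifetime manually:
        model = init.model.release();
        ctx   = init.context.release();

        return model != nullptr && ctx != nullptr;
    }
};

Calling release() keeps the server's existing manual cleanup path untouched; the alternative would be to store the smart pointers themselves and let them free the model and context automatically.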