From adb24214c67eee1ad1a113c8bec1cc77dd0c8a20 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Tue, 6 May 2025 11:21:25 +0200
Subject: [PATCH] chore(deps): bump llama.cpp to
 `b34c859146630dff136943abc9852ca173a7c9d6` (#5323)

chore(deps): bump llama.cpp to 'b34c859146630dff136943abc9852ca173a7c9d6'

Signed-off-by: Ettore Di Giacinto
---
 Makefile                                 |  2 +-
 backend/cpp/llama/patches/01-llava.patch |  6 +++---
 backend/cpp/llama/prepare.sh             | 10 +++++-----
 backend/cpp/llama/utils.hpp              |  2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/Makefile b/Makefile
index 0ab1f3ca..166db6c7 100644
--- a/Makefile
+++ b/Makefile
@@ -6,7 +6,7 @@ BINARY_NAME=local-ai
 DETECT_LIBS?=true
 
 # llama.cpp versions
-CPPLLAMA_VERSION?=9fdfcdaeddd1ef57c6d041b89cd8fb7048a0f028
+CPPLLAMA_VERSION?=b34c859146630dff136943abc9852ca173a7c9d6
 
 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp
diff --git a/backend/cpp/llama/patches/01-llava.patch b/backend/cpp/llama/patches/01-llava.patch
index 6e2abde2..a7a32f16 100644
--- a/backend/cpp/llama/patches/01-llava.patch
+++ b/backend/cpp/llama/patches/01-llava.patch
@@ -1,7 +1,7 @@
-diff --git a/tools/llava/clip.cpp b/tools/llava/clip.cpp
+diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
 index 3cd0d2fa..6c5e811a 100644
---- a/tools/llava/clip.cpp
-+++ b/tools/llava/clip.cpp
+--- a/tools/mtmd/clip.cpp
++++ b/tools/mtmd/clip.cpp
 @@ -2608,7 +2608,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
      struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
      int* patches_data = (int*)malloc(ggml_nbytes(patches));
diff --git a/backend/cpp/llama/prepare.sh b/backend/cpp/llama/prepare.sh
index f332bc48..153b148f 100644
--- a/backend/cpp/llama/prepare.sh
+++ b/backend/cpp/llama/prepare.sh
@@ -20,9 +20,9 @@ fi
 
 ## XXX: In some versions of CMake clip wasn't being built before llama.
 ## This is an hack for now, but it should be fixed in the future.
-cp -rfv llama.cpp/tools/llava/clip.h llama.cpp/tools/grpc-server/clip.h
-cp -rfv llama.cpp/tools/llava/clip-impl.h llama.cpp/tools/grpc-server/clip-impl.h
-cp -rfv llama.cpp/tools/llava/llava.cpp llama.cpp/tools/grpc-server/llava.cpp
+cp -rfv llama.cpp/tools/mtmd/clip.h llama.cpp/tools/grpc-server/clip.h
+cp -rfv llama.cpp/tools/mtmd/clip-impl.h llama.cpp/tools/grpc-server/clip-impl.h
+cp -rfv llama.cpp/tools/mtmd/llava.cpp llama.cpp/tools/grpc-server/llava.cpp
 echo '#include "llama.h"' > llama.cpp/tools/grpc-server/llava.h
-cat llama.cpp/tools/llava/llava.h >> llama.cpp/tools/grpc-server/llava.h
-cp -rfv llama.cpp/tools/llava/clip.cpp llama.cpp/tools/grpc-server/clip.cpp
\ No newline at end of file
+cat llama.cpp/tools/mtmd/llava.h >> llama.cpp/tools/grpc-server/llava.h
+cp -rfv llama.cpp/tools/mtmd/clip.cpp llama.cpp/tools/grpc-server/clip.cpp
\ No newline at end of file
diff --git a/backend/cpp/llama/utils.hpp b/backend/cpp/llama/utils.hpp
index 0816ef56..a67c235f 100644
--- a/backend/cpp/llama/utils.hpp
+++ b/backend/cpp/llama/utils.hpp
@@ -11,7 +11,7 @@
 
 #include "json.hpp"
 
-#include "../llava/clip.h"
+#include "../mtmd/clip.h"
 
 using json = nlohmann::json;
 