diff --git a/backend/cpp/llama/CMakeLists.txt b/backend/cpp/llama/CMakeLists.txt
index 5a17d3d3..1c68e60b 100644
--- a/backend/cpp/llama/CMakeLists.txt
+++ b/backend/cpp/llama/CMakeLists.txt
@@ -38,11 +38,6 @@ find_package(absl CONFIG REQUIRED)
 find_package(Protobuf CONFIG REQUIRED)
 find_package(gRPC CONFIG REQUIRED)
 
-if ( DEFINED ENV{ONEAPI_ROOT})
-    find_package(MKL REQUIRED)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -L${MKLROOT}/lib")
-endif()
-
 find_program(_PROTOBUF_PROTOC protoc)
 set(_GRPC_GRPCPP grpc++)
 find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin)
@@ -80,15 +75,10 @@ add_library(hw_grpc_proto
   ${hw_proto_hdrs}
 )
 add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp)
-target_link_libraries(${TARGET} PRIVATE common llama myclip ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
-  absl::flags_parse
-  gRPC::${_REFLECTION}
-  gRPC::${_GRPC_GRPCPP}
-  protobuf::${_PROTOBUF_LIBPROTOBUF})
 # Conditionally link SYCL to grpc-server
+# https://github.com/ggerganov/llama.cpp/issues/8665
 if ( DEFINED ENV{ONEAPI_ROOT})
-    find_package(IntelSYCL REQUIRED)
     target_link_libraries(${TARGET} PRIVATE common llama myclip ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
         absl::flags_parse
         gRPC::${_REFLECTION}
         gRPC::${_GRPC_GRPCPP}
diff --git a/backend/cpp/llama/CMakeLists.txt.rpc-8662 b/backend/cpp/llama/CMakeLists.txt.rpc-8662
new file mode 100644
index 00000000..82bbedca
--- /dev/null
+++ b/backend/cpp/llama/CMakeLists.txt.rpc-8662
@@ -0,0 +1,8 @@
+# https://github.com/ggerganov/llama.cpp/issues/8665
+
+add_executable(rpc-server rpc-server.cpp)
+if ( DEFINED ENV{ONEAPI_ROOT})
+target_link_libraries(rpc-server PRIVATE ggml llama sycl)
+else()
+target_link_libraries(rpc-server PRIVATE ggml llama)
+endif()
\ No newline at end of file
diff --git a/backend/cpp/llama/prepare.sh b/backend/cpp/llama/prepare.sh
index 6c00f27c..c1c94ce6 100644
--- a/backend/cpp/llama/prepare.sh
+++ b/backend/cpp/llama/prepare.sh
@@ -17,4 +17,7 @@ cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
 cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
 echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
 cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
-cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
\ No newline at end of file
+cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
+
+# https://github.com/ggerganov/llama.cpp/issues/8665
+cp -rfv CMakeLists.txt.rpc-8662 llama.cpp/examples/rpc/CMakeLists.txt
\ No newline at end of file