Mirror of https://github.com/mudler/LocalAI.git
fix(hipblas): do not build all cpu-specific flags (#5322)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
parent 84a26458dc
commit 6ce94834b6
@@ -303,7 +303,7 @@ RUN make prepare
 ## Build the binary
 ## If we're on arm64 AND using cublas/hipblas, skip some of the llama-compat backends to save space
 ## Otherwise just run the normal build
-RUN if [ "${TARGETARCH}" = "arm64" ] && ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "hipblas" ] ); then \
+RUN if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
         SKIP_GRPC_BACKEND="backend-assets/grpc/llama-cpp-avx512 backend-assets/grpc/llama-cpp-avx backend-assets/grpc/llama-cpp-avx2" make build; \
     else \
         make build; \
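
For context, a minimal standalone sketch of how the updated condition picks the build command. It assumes, as the variable name suggests, that the Makefile skips any gRPC backend listed in SKIP_GRPC_BACKEND; that behavior is inferred from this diff, not shown in it, and the script itself is hypothetical, not part of the repo.

#!/bin/sh
# Hypothetical script mirroring the updated Dockerfile condition.
# TARGETARCH and BUILD_TYPE stand in for the Docker build arguments used above.
TARGETARCH="${TARGETARCH:-amd64}"
BUILD_TYPE="${BUILD_TYPE:-hipblas}"

if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then
    # Any arm64 image, and any hipblas (ROCm) image regardless of arch,
    # skips the CPU-specific llama.cpp variants (avx, avx2, avx512).
    SKIP_GRPC_BACKEND="backend-assets/grpc/llama-cpp-avx512 backend-assets/grpc/llama-cpp-avx backend-assets/grpc/llama-cpp-avx2" make build
else
    # All other builds (e.g. plain CPU or cublas on amd64) compile every variant.
    make build
fi

With the previous condition, an amd64 hipblas build fell through to the else branch and compiled the avx/avx2/avx512 backends as well; the new condition is what lets hipblas images drop those CPU-specific builds.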