GOCMD=go
GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet
BINARY_NAME=local-ai

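# When true, scripts/prepare-libs.sh is run by the dist target to collect the
# shared libraries the built backends link against.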
DETECT_LIBS?=true

# llama.cpp versions
GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be

CPPLLAMA_VERSION?=081b29bd2a3d91e7772e3910ce223dd63b8d7d26

# whisper.cpp version
WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp
WHISPER_CPP_VERSION?=6266a9f9e56a5b925e9892acf650f3eb1245814d

# go-piper version
PIPER_REPO?=https://github.com/mudler/go-piper
PIPER_VERSION?=e10ca041a885d4a8f3871d52924b47792d5e5aa0

# stablediffusion version
STABLEDIFFUSION_REPO?=https://github.com/mudler/go-stable-diffusion
STABLEDIFFUSION_VERSION?=4a3cd6aeae6f66ee57eae9a0075f8c58c3a6a38f

# tinydream version
TINYDREAM_REPO?=https://github.com/M0Rf30/go-tiny-dream
TINYDREAM_VERSION?=c04fa463ace9d9a6464313aa5f9cd0f953b6c057

# bark.cpp
BARKCPP_REPO?=https://github.com/PABannier/bark.cpp.git
BARKCPP_VERSION?=v1.0.0

# stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=9578fdcc4632dc3de5565f28e2fb16b7c18f8d48

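# onnxruntime release to download and ship under backend-assets/lib
# (see the sources/onnxruntime target below; OS/arch are adjusted further down)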
ONNX_VERSION?=1.20.0
ONNX_ARCH?=x64
ONNX_OS?=linux

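# Acceleration to build for: one of openblas, cublas, hipblas, clblas, vulkan,
# metal, sycl_f16 or sycl_f32 (see the ifeq blocks below). Leave empty for a
# plain CPU build; on Darwin an empty value defaults to metal.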
export BUILD_TYPE?=
export STABLE_BUILD_TYPE?=$(BUILD_TYPE)
export CMAKE_ARGS?=
export BACKEND_LIBS?=

CGO_LDFLAGS?=
CGO_LDFLAGS_WHISPER?=
CGO_LDFLAGS_WHISPER+=-lggml
CUDA_LIBPATH?=/usr/local/cuda/lib64/

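# Optional build tags: stablediffusion, tinydream and tts enable the matching
# gRPC backends below; p2p and debug are used by the build and test targets.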
GO_TAGS?=
BUILD_ID?=
NATIVE?=false

TEST_DIR=/tmp/test

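# How many times ginkgo retries flaky specs (passed as --flake-attempts)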
TEST_FLAKES?=5

RANDOM := $(shell bash -c 'echo $$RANDOM')

VERSION?=$(shell git describe --always --tags || echo "dev")

# go tool nm ./local-ai | grep Commit
LD_FLAGS?=-s -w
override LD_FLAGS += -X "github.com/mudler/LocalAI/internal.Version=$(VERSION)"
override LD_FLAGS += -X "github.com/mudler/LocalAI/internal.Commit=$(shell git rev-parse HEAD)"

OPTIONAL_TARGETS?=

export OS := $(shell uname -s)
ARCH := $(shell uname -m)

GREEN  := $(shell tput -Txterm setaf 2)
YELLOW := $(shell tput -Txterm setaf 3)
WHITE  := $(shell tput -Txterm setaf 7)
CYAN   := $(shell tput -Txterm setaf 6)
RESET  := $(shell tput -Txterm sgr0)

UPX?=
# check if upx exists
ifeq (, $(shell which upx))
	UPX=
else
	UPX=$(shell which upx)
endif
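# When upx is found, it is used to compress the built backend binaries
# (see the ifneq ($(UPX),) blocks in the build recipes below).
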
# Default Docker bridge IP
E2E_BRIDGE_IP?=172.17.0.1

ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif

# If NATIVE is false, we add -DGGML_NATIVE=OFF to CMAKE_ARGS
ifeq ($(NATIVE),false)
	CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif

# Detect if we are running on arm64
ifneq (,$(findstring aarch64,$(shell uname -m)))
	ONNX_ARCH=aarch64
endif

ifeq ($(OS),Darwin)
	ONNX_OS=osx
	ifneq (,$(findstring aarch64,$(shell uname -m)))
		ONNX_ARCH=arm64
	else ifneq (,$(findstring arm64,$(shell uname -m)))
		ONNX_ARCH=arm64
	else
		ONNX_ARCH=x86_64
	endif

	ifeq ($(OSX_SIGNING_IDENTITY),)
		OSX_SIGNING_IDENTITY := $(shell security find-identity -v -p codesigning | grep '"' | head -n 1 | sed -E 's/.*"(.*)"/\1/')
	endif

	# on OSX, if BUILD_TYPE is blank, we should default to use Metal
	ifeq ($(BUILD_TYPE),)
		BUILD_TYPE=metal
	# disable metal if on Darwin and any other value is explicitly passed.
	else ifneq ($(BUILD_TYPE),metal)
		CMAKE_ARGS+=-DGGML_METAL=OFF
		export GGML_NO_ACCELERATE=1
		export GGML_NO_METAL=1
	endif

	ifeq ($(BUILD_TYPE),metal)
		# -lcblas removed: it seems to always be listed as a duplicate flag.
		CGO_LDFLAGS += -framework Accelerate
	endif
else
	CGO_LDFLAGS_WHISPER+=-lgomp
endif

ifeq ($(BUILD_TYPE),openblas)
	CGO_LDFLAGS+=-lopenblas
	export GGML_OPENBLAS=1
endif

ifeq ($(BUILD_TYPE),cublas)
	CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH)
	export GGML_CUDA=1
	CGO_LDFLAGS_WHISPER+=-L$(CUDA_LIBPATH)/stubs/ -lcuda -lcufft
endif

ifeq ($(BUILD_TYPE),vulkan)
	CMAKE_ARGS+=-DGGML_VULKAN=1
endif

ifneq (,$(findstring sycl,$(BUILD_TYPE)))
	export GGML_SYCL=1
endif

ifeq ($(BUILD_TYPE),sycl_f16)
	export GGML_SYCL_F16=1
endif

ifeq ($(BUILD_TYPE),hipblas)
	ROCM_HOME ?= /opt/rocm
	ROCM_PATH ?= /opt/rocm
	LD_LIBRARY_PATH ?= /opt/rocm/lib:/opt/rocm/llvm/lib
	export CXX=$(ROCM_HOME)/llvm/bin/clang++
	export CC=$(ROCM_HOME)/llvm/bin/clang
	# llama-ggml has no hipblas support, so override it here.
	export STABLE_BUILD_TYPE=
	export GGML_HIP=1
	GPU_TARGETS ?= gfx900,gfx906,gfx908,gfx940,gfx941,gfx942,gfx90a,gfx1030,gfx1031,gfx1100,gfx1101
	AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
	CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
	CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib
endif

ifeq ($(BUILD_TYPE),metal)
	CGO_LDFLAGS+=-framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
	export GGML_METAL=1
endif

ifeq ($(BUILD_TYPE),clblas)
	CGO_LDFLAGS+=-lOpenCL -lclblast
	export GGML_OPENBLAS=1
endif

# glibc-static or glibc-devel-static required
ifeq ($(STATIC),true)
	LD_FLAGS+=-linkmode external -extldflags -static
endif
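# e.g.: STATIC=true make build (the dist target below uses this for releases)
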
ifeq ($(findstring stablediffusion,$(GO_TAGS)),stablediffusion)
#	OPTIONAL_TARGETS+=go-stable-diffusion/libstablediffusion.a
	OPTIONAL_GRPC+=backend-assets/grpc/stablediffusion
endif

ifeq ($(findstring tinydream,$(GO_TAGS)),tinydream)
#	OPTIONAL_TARGETS+=go-tiny-dream/libtinydream.a
	OPTIONAL_GRPC+=backend-assets/grpc/tinydream
endif

ifeq ($(findstring tts,$(GO_TAGS)),tts)
#	OPTIONAL_TARGETS+=go-piper/libpiper_binding.a
#	OPTIONAL_TARGETS+=backend-assets/espeak-ng-data
	PIPER_CGO_CXXFLAGS+=-I$(CURDIR)/sources/go-piper/piper/src/cpp -I$(CURDIR)/sources/go-piper/piper/build/fi/include -I$(CURDIR)/sources/go-piper/piper/build/pi/include -I$(CURDIR)/sources/go-piper/piper/build/si/include
	PIPER_CGO_LDFLAGS+=-L$(CURDIR)/sources/go-piper/piper/build/fi/lib -L$(CURDIR)/sources/go-piper/piper/build/pi/lib -L$(CURDIR)/sources/go-piper/piper/build/si/lib -lfmt -lspdlog -lucd
	OPTIONAL_GRPC+=backend-assets/grpc/piper
endif

ALL_GRPC_BACKENDS=backend-assets/grpc/huggingface
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx2
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-fallback
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-ggml
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-grpc
ALL_GRPC_BACKENDS+=backend-assets/util/llama-cpp-rpc-server
ALL_GRPC_BACKENDS+=backend-assets/grpc/whisper
ifeq ($(ONNX_OS),linux)
ifeq ($(ONNX_ARCH),x64)
	ALL_GRPC_BACKENDS+=backend-assets/grpc/bark-cpp
	ALL_GRPC_BACKENDS+=backend-assets/grpc/stablediffusion-ggml
endif
endif
ALL_GRPC_BACKENDS+=backend-assets/grpc/local-store
ALL_GRPC_BACKENDS+=backend-assets/grpc/silero-vad
ALL_GRPC_BACKENDS+=$(OPTIONAL_GRPC)
# Use filter-out to remove the specified backends
ALL_GRPC_BACKENDS := $(filter-out $(SKIP_GRPC_BACKEND),$(ALL_GRPC_BACKENDS))
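# e.g. (hypothetical invocation): make build SKIP_GRPC_BACKEND="backend-assets/grpc/llama-ggml"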

GRPC_BACKENDS?=$(ALL_GRPC_BACKENDS) $(OPTIONAL_GRPC)
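# Override to build only a subset, as build-minimal does below,
# e.g.: GRPC_BACKENDS="backend-assets/grpc/llama-cpp-avx2" make build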
TEST_PATHS?=./api/... ./pkg/... ./core/...

# If empty, then we build all
ifeq ($(GRPC_BACKENDS),)
	GRPC_BACKENDS=$(ALL_GRPC_BACKENDS)
endif

2024-01-19 14:38:43 +00:00
i f e q ( $( BUILD_API_ONLY ) , t r u e )
GRPC_BACKENDS =
e n d i f
2024-03-17 14:39:20 +00:00
.PHONY: all test build vendor get-sources prepare-sources prepare

all: help

## go-llama.cpp
sources/go-llama.cpp:
	mkdir -p sources/go-llama.cpp
	cd sources/go-llama.cpp && \
	git init && \
	git remote add origin $(GOLLAMA_REPO) && \
	git fetch origin && \
	git checkout $(GOLLAMA_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/go-llama.cpp/libbinding.a: sources/go-llama.cpp
	$(MAKE) -C sources/go-llama.cpp BUILD_TYPE=$(STABLE_BUILD_TYPE) libbinding.a

## bark.cpp
sources/bark.cpp:
	git clone --recursive $(BARKCPP_REPO) sources/bark.cpp && \
	cd sources/bark.cpp && \
	git checkout $(BARKCPP_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/bark.cpp/build/libbark.a: sources/bark.cpp
	cd sources/bark.cpp && \
	mkdir -p build && \
	cd build && \
	cmake $(CMAKE_ARGS) .. && \
	cmake --build . --config Release

backend/go/bark/libbark.a: sources/bark.cpp/build/libbark.a
	$(MAKE) -C backend/go/bark libbark.a

## go-piper
sources/go-piper:
	mkdir -p sources/go-piper
	cd sources/go-piper && \
	git init && \
	git remote add origin $(PIPER_REPO) && \
	git fetch origin && \
	git checkout $(PIPER_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/go-piper/libpiper_binding.a: sources/go-piper
	$(MAKE) -C sources/go-piper libpiper_binding.a example/main piper.o

## stable diffusion (onnx)
sources/go-stable-diffusion:
	mkdir -p sources/go-stable-diffusion
	cd sources/go-stable-diffusion && \
	git init && \
	git remote add origin $(STABLEDIFFUSION_REPO) && \
	git fetch origin && \
	git checkout $(STABLEDIFFUSION_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/go-stable-diffusion/libstablediffusion.a: sources/go-stable-diffusion
	CPATH="$(CPATH):/usr/include/opencv4" $(MAKE) -C sources/go-stable-diffusion libstablediffusion.a

## stablediffusion (ggml)
sources/stablediffusion-ggml.cpp:
	git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \
	cd sources/stablediffusion-ggml.cpp && \
	git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/stablediffusion-ggml.cpp/build/libstable-diffusion.a: sources/stablediffusion-ggml.cpp
	cd sources/stablediffusion-ggml.cpp && \
	mkdir -p build && \
	cd build && \
	cmake $(CMAKE_ARGS) .. && \
	cmake --build . --config Release

backend/go/image/stablediffusion-ggml/libsd.a: sources/stablediffusion-ggml.cpp/build/libstable-diffusion.a
	$(MAKE) -C backend/go/image/stablediffusion-ggml libsd.a

backend-assets/grpc/stablediffusion-ggml: backend/go/image/stablediffusion-ggml/libsd.a backend-assets/grpc
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/backend/go/image/stablediffusion-ggml/ LIBRARY_PATH=$(CURDIR)/backend/go/image/stablediffusion-ggml/ \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/stablediffusion-ggml ./backend/go/image/stablediffusion-ggml/
ifneq ($(UPX),)
	$(UPX) backend-assets/grpc/stablediffusion-ggml
endif

sources/onnxruntime:
	mkdir -p sources/onnxruntime
	curl -L https://github.com/microsoft/onnxruntime/releases/download/v$(ONNX_VERSION)/onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz -o sources/onnxruntime/onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz
	cd sources/onnxruntime && tar -xvf onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz && rm onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz
	cd sources/onnxruntime && mv onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION)/* ./

backend-assets/lib/libonnxruntime.so.1: backend-assets/lib sources/onnxruntime
	cp -rfv sources/onnxruntime/lib/* backend-assets/lib/
ifeq ($(OS),Darwin)
	mv backend-assets/lib/libonnxruntime.$(ONNX_VERSION).dylib backend-assets/lib/libonnxruntime.dylib
else
	mv backend-assets/lib/libonnxruntime.so.$(ONNX_VERSION) backend-assets/lib/libonnxruntime.so.1
endif

## tiny-dream
sources/go-tiny-dream:
	mkdir -p sources/go-tiny-dream
	cd sources/go-tiny-dream && \
	git init && \
	git remote add origin $(TINYDREAM_REPO) && \
	git fetch origin && \
	git checkout $(TINYDREAM_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/go-tiny-dream/libtinydream.a: sources/go-tiny-dream
	$(MAKE) -C sources/go-tiny-dream libtinydream.a

## whisper
sources/whisper.cpp:
	mkdir -p sources/whisper.cpp
	cd sources/whisper.cpp && \
	git init && \
	git remote add origin $(WHISPER_REPO) && \
	git fetch origin && \
	git checkout $(WHISPER_CPP_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/whisper.cpp/libwhisper.a: sources/whisper.cpp
	cd sources/whisper.cpp && $(MAKE) libwhisper.a libggml.a

get-sources: sources/go-llama.cpp sources/go-piper sources/stablediffusion-ggml.cpp sources/bark.cpp sources/whisper.cpp sources/go-stable-diffusion sources/go-tiny-dream backend/cpp/llama/llama.cpp

replace:
	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
	$(GOCMD) mod edit -replace github.com/M0Rf30/go-tiny-dream=$(CURDIR)/sources/go-tiny-dream
	$(GOCMD) mod edit -replace github.com/mudler/go-piper=$(CURDIR)/sources/go-piper
	$(GOCMD) mod edit -replace github.com/mudler/go-stable-diffusion=$(CURDIR)/sources/go-stable-diffusion
	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(CURDIR)/sources/go-llama.cpp

dropreplace:
	$(GOCMD) mod edit -dropreplace github.com/ggerganov/whisper.cpp
	$(GOCMD) mod edit -dropreplace github.com/ggerganov/whisper.cpp/bindings/go
	$(GOCMD) mod edit -dropreplace github.com/M0Rf30/go-tiny-dream
	$(GOCMD) mod edit -dropreplace github.com/mudler/go-piper
	$(GOCMD) mod edit -dropreplace github.com/mudler/go-stable-diffusion
	$(GOCMD) mod edit -dropreplace github.com/go-skynet/go-llama.cpp

prepare-sources: get-sources replace
	$(GOCMD) mod download

## GENERIC
rebuild: ## Rebuilds the project
	$(GOCMD) clean -cache
	$(MAKE) -C sources/go-llama.cpp clean
	$(MAKE) -C sources/whisper.cpp clean
	$(MAKE) -C sources/go-stable-diffusion clean
	$(MAKE) -C sources/go-piper clean
	$(MAKE) -C sources/go-tiny-dream clean
	$(MAKE) build

prepare: prepare-sources $(OPTIONAL_TARGETS)

clean: ## Remove build related file
	$(GOCMD) clean -cache
	rm -f prepare
	rm -rf ./sources
	rm -rf $(BINARY_NAME)
	rm -rf release/
	rm -rf backend-assets/*
	$(MAKE) -C backend/cpp/grpc clean
	$(MAKE) -C backend/go/bark clean
	$(MAKE) -C backend/cpp/llama clean
	$(MAKE) -C backend/go/image/stablediffusion-ggml clean
	rm -rf backend/cpp/llama-* || true
	$(MAKE) dropreplace
	$(MAKE) protogen-clean
	rmdir pkg/grpc/proto || true

clean-tests:
	rm -rf test-models
	rm -rf test-dir
	rm -rf core/http/backend-assets

clean-dc: clean
	cp -r /build/backend-assets /workspace/backend-assets

## Build:
build: prepare backend-assets grpcs ## Build the project
	$(info ${GREEN}I local-ai build info:${RESET})
	$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
	$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
	$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
	$(info ${GREEN}I UPX: ${YELLOW}$(UPX)${RESET})
ifneq ($(BACKEND_LIBS),)
	$(MAKE) backend-assets/lib
	cp -f $(BACKEND_LIBS) backend-assets/lib/
endif
	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./

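# Example invocation (hypothetical; assumes a CUDA toolchain is installed):
#   BUILD_TYPE=cublas GO_TAGS="tts" make build
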
build-minimal:
	BUILD_GRPC_FOR_BACKEND_LLAMA=true GRPC_BACKENDS="backend-assets/grpc/llama-cpp-avx2" GO_TAGS=p2p $(MAKE) build

build-api:
	BUILD_GRPC_FOR_BACKEND_LLAMA=true BUILD_API_ONLY=true GO_TAGS=p2p $(MAKE) build

backend-assets/lib:
	mkdir -p backend-assets/lib

dist:
	$(MAKE) backend-assets/grpc/llama-cpp-avx2
ifeq ($(DETECT_LIBS),true)
	scripts/prepare-libs.sh backend-assets/grpc/llama-cpp-avx2
endif
ifeq ($(OS),Darwin)
	BUILD_TYPE=none $(MAKE) backend-assets/grpc/llama-cpp-fallback
else
	$(MAKE) backend-assets/grpc/llama-cpp-cuda
	$(MAKE) backend-assets/grpc/llama-cpp-hipblas
	$(MAKE) backend-assets/grpc/llama-cpp-sycl_f16
	$(MAKE) backend-assets/grpc/llama-cpp-sycl_f32
endif
	GO_TAGS="tts p2p" $(MAKE) build
ifeq ($(DETECT_LIBS),true)
	scripts/prepare-libs.sh backend-assets/grpc/piper
endif
	GO_TAGS="tts p2p" STATIC=true $(MAKE) build
	mkdir -p release
# if BUILD_ID is empty, then we don't append it to the binary name
ifeq ($(BUILD_ID),)
	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(OS)-$(ARCH)
	shasum -a 256 release/$(BINARY_NAME)-$(OS)-$(ARCH) > release/$(BINARY_NAME)-$(OS)-$(ARCH).sha256
else
	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)
	shasum -a 256 release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH) > release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH).sha256
endif

dist-cross-linux-arm64:
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_NATIVE=off" GRPC_BACKENDS="backend-assets/grpc/llama-cpp-fallback backend-assets/grpc/llama-cpp-grpc backend-assets/util/llama-cpp-rpc-server" GO_TAGS="p2p" \
	STATIC=true $(MAKE) build
	mkdir -p release
# if BUILD_ID is empty, then we don't append it to the binary name
ifeq ($(BUILD_ID),)
	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(OS)-arm64
	shasum -a 256 release/$(BINARY_NAME)-$(OS)-arm64 > release/$(BINARY_NAME)-$(OS)-arm64.sha256
else
	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-arm64
	shasum -a 256 release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-arm64 > release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-arm64.sha256
endif

osx-signed: build
	codesign --deep --force --sign "$(OSX_SIGNING_IDENTITY)" --entitlements "./Entitlements.plist" "./$(BINARY_NAME)"

## Run
run: prepare ## run local-ai
	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./

test-models/testmodel.ggml:
	mkdir test-models
	mkdir test-dir
	wget -q https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_0.bin -O test-models/testmodel.ggml
	wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
	wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
	wget -q https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav -O test-dir/audio.wav
	cp tests/models_fixtures/* test-models

prepare-test: grpcs
	cp -rf backend-assets core/http
	cp tests/models_fixtures/* test-models

test: prepare test-models/testmodel.ggml grpcs
	@echo 'Running tests'
	export GO_TAGS="tts stablediffusion debug"
	$(MAKE) prepare-test
	HUGGINGFACE_GRPC=$(abspath ./)/backend/python/sentencetransformers/run.sh TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="!llama && !llama-gguf" --flake-attempts $(TEST_FLAKES) --fail-fast -v -r $(TEST_PATHS)
	$(MAKE) test-llama
	$(MAKE) test-llama-gguf
	$(MAKE) test-tts
	$(MAKE) test-stablediffusion

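# E2E flow: prepare-e2e builds the localai-tests image, run-e2e-image starts it
# on port 5390, test-e2e drives it via $(E2E_BRIDGE_IP), and teardown-e2e stops
# the container and removes $(TEST_DIR).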
prepare-e2e:
	mkdir -p $(TEST_DIR)
	cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
	test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
	docker build --build-arg GRPC_BACKENDS="$(GRPC_BACKENDS)" --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=12 --build-arg CUDA_MINOR_VERSION=0 --build-arg FFMPEG=true -t localai-tests .

run-e2e-image:
	ls -liah $(abspath ./tests/e2e-fixtures)
	docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --gpus all --name e2e-tests-$(RANDOM) localai-tests

run-e2e-aio: protogen-go
	@echo 'Running e2e AIO tests'
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e-aio

test-e2e:
	@echo 'Running e2e tests'
	BUILD_TYPE=$(BUILD_TYPE) \
	LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e

teardown-e2e:
	rm -rf $(TEST_DIR) || true
	docker stop $$(docker ps -q --filter ancestor=localai-tests)

test-llama: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)

test-llama-gguf: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama-gguf" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)

test-tts: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="tts" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)

test-stablediffusion: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stablediffusion" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)

test-stores: backend-assets/grpc/local-store
	mkdir -p tests/integration/backend-assets/grpc
	cp -f backend-assets/grpc/local-store tests/integration/backend-assets/grpc/
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stores" --flake-attempts $(TEST_FLAKES) -v -r tests/integration

test-container:
	docker build --target requirements -t local-ai-test-container .
	docker run -ti --rm --entrypoint /bin/bash -v $(abspath ./):/build local-ai-test-container

## Help:
help: ## Show this help.
	@echo ''
	@echo 'Usage:'
	@echo '  ${YELLOW}make${RESET} ${GREEN}<target>${RESET}'
	@echo ''
	@echo 'Targets:'
	@awk 'BEGIN {FS = ":.*?## "} { \
		if (/^[a-zA-Z_-]+:.*?##.*$$/) {printf "    ${YELLOW}%-20s${GREEN}%s${RESET}\n", $$1, $$2} \
		else if (/^## .*$$/) {printf "    ${CYAN}%s${RESET}\n", substr($$1,4)} \
		}' $(MAKEFILE_LIST)

.PHONY: protogen
protogen: protogen-go protogen-python

.PHONY: protogen-clean
protogen-clean: protogen-go-clean protogen-python-clean

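# protogen-go requires protoc with the protoc-gen-go and protoc-gen-go-grpc
# plugins on PATH (they implement the --go_out and --go-grpc_out options).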
.PHONY: protogen-go
protogen-go:
	mkdir -p pkg/grpc/proto
	protoc --experimental_allow_proto3_optional -Ibackend/ --go_out=pkg/grpc/proto/ --go_opt=paths=source_relative --go-grpc_out=pkg/grpc/proto/ --go-grpc_opt=paths=source_relative \
		backend/backend.proto

2024-04-13 07:37:32 +00:00
.PHONY : protogen -go -clean
protogen-go-clean :
$( RM) pkg/grpc/proto/backend.pb.go pkg/grpc/proto/backend_grpc.pb.go
$( RM) bin/*
.PHONY: protogen-python
protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen vllm-protogen openvoice-protogen

.PHONY: protogen-python-clean
protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean vllm-protogen-clean openvoice-protogen-clean

.PHONY: autogptq-protogen
autogptq-protogen:
	$(MAKE) -C backend/python/autogptq protogen

.PHONY: autogptq-protogen-clean
autogptq-protogen-clean:
	$(MAKE) -C backend/python/autogptq protogen-clean

.PHONY: bark-protogen
bark-protogen:
	$(MAKE) -C backend/python/bark protogen

.PHONY: bark-protogen-clean
bark-protogen-clean:
	$(MAKE) -C backend/python/bark protogen-clean

.PHONY: coqui-protogen
coqui-protogen:
	$(MAKE) -C backend/python/coqui protogen

.PHONY: coqui-protogen-clean
coqui-protogen-clean:
	$(MAKE) -C backend/python/coqui protogen-clean

.PHONY: diffusers-protogen
diffusers-protogen:
	$(MAKE) -C backend/python/diffusers protogen

.PHONY: diffusers-protogen-clean
diffusers-protogen-clean:
	$(MAKE) -C backend/python/diffusers protogen-clean

.PHONY: exllama2-protogen
exllama2-protogen:
	$(MAKE) -C backend/python/exllama2 protogen

.PHONY: exllama2-protogen-clean
exllama2-protogen-clean:
	$(MAKE) -C backend/python/exllama2 protogen-clean

.PHONY: mamba-protogen
mamba-protogen:
	$(MAKE) -C backend/python/mamba protogen

.PHONY: mamba-protogen-clean
mamba-protogen-clean:
	$(MAKE) -C backend/python/mamba protogen-clean

.PHONY: rerankers-protogen
rerankers-protogen:
	$(MAKE) -C backend/python/rerankers protogen

.PHONY: rerankers-protogen-clean
rerankers-protogen-clean:
	$(MAKE) -C backend/python/rerankers protogen-clean

.PHONY: sentencetransformers-protogen
sentencetransformers-protogen:
	$(MAKE) -C backend/python/sentencetransformers protogen

.PHONY: sentencetransformers-protogen-clean
sentencetransformers-protogen-clean:
	$(MAKE) -C backend/python/sentencetransformers protogen-clean

.PHONY: transformers-protogen
transformers-protogen:
	$(MAKE) -C backend/python/transformers protogen

.PHONY: transformers-protogen-clean
transformers-protogen-clean:
	$(MAKE) -C backend/python/transformers protogen-clean

.PHONY: parler-tts-protogen
parler-tts-protogen:
	$(MAKE) -C backend/python/parler-tts protogen

.PHONY: parler-tts-protogen-clean
parler-tts-protogen-clean:
	$(MAKE) -C backend/python/parler-tts protogen-clean

.PHONY: transformers-musicgen-protogen
transformers-musicgen-protogen:
	$(MAKE) -C backend/python/transformers-musicgen protogen

.PHONY: transformers-musicgen-protogen-clean
transformers-musicgen-protogen-clean:
	$(MAKE) -C backend/python/transformers-musicgen protogen-clean

.PHONY: vall-e-x-protogen
vall-e-x-protogen:
	$(MAKE) -C backend/python/vall-e-x protogen

.PHONY: vall-e-x-protogen-clean
vall-e-x-protogen-clean:
	$(MAKE) -C backend/python/vall-e-x protogen-clean

.PHONY: openvoice-protogen
openvoice-protogen:
	$(MAKE) -C backend/python/openvoice protogen

.PHONY: openvoice-protogen-clean
openvoice-protogen-clean:
	$(MAKE) -C backend/python/openvoice protogen-clean

.PHONY: vllm-protogen
vllm-protogen:
	$(MAKE) -C backend/python/vllm protogen

.PHONY: vllm-protogen-clean
vllm-protogen-clean:
	$(MAKE) -C backend/python/vllm protogen-clean

## GRPC
# Note: it is duplicated in the Dockerfile
prepare-extra-conda-environments: protogen-python
	$(MAKE) -C backend/python/autogptq
	$(MAKE) -C backend/python/bark
	$(MAKE) -C backend/python/coqui
	$(MAKE) -C backend/python/diffusers
	$(MAKE) -C backend/python/vllm
	$(MAKE) -C backend/python/mamba
	$(MAKE) -C backend/python/sentencetransformers
	$(MAKE) -C backend/python/rerankers
	$(MAKE) -C backend/python/transformers
	$(MAKE) -C backend/python/transformers-musicgen
	$(MAKE) -C backend/python/parler-tts
	$(MAKE) -C backend/python/vall-e-x
	$(MAKE) -C backend/python/openvoice
	$(MAKE) -C backend/python/exllama2

prepare-test-extra: protogen-python
	$(MAKE) -C backend/python/transformers
	$(MAKE) -C backend/python/diffusers

test-extra: prepare-test-extra
	$(MAKE) -C backend/python/transformers test
	$(MAKE) -C backend/python/diffusers test
feat(conda): conda environments (#1144)
* feat(autogptq): add a separate conda environment for autogptq (#1137)
**Description**
This PR is related to #1117
**Notes for Reviewers**
Here we lock down the versions of the dependencies, to make sure the
backend keeps working even if upstream dependencies are upgraded.
I changed the order of the imports according to pylint, without
changing any logic, so it should be fine.
I will investigate writing test cases for every backend. I can run the
service in my environment, but there is no automated way to test it
yet, so I am not fully confident in it.
Added a README.md in the `grpc` root with the common commands for
creating a `conda` environment; it can serve as the reference for
documenting additional gRPC backends.
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* [Extra backend] Add separate environment for ttsbark (#1141)
**Description**
This PR relates to #1117
**Notes for Reviewers**
Same as the previous PR:
* The code is also changed, but only the order of the imports; some
code comments are added as well.
* Add a configuration for the `conda` environment
* Add a simple test case checking that the service can start up in the
current `conda` environment. It succeeds in VSCode but is not
out-of-the-box on the terminal, so it is hard to say how useful the
test case really is.
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): add make target and entrypoints for the dockerfile
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate conda env for diffusers (#1145)
**Description**
This PR relates to #1117
**Notes for Reviewers**
* Add `conda` env `diffusers.yml`
* Add Makefile to create it automatically
* Add `run.sh` to support running it as an extra backend
* Also add it to the main Dockerfile
* Add a make command in the root Makefile
* Tested the server; it can start up under the env
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate env for vllm (#1148)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server can be started as normal
* The test case can be triggered in VSCode
* As in the other PRs of this kind, add `vllm.yml`, a Makefile target,
`run.sh` in the main Dockerfile, and a command in the main Makefile
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate env for huggingface (#1146)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* Add conda env `huggingface.yml`
* Change the import order, and also remove unused packages
* Add `run.sh` and a make command to the main Dockerfile and Makefile
* Add test cases for it. They can be triggered and succeed under the
VSCode Python extension, but they hang when run with `python -m
unittest test_huggingface.py` in the terminal
```
Running tests (unittest): /workspaces/LocalAI/extra/grpc/huggingface
Running tests: /workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_embedding
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_load_model
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_server_startup
./test_huggingface.py::TestBackendServicer::test_embedding Passed
./test_huggingface.py::TestBackendServicer::test_load_model Passed
./test_huggingface.py::TestBackendServicer::test_server_startup Passed
Total number of tests expected to run: 3
Total number of tests run: 3
Total number of tests passed: 3
Total number of tests failed: 0
Total number of tests failed with errors: 0
Total number of tests skipped: 0
Finished running tests!
```
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add the separate conda env for VALL-E X (#1147)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server cannot start up
```
(ttsvalle) @Aisuko ➜ /workspaces/LocalAI (feat/vall-e-x) $ /opt/conda/envs/ttsvalle/bin/python /workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py
Traceback (most recent call last):
File "/workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py", line 14, in <module>
from utils.generation import SAMPLE_RATE, generate_audio, preload_models
ModuleNotFoundError: No module named 'utils'
```
The installation steps below follow
https://github.com/Plachtaa/VALL-E-X#-installation:
* Under the `ttsvalle` conda env
```
git clone https://github.com/Plachtaa/VALL-E-X.git
cd VALL-E-X
pip install -r requirements.txt
```
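A plausible workaround, not part of this PR: the traceback suggests
`utils` is a package inside the VALL-E-X checkout itself rather than an
installed module, so the clone directory (path assumed from the trace
above) has to be importable before the backend starts:
```
# Hypothetical fix: put the VALL-E-X checkout on the import path so
# `utils.generation` resolves; the clone location is an assumption.
export PYTHONPATH=/workspaces/VALL-E-X:$PYTHONPATH
/opt/conda/envs/ttsvalle/bin/python /workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py
```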
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fix: set image type
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate conda env for exllama (#1149)
Add separate env for exllama
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Setup conda
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Set image_type arg
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: prepare only conda env in tests
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Dockerfile: comment manual pip calls
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* conda: add conda to PATH
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fixes
* add shebang
* Fixups
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* file perms
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* debug
* Install new conda in the worker
* Disable GPU tests for now until the worker is back
* Rename workflows
* debug
* Fixup conda install
* fixup(wrapper): pass args
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Co-authored-by: Aisuko <urakiny@gmail.com>
2023-11-04 14:30:32 +00:00
2024-01-19 14:38:43 +00:00
backend-assets:
mkdir -p backend-assets
ifeq ($(BUILD_API_ONLY),true)
touch backend-assets/keep
endif
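# Usage sketch (illustrative invocation, not a rule): with BUILD_API_ONLY=true
# the directory is created with only a `keep` placeholder, so later steps that
# expect backend-assets to exist still work without building any backend:
#   BUILD_API_ONLY=true make backend-assets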
2024-03-17 14:39:20 +00:00
backend-assets/espeak-ng-data: sources/go-piper sources/go-piper/libpiper_binding.a
mkdir -p backend-assets/espeak-ng-data
@cp -rf sources/go-piper/piper-phonemize/pi/share/espeak-ng-data/. backend-assets/espeak-ng-data
2024-04-13 07:37:32 +00:00
backend-assets/grpc: protogen-go replace
2023-07-14 23:19:43 +00:00
mkdir -p backend-assets/grpc
2024-05-04 15:56:12 +00:00
backend-assets/grpc/huggingface: backend-assets/grpc
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/huggingface ./backend/go/llm/langchain/
2024-07-22 13:39:57 +00:00
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/huggingface
endif
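# The ifneq ($(UPX),) guard above repeats after every backend binary below.
# Usage sketch (illustrative; `upx` must be installed):
#   make backend-assets/grpc/huggingface UPX=upx   # compress the binary
#   make backend-assets/grpc/huggingface           # UPX unset: skip compression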
2024-03-17 14:39:20 +00:00
backend/cpp/llama/llama.cpp:
LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama llama.cpp
2023-07-14 23:19:43 +00:00
2023-11-06 18:07:46 +00:00
INSTALLED_PACKAGES=$(CURDIR)/backend/cpp/grpc/installed_packages
INSTALLED_LIB_CMAKE=$(INSTALLED_PACKAGES)/lib/cmake
ADDED_CMAKE_ARGS=-Dabsl_DIR=${INSTALLED_LIB_CMAKE}/absl \
2024-03-17 14:39:20 +00:00
-DProtobuf_DIR=${INSTALLED_LIB_CMAKE}/protobuf \
-Dutf8_range_DIR=${INSTALLED_LIB_CMAKE}/utf8_range \
-DgRPC_DIR=${INSTALLED_LIB_CMAKE}/grpc \
-DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES=${INSTALLED_PACKAGES}/include
2024-05-04 15:56:12 +00:00
build-llama-cpp-grpc-server:
2024-03-17 14:39:20 +00:00
# Conditionally build grpc for the llama backend to use if needed
2023-11-06 18:07:46 +00:00
ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
2024-01-13 09:08:26 +00:00
$(MAKE) -C backend/cpp/grpc build
2024-03-17 14:39:20 +00:00
_PROTOBUF_PROTOC=${INSTALLED_PACKAGES}/bin/proto \
_GRPC_CPP_PLUGIN_EXECUTABLE=${INSTALLED_PACKAGES}/bin/grpc_cpp_plugin \
PATH="${INSTALLED_PACKAGES}/bin:${PATH}" \
CMAKE_ARGS="${CMAKE_ARGS} ${ADDED_CMAKE_ARGS}" \
LLAMA_VERSION=$(CPPLLAMA_VERSION) \
2024-05-04 15:56:12 +00:00
$(MAKE) -C backend/cpp/${VARIANT} grpc-server
2023-11-06 18:07:46 +00:00
else
echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
2024-05-04 15:56:12 +00:00
LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/${VARIANT} grpc-server
2023-11-06 18:07:46 +00:00
endif
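# Usage sketch for the two paths above (illustrative; VARIANT assumes the
# corresponding backend/cpp/llama-* copy already exists):
#   make build-llama-cpp-grpc-server VARIANT=llama-avx2 BUILD_GRPC_FOR_BACKEND_LLAMA=true
#   make build-llama-cpp-grpc-server VARIANT=llama-avx2   # use system gRPC/protobuf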
2024-01-13 09:08:26 +00:00
2024-05-31 16:35:33 +00:00
# This target is for manually building a variant with auto-detected flags
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-05-31 16:35:33 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-cpp
$(MAKE) -C backend/cpp/llama-cpp purge
$(info ${GREEN}I llama-cpp build info:avx2${RESET})
$(MAKE) VARIANT="llama-cpp" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-cpp/grpc-server backend-assets/grpc/llama-cpp
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp-avx2: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-05-13 09:37:52 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-avx2
$(MAKE) -C backend/cpp/llama-avx2 purge
$(info ${GREEN}I llama-cpp build info:avx2${RESET})
2024-06-27 21:10:04 +00:00
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-avx2" build-llama-cpp-grpc-server
2024-05-13 09:37:52 +00:00
cp -rfv backend/cpp/llama-avx2/grpc-server backend-assets/grpc/llama-cpp-avx2
2023-10-16 19:46:29 +00:00
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp-avx: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-05-13 09:37:52 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-avx
$(MAKE) -C backend/cpp/llama-avx purge
$(info ${GREEN}I llama-cpp build info:avx${RESET})
2024-06-27 21:10:04 +00:00
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-avx" build-llama-cpp-grpc-server
2024-05-13 09:37:52 +00:00
cp -rfv backend/cpp/llama-avx/grpc-server backend-assets/grpc/llama-cpp-avx
2024-05-04 15:56:12 +00:00
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp-fallback: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-05-04 15:56:12 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-fallback
$(MAKE) -C backend/cpp/llama-fallback purge
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
2024-06-27 21:10:04 +00:00
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-fallback" build-llama-cpp-grpc-server
2024-05-04 15:56:12 +00:00
cp -rfv backend/cpp/llama-fallback/grpc-server backend-assets/grpc/llama-cpp-fallback
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp-cuda: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-05-14 17:40:18 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-cuda
$(MAKE) -C backend/cpp/llama-cuda purge
$(info ${GREEN}I llama-cpp build info:cuda${RESET})
2024-06-27 21:10:04 +00:00
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_CUDA=ON" $(MAKE) VARIANT="llama-cuda" build-llama-cpp-grpc-server
2024-05-14 17:40:18 +00:00
cp -rfv backend/cpp/llama-cuda/grpc-server backend-assets/grpc/llama-cpp-cuda
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp-hipblas: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-06-05 06:44:15 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-hipblas
$(MAKE) -C backend/cpp/llama-hipblas purge
$(info ${GREEN}I llama-cpp build info:hipblas${RESET})
2024-11-17 09:23:59 +00:00
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" BUILD_TYPE="hipblas" $(MAKE) VARIANT="llama-hipblas" build-llama-cpp-grpc-server
2024-06-05 06:44:15 +00:00
cp -rfv backend/cpp/llama-hipblas/grpc-server backend-assets/grpc/llama-cpp-hipblas
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp-sycl_f16: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-06-06 06:40:51 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-sycl_f16
$(MAKE) -C backend/cpp/llama-sycl_f16 purge
$(info ${GREEN}I llama-cpp build info:sycl_f16${RESET})
BUILD_TYPE="sycl_f16" $(MAKE) VARIANT="llama-sycl_f16" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-sycl_f16/grpc-server backend-assets/grpc/llama-cpp-sycl_f16
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp-sycl_f32: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-06-06 06:40:51 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-sycl_f32
$(MAKE) -C backend/cpp/llama-sycl_f32 purge
$(info ${GREEN}I llama-cpp build info:sycl_f32${RESET})
BUILD_TYPE="sycl_f32" $(MAKE) VARIANT="llama-sycl_f32" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-sycl_f32/grpc-server backend-assets/grpc/llama-cpp-sycl_f32
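# Each llama-cpp-* target above follows the same shape: copy backend/cpp/llama
# into a variant directory, purge it, then build grpc-server with
# variant-specific CMAKE_ARGS/BUILD_TYPE. Picking variants is just a matter of
# naming targets; illustratively:
#   make backend-assets/grpc/llama-cpp-fallback backend-assets/grpc/llama-cpp-cuda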
2024-07-15 20:54:16 +00:00
backend-assets/grpc/llama-cpp-grpc: backend-assets/grpc backend/cpp/llama/llama.cpp
2024-05-14 23:17:02 +00:00
cp -rf backend/cpp/llama backend/cpp/llama-grpc
$(MAKE) -C backend/cpp/llama-grpc purge
$(info ${GREEN}I llama-cpp build info:grpc${RESET})
2024-06-27 21:10:04 +00:00
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-grpc" build-llama-cpp-grpc-server
2024-05-14 23:17:02 +00:00
cp -rfv backend/cpp/llama-grpc/grpc-server backend-assets/grpc/llama-cpp-grpc
backend-assets/util/llama-cpp-rpc-server: backend-assets/grpc/llama-cpp-grpc
mkdir -p backend-assets/util/
cp -rf backend/cpp/llama-grpc/llama.cpp/build/bin/rpc-server backend-assets/util/llama-cpp-rpc-server
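# Usage sketch (illustrative): the util target depends on llama-cpp-grpc, which
# configures llama.cpp with -DGGML_RPC=ON and builds both grpc-server and
# rpc-server, so one invocation produces the pair:
#   make backend-assets/util/llama-cpp-rpc-server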
2024-04-18 01:59:05 +00:00
backend-assets/grpc/llama-ggml: sources/go-llama.cpp sources/go-llama.cpp/libbinding.a backend-assets/grpc
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-llama.cpp LIBRARY_PATH=$(CURDIR)/sources/go-llama.cpp \
2023-11-18 07:18:43 +00:00
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama-ggml ./backend/go/llm/llama-ggml/
2024-07-22 13:39:57 +00:00
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/llama-ggml
endif
2024-04-17 21:33:49 +00:00
2024-11-28 21:16:44 +00:00
backend-assets/grpc/bark-cpp: backend/go/bark/libbark.a backend-assets/grpc
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/backend/go/bark/ LIBRARY_PATH=$(CURDIR)/backend/go/bark/ \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/bark-cpp ./backend/go/bark/
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/bark-cpp
endif
2024-03-17 14:39:20 +00:00
backend-assets/grpc/piper: sources/go-piper sources/go-piper/libpiper_binding.a backend-assets/grpc backend-assets/espeak-ng-data
CGO_CXXFLAGS="$(PIPER_CGO_CXXFLAGS)" CGO_LDFLAGS="$(PIPER_CGO_LDFLAGS)" LIBRARY_PATH=$(CURDIR)/sources/go-piper \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/piper ./backend/go/tts/
2024-07-22 13:39:57 +00:00
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/piper
endif
2023-07-14 23:19:43 +00:00
2024-03-17 14:39:20 +00:00
backend-assets/grpc/stablediffusion: sources/go-stable-diffusion sources/go-stable-diffusion/libstablediffusion.a backend-assets/grpc
2024-03-27 20:12:19 +00:00
CGO_LDFLAGS="$(CGO_LDFLAGS)" CPATH="$(CPATH):$(CURDIR)/sources/go-stable-diffusion/:/usr/include/opencv4" LIBRARY_PATH=$(CURDIR)/sources/go-stable-diffusion/ \
2024-03-17 14:39:20 +00:00
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/stablediffusion ./backend/go/image/stablediffusion
2024-07-22 13:39:57 +00:00
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/stablediffusion
endif
2023-07-14 23:19:43 +00:00
2024-11-20 13:48:40 +00:00
backend-assets/grpc/silero-vad: backend-assets/grpc backend-assets/lib/libonnxruntime.so.1
CGO_LDFLAGS="$(CGO_LDFLAGS)" CPATH="$(CPATH):$(CURDIR)/sources/onnxruntime/include/" LIBRARY_PATH=$(CURDIR)/backend-assets/lib \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/silero-vad ./backend/go/vad/silero
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/silero-vad
endif
2024-03-17 14:39:20 +00:00
backend-assets/grpc/tinydream: sources/go-tiny-dream sources/go-tiny-dream/libtinydream.a backend-assets/grpc
2024-01-10 08:39:50 +00:00
CGO_LDFLAGS="$(CGO_LDFLAGS)" LIBRARY_PATH=$(CURDIR)/sources/go-tiny-dream \
2023-12-24 19:27:24 +00:00
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/tinydream ./backend/go/image/tinydream
2024-07-22 13:39:57 +00:00
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/tinydream
endif
2023-12-24 19:27:24 +00:00
2024-03-17 14:39:20 +00:00
backend-assets/grpc/whisper: sources/whisper.cpp sources/whisper.cpp/libwhisper.a backend-assets/grpc
2024-07-04 21:09:50 +00:00
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_WHISPER)" C_INCLUDE_PATH="$(CURDIR)/sources/whisper.cpp/include:$(CURDIR)/sources/whisper.cpp/ggml/include" LIBRARY_PATH=$(CURDIR)/sources/whisper.cpp \
2024-09-02 13:48:53 +00:00
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/whisper ./backend/go/transcribe/whisper
2024-07-22 13:39:57 +00:00
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/whisper
endif
2023-07-14 23:19:43 +00:00
2024-03-22 20:14:04 +00:00
backend-assets/grpc/local-store: backend-assets/grpc
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/local-store ./backend/go/stores/
2024-07-22 13:39:57 +00:00
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/local-store
endif
2024-03-22 20:14:04 +00:00
2023-08-13 18:04:08 +00:00
grpcs: prepare $(GRPC_BACKENDS)
2024-02-08 19:12:51 +00:00
DOCKER_IMAGE ?= local-ai
2024-03-21 00:12:20 +00:00
DOCKER_AIO_IMAGE ?= local-ai-aio
2024-02-08 19:12:51 +00:00
IMAGE_TYPE ?= core
BASE_IMAGE ?= ubuntu:22.04
docker:
docker build \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
2024-03-27 20:12:19 +00:00
--build-arg GO_TAGS="$(GO_TAGS)" \
2024-03-29 21:32:40 +00:00
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
2024-02-08 19:12:51 +00:00
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
-t $(DOCKER_IMAGE) .
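# Usage sketch (illustrative tag; hipblas is one of the BUILD_TYPE values used
# elsewhere in this Makefile):
#   make docker BUILD_TYPE=hipblas DOCKER_IMAGE=local-ai:hipblas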
2024-05-13 09:37:52 +00:00
2024-06-19 15:50:49 +00:00
docker-cuda11:
docker build \
--build-arg CUDA_MAJOR_VERSION=11 \
--build-arg CUDA_MINOR_VERSION=8 \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
-t $(DOCKER_IMAGE)-cuda11 .
2024-03-21 00:12:20 +00:00
docker-aio:
2024-03-21 21:09:04 +00:00
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
2024-03-21 00:12:20 +00:00
docker build \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
2024-03-29 21:32:40 +00:00
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
2024-03-21 00:12:20 +00:00
-t $(DOCKER_AIO_IMAGE) -f Dockerfile.aio .
docker-aio-all:
$(MAKE) docker-aio DOCKER_AIO_SIZE=cpu
2024-02-08 19:12:51 +00:00
docker-image-intel:
docker build \
2024-11-19 17:42:48 +00:00
--build-arg BASE_IMAGE=intel/oneapi-basekit:2025.0.0-0-devel-ubuntu22.04 \
2024-02-08 19:12:51 +00:00
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="none" \
2024-03-29 21:32:40 +00:00
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
2024-02-14 20:44:12 +00:00
--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
2024-03-07 13:37:45 +00:00
docker-image-intel-xpu:
docker build \
2024-11-19 17:42:48 +00:00
--build-arg BASE_IMAGE=intel/oneapi-basekit:2025.0.0-0-devel-ubuntu22.04 \
2024-03-07 13:37:45 +00:00
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="none" \
2024-03-29 21:32:40 +00:00
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
2024-03-22 20:14:04 +00:00
--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
2024-03-29 21:29:33 +00:00
.PHONY: swagger
swagger:
2024-04-20 23:19:57 +00:00
swag init -g core/http/app.go --output swagger
2024-05-19 16:24:27 +00:00
.PHONY: gen-assets
gen-assets:
2024-06-12 22:47:16 +00:00
$(GOCMD) run core/dependencies_manager/manager.go embedded/webui_static.yaml core/http/static/assets
## Documentation
2024-07-05 17:19:38 +00:00
docs/layouts/_default:
2024-06-12 22:47:16 +00:00
mkdir -p docs/layouts/_default
docs/static/gallery.html: docs/layouts/_default
$(GOCMD) run ./.github/ci/modelslist.go ./gallery/index.yaml > docs/static/gallery.html
docs/public: docs/layouts/_default docs/static/gallery.html
cd docs && hugo --minify
docs-clean:
rm -rf docs/public
rm -rf docs/static/gallery.html
.PHONY: docs
docs: docs/static/gallery.html
2024-07-05 17:19:38 +00:00
cd docs && hugo serve
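# Usage sketch (illustrative; hugo is assumed to be installed): rebuild the
# gallery page from the model index, then serve the site locally:
#   make docs-clean docs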