# Mirror of https://github.com/mudler/LocalAI.git (synced 2024-12-27, commit e2de8a88f7)
# History: feat: create bash library to handle install/run/test of python backends;
#          fix: remove incorrect LIMIT_TARGETS from parler-tts;
#          fix: update runUnittests to handle running tests from a custom test file.
# Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com>
# Conda environment spec used by install.sh for this backend.
# Use simple expansion (:=) and no quotes: with the original
#   export CONDA_ENV_PATH = "parler.yml"
# the quote characters become part of the value, so anything reading the
# exported $CONDA_ENV_PATH environment variable directly would look for a
# file literally named "parler.yml" (quotes included) and fail. The recipe
# `bash install.sh $(CONDA_ENV_PATH)` only worked because the shell strips
# the quotes on expansion.
export CONDA_ENV_PATH := parler.yml

# Set to 1 to skip conda env creation (user-overridable via CLI or env).
SKIP_CONDA?=0

# CUDA builds need a dedicated environment spec with nvidia packages.
ifeq ($(BUILD_TYPE), cublas)
export CONDA_ENV_PATH := parler-nvidia.yml
endif

# Intel GPU are supposed to have dependencies installed in the main python
# environment, so we skip conda installation for SYCL builds.
# https://github.com/intel/intel-extension-for-pytorch/issues/538
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
export SKIP_CONDA=1
endif
|
|
|
|
# Install the backend: generate the gRPC stubs, then hand the environment
# spec ($(CONDA_ENV_PATH)) to install.sh to create/update the conda env.
.PHONY: parler-tts
parler-tts: protogen
	@echo "Installing $(CONDA_ENV_PATH)..."
	bash install.sh $(CONDA_ENV_PATH)
|
|
|
|
# Start the backend gRPC server via run.sh (proto stubs regenerated first).
# Fix: the echo messages said "transformers" — copy-pasted from the
# transformers backend — but this is the parler-tts backend.
.PHONY: run
run: protogen
	@echo "Running parler-tts..."
	bash run.sh
	@echo "parler-tts run."
|
|
|
|
# Run the backend's test suite via test.sh (proto stubs regenerated first).
# Fix: the echo messages said "transformers" — copy-pasted from the
# transformers backend — but this is the parler-tts backend.
.PHONY: test
test: protogen
	@echo "Testing parler-tts..."
	bash test.sh
	@echo "parler-tts tested."
|
|
|
|
# Convenience alias: ensure the generated python gRPC stubs exist.
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
|
|
|
|
# Remove the generated protobuf/gRPC stubs ($(RM) is `rm -f`).
.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py
|
|
|
|
# Generate both python stubs from backend.proto in a single protoc run.
# NOTE(review): a two-target rule header declares two *independent* rules,
# so under `make -j` protoc may run twice concurrently, racing on the output
# files. A grouped target (`&:`, GNU make >= 4.3) or a stamp file would make
# this parallel-safe — confirm the minimum supported make version first.
backend_pb2_grpc.py backend_pb2.py:
	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
|
|
|
|
# Remove everything this Makefile created: the generated stubs (via the
# protogen-clean prerequisite), plus the local venv and bytecode caches.
.PHONY: clean
clean: protogen-clean
	$(RM) -r venv __pycache__