mirror of
https://github.com/mudler/LocalAI.git
synced 2024-12-23 14:32:25 +00:00
5d1018495f
* feat(intel): add diffusers support * try to consume upstream container image * Debug * Manually install deps * Map transformers/hf cache dir to modelpath if not specified * fix(compel): update initialization, pass by all gRPC options * fix: add dependencies, implement transformers for xpu * base it from the oneapi image * Add pillow * set threads if specified when launching the API * Skip conda install if intel * defaults to non-intel * ci: add to pipelines * prepare compel only if enabled * Skip conda install if intel * fix cleanup * Disable compel by default * Install torch 2.1.0 with Intel * Skip conda on some setups * Detect python * Quiet output * Do not override system python with conda * Prefer python3 * Fixups * exllama2: do not install without conda (overrides pytorch version) * exllama/exllama2: do not install if not using cuda * Add missing dataset dependency * Small fixups, symlink to python, add requirements * Add neural_speed to the deps * correctly handle model offloading * fix: device_map == xpu * go back at calling python, fixed at dockerfile level * Exllama2 restricted to only nvidia gpus * Tokenizer to xpu
27 lines
606 B
Makefile
27 lines
606 B
Makefile
# Conda environment spec consumed by install.sh. Exported so the script can
# also read it from its environment. ':=' expands once at parse time, and the
# bare filename avoids exporting literal quote characters as part of the
# value (with '= "diffusers.yml"' the env var contained the quotes verbatim;
# only the recipe command line looked right because the shell stripped them).
export CONDA_ENV_PATH := diffusers.yml

# ROCm builds use a dedicated environment spec.
ifeq ($(BUILD_TYPE), hipblas)
export CONDA_ENV_PATH := diffusers-rocm.yml
endif
|
|
|
|
# Intel GPU builds are expected to ship their Python dependencies in the main
# (system) interpreter — see the oneAPI base image — so the conda installation
# step is skipped entirely whenever BUILD_TYPE contains "sycl".
# https://github.com/intel/intel-extension-for-pytorch/issues/538
ifneq ($(findstring sycl,$(BUILD_TYPE)),)
export SKIP_CONDA=1
endif
|
|
|
|
# Prepare the diffusers backend: delegate environment setup to install.sh,
# passing the conda environment spec selected above. Phony — "diffusers" is a
# command, not a file this rule produces.
.PHONY: diffusers
diffusers:
	@echo "Installing $(CONDA_ENV_PATH)..."
	bash install.sh $(CONDA_ENV_PATH)
|
|
|
|
# Start the diffusers backend by handing control to run.sh. Phony — "run" is
# a command, not an artifact.
.PHONY: run
run:
	@echo "Running diffusers..."
	bash run.sh
	@echo "Diffusers run."
|
|
|
|
# Run the backend's test suite via test.sh. Declared phony — matching the
# file's other command targets — so that a file or directory named "test" in
# the working tree can never make this rule appear up to date and silently
# skip the suite.
.PHONY: test
test:
	bash test.sh