# syntax=docker/dockerfile:1
# Global build args. These are the only ARGs visible to FROM lines; stages that
# need them at RUN/ENV time must redeclare them.
ARG IMAGE_TYPE=extras
ARG BASE_IMAGE=ubuntu:22.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
ARG INTEL_BASE_IMAGE=${BASE_IMAGE}

# The requirements-core target is common to all images. Nothing should be placed
# in this target unless every single build will use it.
FROM ${BASE_IMAGE} AS requirements-core

USER root

ARG GO_VERSION=1.22.6
# Automatic platform args provided by BuildKit; declared so they are usable in RUN below.
ARG TARGETARCH
ARG TARGETVARIANT

ENV DEBIAN_FRONTEND=noninteractive
ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,petals:/build/backend/python/petals/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,exllama:/build/backend/python/exllama/run.sh,openvoice:/build/backend/python/openvoice/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"

# Base toolchain needed by every build; apt lists are removed in the same layer.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        ccache \
        cmake \
        curl \
        git \
        unzip upx-ucl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Install Go
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin

# Install grpc compilers
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
    go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af

COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
RUN update-ca-certificates

# Warn (but do not fail) when TARGETARCH is unset, e.g. a non-BuildKit build.
RUN test -n "$TARGETARCH" \
    || (echo 'warn: missing $TARGETARCH, either set this `ARG` manually, or run using `docker buildkit`')

# Use the variables in subsequent instructions
RUN echo "Target Architecture: $TARGETARCH"
RUN echo "Target Variant: $TARGETVARIANT"

# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}

# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}

# OpenBLAS requirements and stable diffusion
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        libopenblas-dev \
        libopencv-dev && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Set up OpenCV
RUN ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2

WORKDIR /build
###################################
###################################

# The requirements-extras target is for any builds with IMAGE_TYPE=extras. Nothing
# should be placed in this target unless every IMAGE_TYPE=extras build will use it.
FROM requirements-core AS requirements-extras

# Install uv (Python package installer used by the extra backends).
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
ENV PATH="/root/.cargo/bin:${PATH}"

# Rust toolchain, required to build some Python backend dependencies.
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        espeak-ng \
        espeak \
        python3-pip \
        python-is-python3 \
        python3-dev \
        python3-venv && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    pip install --upgrade pip

# Install grpcio-tools (the version in 22.04 is too old)
RUN pip install --user grpcio-tools

###################################
###################################

# The requirements-drivers target is for BUILD_TYPE specific items. If you need to install something specific to CUDA, or specific to ROCM, it goes here.
# This target will be built on top of requirements-core or requirements-extras as determined by the IMAGE_TYPE build-arg
FROM requirements-${IMAGE_TYPE} AS requirements-drivers

ARG BUILD_TYPE
ARG CUDA_MAJOR_VERSION=12
ARG CUDA_MINOR_VERSION=0
# Redeclare the automatic platform arg so the cublas heredoc below can branch on it
# (ARGs do not cross stage boundaries).
ARG TARGETARCH

ENV BUILD_TYPE=${BUILD_TYPE}

# Vulkan requirements
# NOTE: heredoc lines are not implicitly &&-chained; set -e makes the script fail fast.
RUN <<EOT bash
    set -e
    if [ "${BUILD_TYPE}" = "vulkan" ]; then
        apt-get update && \
        apt-get install -y --no-install-recommends \
            software-properties-common pciutils wget gpg-agent && \
        wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
        wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
        apt-get update && \
        apt-get install -y \
            vulkan-sdk && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/*
    fi
EOT

# CuBLAS requirements
RUN <<EOT bash
    set -e
    if [ "${BUILD_TYPE}" = "cublas" ]; then
        apt-get update && \
        apt-get install -y --no-install-recommends \
            software-properties-common pciutils
        if [ "amd64" = "$TARGETARCH" ]; then
            curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
        fi
        if [ "arm64" = "$TARGETARCH" ]; then
            curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
        fi
        dpkg -i cuda-keyring_1.1-1_all.deb && \
        rm -f cuda-keyring_1.1-1_all.deb && \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/*
    fi
EOT

# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ]; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            libclblast-dev && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/* \
    ; fi

RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            hipblas-dev \
            rocblas-dev && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/* && \
        # I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
        # to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
        ldconfig \
    ; fi

###################################
###################################

# Temporary workaround for Intel's repository to work correctly
# https://community.intel.com/t5/Intel-oneAPI-Math-Kernel-Library/APT-Repository-not-working-signatures-invalid/m-p/1599436/highlight/true#M36143
# This is a temporary workaround until Intel fixes their repository
FROM ${INTEL_BASE_IMAGE} AS intel
# Re-import the GPU repository signing key and sources list so apt signature checks pass.
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
    gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list

###################################
###################################
# The grpc target does one thing, it builds and installs GRPC. This is in its own layer so that it can be effectively cached by CI.
# You probably don't need to change anything here, and if you do, make sure that CI is adjusted so that the cache continues to work.
FROM ${GRPC_BASE_IMAGE} AS grpc

# This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
ARG GRPC_VERSION=v1.65.0

ENV MAKEFLAGS=${GRPC_MAKEFLAGS}

WORKDIR /build

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        build-essential \
        cmake \
        git && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# We install GRPC to a different prefix here so that we can copy in only the build artifacts later
# saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree
# and running make install in the target container
RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
    mkdir -p /build/grpc/cmake/build && \
    cd /build/grpc/cmake/build && \
    sed -i "216i\ TESTONLY" "../../third_party/abseil-cpp/absl/container/CMakeLists.txt" && \
    cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX:PATH=/opt/grpc ../.. && \
    make && \
    make install && \
    rm -rf /build

###################################
###################################

# The builder-base target has the arguments, variables, and copies shared between full builder images and the uncompiled devcontainer
FROM requirements-drivers AS builder-base

ARG GO_TAGS="stablediffusion tts p2p"
ARG GRPC_BACKENDS
ARG MAKEFLAGS
ARG LD_FLAGS="-s -w"
# ARGs do not cross stage boundaries: redeclare these so the ENV/RUN lines below
# do not expand them to empty strings.
ARG CUDA_MAJOR_VERSION=12
ARG TARGETARCH

ENV GRPC_BACKENDS=${GRPC_BACKENDS}
ENV GO_TAGS=${GO_TAGS}
ENV MAKEFLAGS=${MAKEFLAGS}
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
ENV NVIDIA_VISIBLE_DEVICES=all
ENV LD_FLAGS=${LD_FLAGS}

RUN echo "GO_TAGS: $GO_TAGS" && echo "TARGETARCH: $TARGETARCH"

WORKDIR /build

# We need protoc installed, and the version in 22.04 is too old. We will create one as part installing the GRPC build below
# but that will also bring in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
# here so that we can generate the grpc code for the stablediffusion build
RUN <<EOT bash
    set -e
    if [ "amd64" = "$TARGETARCH" ]; then
        curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
        unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
        rm protoc.zip
    fi
    if [ "arm64" = "$TARGETARCH" ]; then
        curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-aarch_64.zip -o protoc.zip && \
        unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
        rm protoc.zip
    fi
EOT
###################################
###################################

# The builder target compiles LocalAI. This target is not the target that will be uploaded to the registry.
# Adjustments to the build process should likely be made here.
FROM builder-base AS builder

COPY . .
COPY .git .
RUN make prepare

# stablediffusion does not tolerate a newer version of abseil, build it first
RUN GRPC_BACKENDS=backend-assets/grpc/stablediffusion make build

# Install the pre-built GRPC
COPY --from=grpc /opt/grpc /usr/local

# Rebuild with defaults backends
WORKDIR /build

## Build the binary
RUN make build

# Ensure the piper lib directory exists so the later COPY of its contents never fails.
# (The && between mkdir and touch is required; without it `touch` becomes an argument to mkdir.)
RUN if [ ! -d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \
        mkdir -p /build/sources/go-piper/piper-phonemize/pi/lib/ && \
        touch /build/sources/go-piper/piper-phonemize/pi/lib/keep \
    ; fi
###################################
###################################

# The devcontainer target is not used on CI. It is a target for developers to use locally -
# rather than copying files it mounts them locally and leaves building to the developer
FROM builder-base AS devcontainer

ARG FFMPEG

COPY --from=grpc /opt/grpc /usr/local

# This is somewhat of a dirty hack as this dev machine has issues with stablediffusion... but it should also speed up devcontainers?
# localai/localai:latest-aio-cpu
COPY --from=builder /build/backend-assets/grpc/stablediffusion /build/backend-assets/grpc/stablediffusion

# Add FFmpeg
RUN if [ "${FFMPEG}" = "true" ]; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            ffmpeg && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/* \
    ; fi

# Delve debugger for Go development inside the container.
RUN go install github.com/go-delve/delve/cmd/dlv@latest

###################################
###################################
# This is the final target. The result of this target will be the image uploaded to the registry.
# If you cannot find a more suitable place for an addition, this layer is a suitable place for it.
FROM requirements-drivers

ARG FFMPEG
ARG BUILD_TYPE
ARG TARGETARCH
feat(conda): conda environments (#1144)
* feat(autogptq): add a separate conda environment for autogptq (#1137)
**Description**
This PR related to #1117
**Notes for Reviewers**
Here we lock down the versions of the dependencies to make sure the backend
keeps working even if newer versions of those dependencies are released.
I change the order of importing packages according to the pylint, and no
change the logic of code. It should be ok.
I will do more investigate on writing some test cases for every backend.
I can run the service in my environment, but there is no existing way to
test it, so I am not confident in it.
Add a README.md in the `grpc` root. This is the common commands for
creating `conda` environment. And it can be used to the reference file
for creating extral gRPC backend document.
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* [Extra backend] Add seperate environment for ttsbark (#1141)
**Description**
This PR relates to #1117
**Notes for Reviewers**
Same to the latest PR:
* The code is also changed, but only the order of the import package
parts. And some code comments are also added.
* Add a configuration of the `conda` environment
* Add a simple test case for testing if the service can be startup in
current `conda` environment. It is succeed in VSCode, but the it is not
out of box on terminal. So, it is hard to say the test case really
useful.
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): add make target and entrypoints for the dockerfile
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add seperate conda env for diffusers (#1145)
**Description**
This PR relates to #1117
**Notes for Reviewers**
* Add `conda` env `diffusers.yml`
* Add Makefile to create it automatically
* Add `run.sh` to support running as a extra backend
* Also adding it to the main Dockerfile
* Add make command in the root Makefile
* Testing the server, it can start up under the env
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate env for vllm (#1148)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server can be started as normal
* The test case can be triggered in VSCode
* Same to other this kind of PRs, add `vllm.yml` Makefile and add
`run.sh` to the main Dockerfile, and command to the main Makefile
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate env for huggingface (#1146)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* Add conda env `huggingface.yml`
* Change the import order, and also remove the no-used packages
* Add `run.sh` and `make command` to the main Dockerfile and Makefile
* Add test cases for it. It can be triggered and succeed under VSCode
Python extension but it is hang by using `python -m unites
test_huggingface.py` in the terminal
```
Running tests (unittest): /workspaces/LocalAI/extra/grpc/huggingface
Running tests: /workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_embedding
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_load_model
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_server_startup
./test_huggingface.py::TestBackendServicer::test_embedding Passed
./test_huggingface.py::TestBackendServicer::test_load_model Passed
./test_huggingface.py::TestBackendServicer::test_server_startup Passed
Total number of tests expected to run: 3
Total number of tests run: 3
Total number of tests passed: 3
Total number of tests failed: 0
Total number of tests failed with errors: 0
Total number of tests skipped: 0
Finished running tests!
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add the seperate conda env for VALL-E X (#1147)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server cannot start up
```
(ttsvalle) @Aisuko ➜ /workspaces/LocalAI (feat/vall-e-x) $ /opt/conda/envs/ttsvalle/bin/python /workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py
Traceback (most recent call last):
File "/workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py", line 14, in <module>
from utils.generation import SAMPLE_RATE, generate_audio, preload_models
ModuleNotFoundError: No module named 'utils'
```
The installation steps follow
https://github.com/Plachtaa/VALL-E-X#-installation below:
* Under the `ttsvalle` conda env
```
git clone https://github.com/Plachtaa/VALL-E-X.git
cd VALL-E-X
pip install -r requirements.txt
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fix: set image type
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate conda env for exllama (#1149)
Add seperate env for exllama
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Setup conda
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Set image_type arg
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: prepare only conda env in tests
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Dockerfile: comment manual pip calls
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* conda: add conda to PATH
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fixes
* add shebang
* Fixups
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* file perms
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* debug
* Install new conda in the worker
* Disable GPU tests for now until the worker is back
* Rename workflows
* debug
* Fixup conda install
* fixup(wrapper): pass args
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Co-authored-by: Aisuko <urakiny@gmail.com>
2023-11-04 14:30:32 +00:00
ARG IMAGE_TYPE=extras
ARG EXTRA_BACKENDS
ARG MAKEFLAGS

ENV BUILD_TYPE=${BUILD_TYPE}
ENV REBUILD=false
ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz
ENV MAKEFLAGS=${MAKEFLAGS}

ARG CUDA_MAJOR_VERSION=12
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
ENV NVIDIA_VISIBLE_DEVICES=all

# Add FFmpeg
RUN if [ "${FFMPEG}" = "true" ]; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            ffmpeg && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/* \
    ; fi

WORKDIR /build

# we start fresh & re-copy all assets because `make build` does not clean up nicely after itself
# so when `entrypoint.sh` runs `make build` again (which it does by default), the build would fail
# see https://github.com/go-skynet/LocalAI/pull/658#discussion_r1241971626 and
# https://github.com/go-skynet/LocalAI/pull/434
COPY . .

COPY --from=builder /build/sources ./sources/
COPY --from=grpc /opt/grpc /usr/local

RUN make prepare-sources

# Copy the binary
COPY --from=builder /build/local-ai ./

# Copy shared libraries for piper
COPY --from=builder /build/sources/go-piper/piper-phonemize/pi/lib/* /usr/lib/

# do not let stablediffusion rebuild (requires an older version of absl)
COPY --from=builder /build/backend-assets/grpc/stablediffusion ./backend-assets/grpc/stablediffusion

# Change the shell to bash so we can use [[ tests below
SHELL ["/bin/bash", "-c"]

# We try to strike a balance between individual layer size (as that affects total push time) and total image size
# Splitting the backends into more groups with fewer items results in a larger image, but a smaller size for the largest layer
# Splitting the backends into fewer groups with more items results in a smaller image, but a larger size for the largest layer
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/coqui \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "parler-tts" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/parler-tts \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "diffusers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/diffusers \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "transformers-musicgen" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/transformers-musicgen \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "exllama1" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/exllama \
    ; fi

RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vall-e-x" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/vall-e-x \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "openvoice" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/openvoice \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "petals" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/petals \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "sentencetransformers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/sentencetransformers \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "exllama2" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/exllama2 \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "transformers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/transformers \
    ; fi

RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vllm" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/vllm \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "autogptq" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/autogptq \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "bark" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/bark \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "rerankers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/rerankers \
    ; fi && \
    if [[ ( "${EXTRA_BACKENDS}" =~ "mamba" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
        make -C backend/python/mamba \
    ; fi
feat(conda): conda environments (#1144)
* feat(autogptq): add a separate conda environment for autogptq (#1137)
**Description**
This PR related to #1117
**Notes for Reviewers**
Here we lock down the version of the dependencies. Make sure it can be
used all the time without failed if the version of dependencies were
upgraded.
I change the order of importing packages according to the pylint, and no
change the logic of code. It should be ok.
I will do more investigate on writing some test cases for every backend.
I can run the service in my environment, but there is not exist a way to
test it. So, I am not confident on it.
Add a README.md in the `grpc` root. This is the common commands for
creating `conda` environment. And it can be used to the reference file
for creating extral gRPC backend document.
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* [Extra backend] Add seperate environment for ttsbark (#1141)
**Description**
This PR relates to #1117
**Notes for Reviewers**
Same to the latest PR:
* The code is also changed, but only the order of the import package
parts. And some code comments are also added.
* Add a configuration of the `conda` environment
* Add a simple test case for testing if the service can be startup in
current `conda` environment. It is succeed in VSCode, but the it is not
out of box on terminal. So, it is hard to say the test case really
useful.
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): add make target and entrypoints for the dockerfile
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate conda env for diffusers (#1145)
**Description**
This PR relates to #1117
**Notes for Reviewers**
* Add `conda` env `diffusers.yml`
* Add Makefile to create it automatically
* Add `run.sh` to support running as a extra backend
* Also adding it to the main Dockerfile
* Add make command in the root Makefile
* Testing the server, it can start up under the env
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate env for vllm (#1148)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server can be started as normal
* The test case can be triggered in VSCode
* Same to other this kind of PRs, add `vllm.yml` Makefile and add
`run.sh` to the main Dockerfile, and command to the main Makefile
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate env for huggingface (#1146)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* Add conda env `huggingface.yml`
* Change the import order, and also remove the no-used packages
* Add `run.sh` and `make command` to the main Dockerfile and Makefile
* Add test cases for it. They can be triggered and succeed under the
VSCode Python extension, but they hang when using `python -m unittest
test_huggingface.py` in the terminal
```
Running tests (unittest): /workspaces/LocalAI/extra/grpc/huggingface
Running tests: /workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_embedding
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_load_model
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_server_startup
./test_huggingface.py::TestBackendServicer::test_embedding Passed
./test_huggingface.py::TestBackendServicer::test_load_model Passed
./test_huggingface.py::TestBackendServicer::test_server_startup Passed
Total number of tests expected to run: 3
Total number of tests run: 3
Total number of tests passed: 3
Total number of tests failed: 0
Total number of tests failed with errors: 0
Total number of tests skipped: 0
Finished running tests!
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add the separate conda env for VALL-E X (#1147)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server cannot start up
```
(ttsvalle) @Aisuko ➜ /workspaces/LocalAI (feat/vall-e-x) $ /opt/conda/envs/ttsvalle/bin/python /workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py
Traceback (most recent call last):
File "/workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py", line 14, in <module>
from utils.generation import SAMPLE_RATE, generate_audio, preload_models
ModuleNotFoundError: No module named 'utils'
```
The installation steps follow
https://github.com/Plachtaa/VALL-E-X#-installation below:
* Under the `ttsvalle` conda env
```
git clone https://github.com/Plachtaa/VALL-E-X.git
cd VALL-E-X
pip install -r requirements.txt
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fix: set image type
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate conda env for exllama (#1149)
Add separate env for exllama
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Setup conda
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Set image_type arg
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: prepare only conda env in tests
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Dockerfile: comment manual pip calls
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* conda: add conda to PATH
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fixes
* add shebang
* Fixups
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* file perms
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* debug
* Install new conda in the worker
* Disable GPU tests for now until the worker is back
* Rename workflows
* debug
* Fixup conda install
* fixup(wrapper): pass args
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Co-authored-by: Aisuko <urakiny@gmail.com>
2023-11-04 14:30:32 +00:00
2024-01-09 07:55:43 +00:00
# Make sure the models directory exists; it is exposed as a VOLUME later,
# and VOLUME content must be populated before the declaration to persist.
RUN mkdir -p /build/models
2023-05-26 16:34:02 +00:00
# Define the health check command
2023-05-30 10:00:30 +00:00
# Poll the health endpoint so orchestrators can detect a wedged container.
# HEALTHCHECK_ENDPOINT is presumably provided via ENV elsewhere in the file — confirm.
# Fix: the variable reference must be ${HEALTHCHECK_ENDPOINT} with no spaces
# inside the braces, otherwise the shell expansion fails.
HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
  CMD curl -f ${HEALTHCHECK_ENDPOINT} || exit 1
2024-05-26 07:56:06 +00:00
2024-03-25 02:01:30 +00:00
VOLUME /build/models
2023-04-27 16:45:24 +00:00
EXPOSE 8080
2023-06-26 20:34:03 +00:00
ENTRYPOINT [ "/build/entrypoint.sh" ]