name: Build and Release

on:
- push
- pull_request
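
# GRPC_VERSION is referenced twice below: as the branch checked out when gRPC is
# built from source and as part of the actions/cache key, so bumping it
# invalidates the cached build and triggers a fresh from-source build.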
env:
  GRPC_VERSION: v1.64.0

permissions:
  contents: write
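
# One release build per ref: a new push to the same branch or pull request
# cancels any in-progress run in this concurrency group.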
concurrency:
  group: ci-releases-${{ github.head_ref || github.ref }}-${{ github.repository }}
  cancel-in-progress: true

jobs:
  build-linux:
    runs-on: ubuntu-latest
    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          submodules: true
      - uses: actions/setup-go@v5
        with:
          go-version: '1.21.x'
          cache: false
      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install build-essential ffmpeg protobuf-compiler ccache
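      # Only nvcc and the cuBLAS development package for the pinned CUDA_VERSION
      # are installed: enough to compile the CUDA-enabled backends even though
      # the hosted runner has no GPU.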
      - name: Install CUDA Dependencies
        run: |
          curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
          sudo dpkg -i cuda-keyring_1.1-1_all.deb
          sudo apt-get update
          sudo apt-get install -y cuda-nvcc-${CUDA_VERSION} libcublas-dev-${CUDA_VERSION}
        env:
          CUDA_VERSION: 12-3
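      # gRPC is expensive to build, so the grpc/ tree (sources plus build output)
      # is cached keyed on GRPC_VERSION; the from-source build below only runs on
      # a cache miss, while the install step always runs.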
      - name: Cache grpc
        id: cache-grpc
        uses: actions/cache@v4
        with:
          path: grpc
          key: ${{ runner.os }}-grpc-${{ env.GRPC_VERSION }}
      - name: Build grpc
        if: steps.cache-grpc.outputs.cache-hit != 'true'
        run: |
          git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
            cd grpc && mkdir -p cmake/build && cd cmake/build && cmake -DgRPC_INSTALL=ON \
              -DgRPC_BUILD_TESTS=OFF \
              ../.. && sudo make --jobs 5 --output-sync=target
      - name: Install gRPC
        run: |
          cd grpc && cd cmake/build && sudo make --jobs 5 --output-sync=target install
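      # Generate the Go protobuf/gRPC stubs, put nvcc on PATH, and build the
      # release binaries with the p2p tag; `make dist` is expected to leave the
      # artifacts under release/, which is what gets uploaded and published below.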
      - name: Build
        id: build
        run: |
          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
          go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
          export PATH=$PATH:$GOPATH/bin
          export PATH=/usr/local/cuda/bin:$PATH
          GO_TAGS=p2p make dist
      - uses: actions/upload-artifact@v4
        with:
          name: LocalAI-linux
          path: release/
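      # Artifacts are uploaded for every run; the GitHub release itself is only
      # created or updated when the workflow runs for a tag ref.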
      - name: Release
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*
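
  # build-stablediffusion builds only the standalone stablediffusion gRPC
  # backend binary and publishes it as its own artifact, separate from the
  # main LocalAI-linux bundle.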
  build-stablediffusion:
    runs-on: ubuntu-latest
    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          submodules: true
      - uses: actions/setup-go@v5
        with:
          go-version: '1.21.x'
          cache: false
      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y --no-install-recommends libopencv-dev protobuf-compiler ccache
          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
          go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
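      # GO_TAGS=stablediffusion enables the stablediffusion backend for the make
      # target below; only that single gRPC binary is copied into release/.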
      - name: Build stablediffusion
        run: |
          export PATH=$PATH:$GOPATH/bin
          make backend-assets/grpc/stablediffusion
          mkdir -p release && cp backend-assets/grpc/stablediffusion release
        env:
          GO_TAGS: stablediffusion
      - uses: actions/upload-artifact@v4
        with:
          name: stablediffusion
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*
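
  # Same p2p-tagged dist build as build-linux, but on an Apple Silicon runner
  # (macos-14) with protobuf/gRPC coming from Homebrew instead of a
  # from-source build.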
  build-macOS-arm64:
    runs-on: macos-14
    steps:
      - name: Clone
        uses: actions/checkout@v4
        with:
          submodules: true
      - uses: actions/setup-go@v5
        with:
          go-version: '1.21.x'
          cache: false
      - name: Dependencies
        run: |
          brew install protobuf grpc
          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
          go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
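      # The exports below point the C/C++ toolchain at /usr/local/include
      # (presumably for locally installed headers); the build itself is the same
      # `GO_TAGS=p2p make dist` as on Linux.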
      - name: Build
        id: build
        run: |
          export C_INCLUDE_PATH=/usr/local/include
          export CPLUS_INCLUDE_PATH=/usr/local/include
          export PATH=$PATH:$GOPATH/bin
          GO_TAGS=p2p make dist
      - uses: actions/upload-artifact@v4
        with:
          name: LocalAI-MacOS-arm64
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*