chore(cli): be consistent between workers and expose ExtraLLamaCPPArgs to both (#3428)

* chore(cli): be consistent between workers and expose ExtraLLamaCPPArgs to both

Fixes: https://github.com/mudler/LocalAI/issues/3427

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* bump grpcio

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Ettore Di Giacinto authored on 2024-08-30 00:10:17 +02:00; committed by GitHub
parent ae6d327698
commit 11d960b2a6
21 changed files with 22 additions and 23 deletions

@@ -1,6 +1,6 @@
 accelerate
 auto-gptq==0.7.1
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 transformers

@@ -1,4 +1,4 @@
 bark==0.1.5
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,2 +1,2 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf

@@ -1,4 +1,4 @@
 TTS==0.22.0
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,5 +1,5 @@
 setuptools
-grpcio==1.66.0
+grpcio==1.66.1
 pillow
 protobuf
 certifi

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 setuptools

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 wheel

@@ -1,3 +1,3 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -2,7 +2,7 @@
 intel-extension-for-pytorch
 torch
 optimum[openvino]
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 librosa==0.9.1
 faster-whisper==1.0.3

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 librosa
 faster-whisper

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 llvmlite==0.43.0

@@ -1,3 +1,3 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,3 +1,3 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 scipy==1.14.0
 certifi

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406

@@ -1,3 +1,3 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 setuptools

@@ -2,6 +2,7 @@ package worker
 type WorkerFlags struct {
 BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
+ExtraLLamaCPPArgs string `name:"llama-cpp-args" env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"`
 }
 type Worker struct {

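The struct tags above appear to follow the conventions of the kong CLI library (github.com/alecthomas/kong). As a minimal, self-contained sketch (not LocalAI's actual wiring; the type and command names below are illustrative), this is how an `embed:""` struct makes the same `--llama-cpp-args` flag available to more than one worker subcommand:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

// SharedWorkerFlags plays the role of WorkerFlags above: flags defined here
// are inherited by every command that embeds the struct.
type SharedWorkerFlags struct {
	ExtraLLamaCPPArgs string `name:"llama-cpp-args" help:"Extra arguments to pass to llama-cpp-rpc-server"`
}

// RPCWorker and P2PWorker stand in for the two worker subcommands.
type RPCWorker struct {
	SharedWorkerFlags `embed:""`
}

func (r *RPCWorker) Run() error {
	fmt.Println("rpc worker extra args:", r.ExtraLLamaCPPArgs)
	return nil
}

type P2PWorker struct {
	SharedWorkerFlags `embed:""`
}

func (p *P2PWorker) Run() error {
	fmt.Println("p2p worker extra args:", p.ExtraLLamaCPPArgs)
	return nil
}

var cli struct {
	LlamaCppRpc    RPCWorker `cmd:"" name:"llama-cpp-rpc" help:"Standalone llama.cpp rpc worker"`
	P2PLlamaCppRpc P2PWorker `cmd:"" name:"p2p-llama-cpp-rpc" help:"P2P llama.cpp rpc worker"`
}

func main() {
	// Both subcommands accept --llama-cpp-args because they embed the same flags struct.
	ctx := kong.Parse(&cli)
	ctx.FatalIfErrorf(ctx.Run())
}
```

With the flag defined once in the embedded struct, both `llama-cpp-rpc` and `p2p-llama-cpp-rpc` parse it identically, which is the consistency the commit title refers to.
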
@@ -3,6 +3,7 @@ package worker
 import (
 "fmt"
 "os"
+"strings"
 "syscall"
 cliContext "github.com/mudler/LocalAI/core/cli/context"
@@ -12,7 +13,6 @@ import (
 )
 type LLamaCPP struct {
 Args []string `arg:"" optional:"" name:"models" help:"Model configuration URLs to load"`
 WorkerFlags `embed:""`
 }
@@ -34,9 +34,8 @@ func (r *LLamaCPP) Run(ctx *cliContext.Context) error {
 "llama-cpp-rpc-server",
 )
-args := os.Args[4:]
+args := strings.Split(r.ExtraLLamaCPPArgs, " ")
 args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess)
 args = append([]string{grpcProcess}, args...)
 return syscall.Exec(
 grpcProcess,

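The behavioural change in this hunk is that the rpc-server's argv is no longer lifted from `os.Args[4:]` but built from the `--llama-cpp-args` string. A small runnable sketch of that argv construction follows; the binary path and flag values are made up for illustration, and the real command hands the resulting slice to `syscall.Exec` together with the process environment:

```go
package main

import (
	"fmt"
	"strings"
)

// buildArgv mirrors the argv construction after this change: split the
// --llama-cpp-args value on spaces and prepend the rpc-server binary path.
func buildArgv(grpcProcess, extraLLamaCPPArgs string) []string {
	args := strings.Split(extraLLamaCPPArgs, " ")
	// Note: strings.Split("", " ") returns [""], i.e. a single empty
	// argument, so the flag is expected to carry a non-empty value.
	return append([]string{grpcProcess}, args...)
}

func main() {
	argv := buildArgv(
		"/tmp/localai/backend_data/llama-cpp-rpc-server", // illustrative path
		"-H 0.0.0.0 -p 50052 -m 4096",                    // illustrative flags
	)
	fmt.Println(argv)
	// Output: [/tmp/localai/backend_data/llama-cpp-rpc-server -H 0.0.0.0 -p 50052 -m 4096]
}
```
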
@@ -25,7 +25,6 @@ type P2P struct {
 NoRunner bool `env:"LOCALAI_NO_RUNNER,NO_RUNNER" help:"Do not start the llama-cpp-rpc-server"`
 RunnerAddress string `env:"LOCALAI_RUNNER_ADDRESS,RUNNER_ADDRESS" help:"Address of the llama-cpp-rpc-server"`
 RunnerPort string `env:"LOCALAI_RUNNER_PORT,RUNNER_PORT" help:"Port of the llama-cpp-rpc-server"`
-ExtraLLamaCPPArgs string `name:"llama-cpp-args" env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"`
 Peer2PeerNetworkID string `env:"LOCALAI_P2P_NETWORK_ID,P2P_NETWORK_ID" help:"Network ID for P2P mode, can be set arbitrarly by the user for grouping a set of instances" group:"p2p"`
 }

@@ -68,7 +68,7 @@ And navigate the WebUI to the "Swarm" section to see the instructions to connect
 To start workers for distributing the computational load, run:
 ```bash
-local-ai worker llama-cpp-rpc <listening_address> <listening_port>
+local-ai worker llama-cpp-rpc --llama-cpp-args="-H <listening_address> -p <listening_port> -m <memory>"
 ```
 And you can specify the address of the workers when starting LocalAI with the `LLAMACPP_GRPC_SERVERS` environment variable:
@@ -98,7 +98,7 @@ To reuse the same token later, restart the server with `--p2ptoken` or `P2P_TOKE
 2. Start the workers. Copy the `local-ai` binary to other hosts and run as many workers as needed using the token:
 ```bash
-TOKEN=XXX ./local-ai worker p2p-llama-cpp-rpc
+TOKEN=XXX ./local-ai worker p2p-llama-cpp-rpc --llama-cpp-args="-m <memory>"
 # 1:06AM INF loading environment variables from file envFile=.env
 # 1:06AM INF Setting logging to info
 # {"level":"INFO","time":"2024-05-19T01:06:01.794+0200","caller":"config/config.go:288","message":"connmanager disabled\n"}
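
As a closing illustration of the `LLAMACPP_GRPC_SERVERS` variable referenced in the documentation diff above: a minimal sketch (not LocalAI's implementation) that assumes the value is a comma-separated list of `host:port` entries, one per worker:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// e.g. LLAMACPP_GRPC_SERVERS="192.168.1.10:50052,192.168.1.11:50052"
	servers := os.Getenv("LLAMACPP_GRPC_SERVERS")
	if servers == "" {
		fmt.Println("no llama.cpp rpc workers configured")
		return
	}
	for _, addr := range strings.Split(servers, ",") {
		fmt.Println("worker address:", strings.TrimSpace(addr))
	}
}
```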