chore(cli): be consistent between workers and expose ExtraLLamaCPPArgs to both (#3428)
* chore(cli): be consistent between workers and expose ExtraLLamaCPPArgs to both

  Fixes: https://github.com/mudler/LocalAI/issues/3427

  Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* bump grpcio

  Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Parent: ae6d327698
Commit: 11d960b2a6
@@ -1,6 +1,6 @@
 accelerate
 auto-gptq==0.7.1
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 transformers

@@ -1,4 +1,4 @@
 bark==0.1.5
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,2 +1,2 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf

@@ -1,4 +1,4 @@
 TTS==0.22.0
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,5 +1,5 @@
 setuptools
-grpcio==1.66.0
+grpcio==1.66.1
 pillow
 protobuf
 certifi

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 setuptools

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 wheel

@@ -1,3 +1,3 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -2,7 +2,7 @@
 intel-extension-for-pytorch
 torch
 optimum[openvino]
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 librosa==0.9.1
 faster-whisper==1.0.3

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 librosa
 faster-whisper

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 llvmlite==0.43.0

@@ -1,3 +1,3 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,3 +1,3 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 scipy==1.14.0
 certifi

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406

@@ -1,3 +1,3 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi

@@ -1,4 +1,4 @@
-grpcio==1.66.0
+grpcio==1.66.1
 protobuf
 certifi
 setuptools
@@ -2,6 +2,7 @@ package worker

 type WorkerFlags struct {
 	BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
+	ExtraLLamaCPPArgs string `name:"llama-cpp-args" env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"`
 }

 type Worker struct {
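The effect of this hunk is that the shared flag now lives in WorkerFlags, which both worker subcommands embed, so each of them exposes the same `--llama-cpp-args` option. Below is a minimal, self-contained sketch of that embedding pattern in plain Go; the struct names and trimmed field set are illustrative stand-ins for the real CLI structs in the diff above:

```go
package main

import "fmt"

// WorkerFlags mirrors the idea in the hunk above: options shared by every
// worker subcommand live in one struct. (Field set reduced for illustration.)
type WorkerFlags struct {
	BackendAssetsPath string
	ExtraLLamaCPPArgs string
}

// Both subcommand structs embed WorkerFlags, so each exposes the same
// shared options without duplicating the field declarations.
type LLamaCPPCmd struct {
	WorkerFlags
}

type P2PCmd struct {
	WorkerFlags
	RunnerAddress string
}

func main() {
	cmd := P2PCmd{WorkerFlags: WorkerFlags{ExtraLLamaCPPArgs: "-m 4096"}}
	// The embedded field is promoted, so it reads the same on both commands.
	fmt.Println(cmd.ExtraLLamaCPPArgs)
}
```

Field promotion is why `r.ExtraLLamaCPPArgs` resolves inside the llama-cpp worker's `Run` method even though the field is declared on WorkerFlags.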
@@ -3,6 +3,7 @@ package worker
 import (
 	"fmt"
 	"os"
+	"strings"
 	"syscall"

 	cliContext "github.com/mudler/LocalAI/core/cli/context"

@@ -12,7 +13,6 @@ import (
 )

 type LLamaCPP struct {
 	Args []string `arg:"" optional:"" name:"models" help:"Model configuration URLs to load"`
 	WorkerFlags `embed:""`
 }

@@ -34,9 +34,8 @@ func (r *LLamaCPP) Run(ctx *cliContext.Context) error {
 		"llama-cpp-rpc-server",
 	)

-	args := os.Args[4:]
+	args := strings.Split(r.ExtraLLamaCPPArgs, " ")
 	args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess)

 	args = append([]string{grpcProcess}, args...)
 	return syscall.Exec(
 		grpcProcess,
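The last hunk stops slicing `os.Args` and instead builds the child argv from the `--llama-cpp-args` value itself. Here is a minimal sketch of that argv construction and exec, using a stand-in binary path for illustration (the real code resolves llama-cpp-rpc-server from the backend assets and passes the args through library.LoadLDSO first):

```go
package main

import (
	"strings"
	"syscall"
)

func main() {
	// Value that would arrive via --llama-cpp-args / EXTRA_LLAMA_CPP_ARGS.
	extraArgs := "-H 0.0.0.0 -p 50052 -m 4096"

	// Split the flag value on spaces, as the hunk above does.
	args := strings.Split(extraArgs, " ")

	// Stand-in binary path for this sketch; the worker uses the resolved
	// llama-cpp-rpc-server binary instead.
	grpcProcess := "/bin/echo"

	// exec(2) expects argv[0] to be the program itself, so prepend it.
	argv := append([]string{grpcProcess}, args...)

	// Replace the current process image with the server process
	// (Unix-only; returns only on error).
	if err := syscall.Exec(grpcProcess, argv, syscall.Environ()); err != nil {
		panic(err)
	}
}
```

Because syscall.Exec replaces the current process, the worker command simply returns its result, exactly as the hunk above does.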
@@ -25,7 +25,6 @@ type P2P struct {
 	NoRunner bool `env:"LOCALAI_NO_RUNNER,NO_RUNNER" help:"Do not start the llama-cpp-rpc-server"`
 	RunnerAddress string `env:"LOCALAI_RUNNER_ADDRESS,RUNNER_ADDRESS" help:"Address of the llama-cpp-rpc-server"`
 	RunnerPort string `env:"LOCALAI_RUNNER_PORT,RUNNER_PORT" help:"Port of the llama-cpp-rpc-server"`
-	ExtraLLamaCPPArgs string `name:"llama-cpp-args" env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"`
 	Peer2PeerNetworkID string `env:"LOCALAI_P2P_NETWORK_ID,P2P_NETWORK_ID" help:"Network ID for P2P mode, can be set arbitrarly by the user for grouping a set of instances" group:"p2p"`
 }

@@ -68,7 +68,7 @@ And navigate the WebUI to the "Swarm" section to see the instructions to connect
 To start workers for distributing the computational load, run:

 ```bash
-local-ai worker llama-cpp-rpc <listening_address> <listening_port>
+local-ai worker llama-cpp-rpc --llama-cpp-args="-H <listening_address> -p <listening_port> -m <memory>"
 ```

 And you can specify the address of the workers when starting LocalAI with the `LLAMACPP_GRPC_SERVERS` environment variable:

@@ -98,7 +98,7 @@ To reuse the same token later, restart the server with `--p2ptoken` or `P2P_TOKE
 2. Start the workers. Copy the `local-ai` binary to other hosts and run as many workers as needed using the token:

 ```bash
-TOKEN=XXX ./local-ai worker p2p-llama-cpp-rpc
+TOKEN=XXX ./local-ai worker p2p-llama-cpp-rpc --llama-cpp-args="-m <memory>"
 # 1:06AM INF loading environment variables from file envFile=.env
 # 1:06AM INF Setting logging to info
 # {"level":"INFO","time":"2024-05-19T01:06:01.794+0200","caller":"config/config.go:288","message":"connmanager disabled\n"}