fix(p2p): avoid starting the node twice (#3349)
* fix(p2p): avoid starting the node twice

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(p2p): keep exposing service if we don't start the llama.cpp runner

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent 70e53bc191
commit af095204fa
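In short: the p2p node used to be started in two places, once by the caller and once inside p2p's discoveryTunnels. After this commit the caller owns the single Start call and hands the started node down. Below is a minimal, self-contained sketch of that pattern; the Node stub and discoveryTunnels signature here are illustrative stand-ins, not the real edgevpn/LocalAI API, and only Start(ctx) and the single-owner rule come from the diff.

package main

import (
	"context"
	"fmt"
)

// Node is a stub standing in for the real p2p node type; only the
// Start method mirrors what the diff shows.
type Node struct{ started bool }

func (n *Node) Start(ctx context.Context) error {
	if n.started {
		return fmt.Errorf("node already started")
	}
	n.started = true
	return nil
}

// discoveryTunnels no longer calls n.Start itself (see the p2p.go hunk
// at the end of this diff); it assumes a node the caller already started.
func discoveryTunnels(ctx context.Context, n *Node) error {
	if !n.started {
		return fmt.Errorf("node not started")
	}
	return nil
}

func main() {
	nodeContext := context.Background()
	n := &Node{}

	// The run command now owns the single Start call...
	if err := n.Start(nodeContext); err != nil {
		fmt.Println(fmt.Errorf("starting new node: %w", err))
		return
	}
	// ...and hands the started node (and the same context) down,
	// mirroring p2p.ServiceDiscoverer(nodeContext, node, token, ...).
	if err := discoveryTunnels(nodeContext, n); err != nil {
		fmt.Println(err)
	}
}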
@@ -120,9 +120,15 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
 		if err != nil {
 			return err
 		}
+		nodeContext := context.Background()
+
+		err = node.Start(nodeContext)
+		if err != nil {
+			return fmt.Errorf("starting new node: %w", err)
+		}
+
 		log.Info().Msg("Starting P2P server discovery...")
-		if err := p2p.ServiceDiscoverer(context.Background(), node, token, p2p.NetworkID(r.Peer2PeerNetworkID, p2p.WorkerID), func(serviceID string, node p2p.NodeData) {
+		if err := p2p.ServiceDiscoverer(nodeContext, node, token, p2p.NetworkID(r.Peer2PeerNetworkID, p2p.WorkerID), func(serviceID string, node p2p.NodeData) {
 			var tunnelAddresses []string
 			for _, v := range p2p.GetAvailableNodes(p2p.NetworkID(r.Peer2PeerNetworkID, p2p.WorkerID)) {
 				if v.IsOnline() {
@@ -146,6 +152,7 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
 			return err
 		}
 		fedCtx := context.Background()
+
 		node, err := p2p.ExposeService(fedCtx, "localhost", port, token, p2p.NetworkID(r.Peer2PeerNetworkID, p2p.FederatedID))
 		if err != nil {
 			return err
@@ -65,44 +65,42 @@ func (r *P2P) Run(ctx *cliContext.Context) error {
 			return err
 		}
 		log.Info().Msgf("You need to start llama-cpp-rpc-server on '%s:%s'", address, p)
-
-		return nil
-	}
-
-	// Start llama.cpp directly from the version we have pre-packaged
-	go func() {
-		for {
-			log.Info().Msgf("Starting llama-cpp-rpc-server on '%s:%d'", address, port)
-			grpcProcess := assets.ResolvePath(
-				r.BackendAssetsPath,
-				"util",
-				"llama-cpp-rpc-server",
-			)
-
-			args := append([]string{"--host", address, "--port", fmt.Sprint(port)}, r.ExtraLLamaCPPArgs...)
-			args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess)
-
-			cmd := exec.Command(
-				grpcProcess, args...,
-			)
-
-			cmd.Env = os.Environ()
-
-			cmd.Stderr = os.Stdout
-			cmd.Stdout = os.Stdout
-
-			if err := cmd.Start(); err != nil {
-				log.Error().Any("grpcProcess", grpcProcess).Any("args", args).Err(err).Msg("Failed to start llama-cpp-rpc-server")
-			}
-
-			cmd.Wait()
-		}
-	}()
+	} else {
+		// Start llama.cpp directly from the version we have pre-packaged
+		go func() {
+			for {
+				log.Info().Msgf("Starting llama-cpp-rpc-server on '%s:%d'", address, port)
+
+				grpcProcess := assets.ResolvePath(
+					r.BackendAssetsPath,
+					"util",
+					"llama-cpp-rpc-server",
+				)
+
+				args := append([]string{"--host", address, "--port", fmt.Sprint(port)}, r.ExtraLLamaCPPArgs...)
+				args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess)
+
+				cmd := exec.Command(
+					grpcProcess, args...,
+				)
+
+				cmd.Env = os.Environ()
+
+				cmd.Stderr = os.Stdout
+				cmd.Stdout = os.Stdout
+
+				if err := cmd.Start(); err != nil {
+					log.Error().Any("grpcProcess", grpcProcess).Any("args", args).Err(err).Msg("Failed to start llama-cpp-rpc-server")
+				}
+
+				cmd.Wait()
+			}
+		}()
+	}
 
 	_, err = p2p.ExposeService(context.Background(), address, fmt.Sprint(port), r.Token, p2p.NetworkID(r.Peer2PeerNetworkID, p2p.WorkerID))
 	if err != nil {
 		return err
 	}
 
 	for {
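The worker hunk above is a control-flow fix: the no-runner branch used to end in `return nil`, which skipped the ExposeService call entirely, so a worker whose llama.cpp runner was managed externally never advertised itself on the network. Restructuring into if/else lets both paths fall through to ExposeService. A reduced sketch, with hypothetical helper names standing in for the worker's collaborators:

package main

import "fmt"

// Hypothetical stand-ins for the worker's collaborators.
func startRunnerLoop() { fmt.Println("spawning llama-cpp-rpc-server loop") }
func exposeService() error {
	fmt.Println("exposing worker on the p2p network")
	return nil
}

func run(noRunner bool) error {
	if noRunner {
		// Before: this branch ended with `return nil`, so the
		// ExposeService call below was never reached.
		fmt.Println("You need to start llama-cpp-rpc-server yourself")
	} else {
		go startRunnerLoop()
	}
	// After the `} else {` restructure, both paths reach this point.
	return exposeService()
}

func main() {
	_ = run(true)  // external runner: the service is still exposed
	_ = run(false) // managed runner and exposed service
}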
@@ -202,13 +202,9 @@ func ServiceDiscoverer(ctx context.Context, n *node.Node, token, servicesID string
 func discoveryTunnels(ctx context.Context, n *node.Node, token, servicesID string, allocate bool) (chan NodeData, error) {
 	tunnels := make(chan NodeData)
 
-	err := n.Start(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("creating a new node: %w", err)
-	}
 	ledger, err := n.Ledger()
 	if err != nil {
-		return nil, fmt.Errorf("creating a new node: %w", err)
+		return nil, fmt.Errorf("getting the ledger: %w", err)
 	}
 	// get new services, allocate and return to the channel
 
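Besides dropping the duplicate Start, this last hunk also fixes a copy-pasted error message: the ledger failure now reports what actually failed. Since both the old and new sites wrap with %w, the underlying cause stays inspectable. A small sketch of that wrapping behavior (getLedger is a hypothetical stand-in for n.Ledger()):

package main

import (
	"errors"
	"fmt"
)

var errLedger = errors.New("ledger unavailable")

// getLedger is a hypothetical stand-in for n.Ledger().
func getLedger() error { return errLedger }

func main() {
	if err := getLedger(); err != nil {
		// %w keeps the original error reachable via errors.Is/As,
		// and the message now names the failing step.
		wrapped := fmt.Errorf("getting the ledger: %w", err)
		fmt.Println(errors.Is(wrapped, errLedger)) // true
	}
}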