Mirror of https://github.com/mudler/LocalAI.git (synced 2024-12-19 04:37:53 +00:00)
feat(federation): do not allocate local services for load balancing (#3337)
* refactor: extract proxy into functions

* feat(federation): do not allocate services, directly connect with libp2p

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent bcd3c1deb2 · commit 6d350ccce0
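In outline: the federated server previously allocated a local TCP tunnel for every discovered worker and dialed it with net.Dial; after this change it tracks workers by node ID, resolves the worker's NodeData on demand, and forwards each accepted connection over libp2p directly. A compressed sketch of the new request path (identifiers are taken from the diff below; the surrounding control flow is illustrative only):

    // Worker selection now yields a node ID rather than a tunnel address.
    nodeData, exists := GetNode(fs.service, workerID)
    if !exists {
        log.Error().Msgf("Node %s not found", workerID)
        return
    }
    // No local tunnel: hand the accepted conn straight to a libp2p stream.
    proxyP2PConnection(ctx, node, nodeData.ServiceID, conn)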
@@ -40,9 +40,9 @@ func (fs *FederatedServer) RandomServer() string {
 	var tunnelAddresses []string
 	for _, v := range GetAvailableNodes(fs.service) {
 		if v.IsOnline() {
-			tunnelAddresses = append(tunnelAddresses, v.TunnelAddress)
+			tunnelAddresses = append(tunnelAddresses, v.ID)
 		} else {
-			delete(fs.requestTable, v.TunnelAddress) // make sure it's not tracked
+			delete(fs.requestTable, v.ID) // make sure it's not tracked
 			log.Info().Msgf("Node %s is offline", v.ID)
 		}
 	}
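With the request table keyed by node ID instead of tunnel address, the load-balancing bookkeeping stays meaningful even though no tunnel is ever dialed. A minimal sketch of what that bookkeeping plausibly looks like (the real RecordRequest and SelectLeastUsedServer live elsewhere in this file; the bodies below are illustrative, and locking is omitted):

    // RecordRequest counts one request routed to a worker, by node ID.
    func (fs *FederatedServer) RecordRequest(nodeID string) {
        fs.requestTable[nodeID]++
    }

    // SelectLeastUsedServer picks the node ID with the fewest recorded requests.
    func (fs *FederatedServer) SelectLeastUsedServer() string {
        least := ""
        for id, count := range fs.requestTable {
            if least == "" || count < fs.requestTable[least] {
                least = id
            }
        }
        return least
    }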
@@ -61,8 +61,8 @@ func (fs *FederatedServer) syncTableStatus() {
 
 	for _, v := range GetAvailableNodes(fs.service) {
 		if v.IsOnline() {
-			fs.ensureRecordExist(v.TunnelAddress)
-			currentTunnels[v.TunnelAddress] = struct{}{}
+			fs.ensureRecordExist(v.ID)
+			currentTunnels[v.ID] = struct{}{}
 		}
 	}
 
@@ -8,16 +8,12 @@ import (
 	"errors"
 	"fmt"
 	"net"
-	"time"
 
 	"github.com/mudler/edgevpn/pkg/node"
-	"github.com/mudler/edgevpn/pkg/protocol"
-	"github.com/mudler/edgevpn/pkg/types"
 	"github.com/rs/zerolog/log"
 )
 
 func (f *FederatedServer) Start(ctx context.Context) error {
 
 	n, err := NewNode(f.p2ptoken)
 	if err != nil {
 		return fmt.Errorf("creating a new node: %w", err)
@@ -29,7 +25,7 @@ func (f *FederatedServer) Start(ctx context.Context) error {
 
 	if err := ServiceDiscoverer(ctx, n, f.p2ptoken, f.service, func(servicesID string, tunnel NodeData) {
 		log.Debug().Msgf("Discovered node: %s", tunnel.ID)
-	}, true); err != nil {
+	}, false); err != nil {
 		return err
 	}
 
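The only change here is the trailing boolean, true to false. Judging by ensureService's allocate parameter in the last hunk, that flag controls whether a local TCP service is allocated for each discovered node; the federated server now only wants the node table maintained. A sketch of the call with the assumed meaning of the parameter spelled out:

    // allocate=false (assumption, based on ensureService below): discover and
    // track nodes, but do not bind a local listener per worker.
    err := ServiceDiscoverer(ctx, n, f.p2ptoken, f.service,
        func(servicesID string, tunnel NodeData) {
            log.Debug().Msgf("Discovered node: %s", tunnel.ID)
        }, false)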
@@ -50,21 +46,8 @@ func (fs *FederatedServer) proxy(ctx context.Context, node *node.Node) error {
 		<-ctx.Done()
 		l.Close()
 	}()
-	ledger, _ := node.Ledger()
-
-	// Announce ourselves so nodes accepts our connection
-	ledger.Announce(
-		ctx,
-		10*time.Second,
-		func() {
-			updatedMap := map[string]interface{}{}
-			updatedMap[node.Host().ID().String()] = &types.User{
-				PeerID:    node.Host().ID().String(),
-				Timestamp: time.Now().String(),
-			}
-			ledger.Add(protocol.UsersLedgerKey, updatedMap)
-		},
-	)
+
+	nodeAnnounce(ctx, node)
 
 	defer l.Close()
 	for {
@@ -82,52 +65,36 @@ func (fs *FederatedServer) proxy(ctx context.Context, node *node.Node) error {
 
 			// Handle connections in a new goroutine, forwarding to the p2p service
 			go func() {
-				tunnelAddr := ""
-
+				workerID := ""
 				if fs.workerTarget != "" {
-					for _, v := range GetAvailableNodes(fs.service) {
-						if v.ID == fs.workerTarget {
-							tunnelAddr = v.TunnelAddress
-							break
-						}
-					}
+					workerID = fs.workerTarget
 				} else if fs.loadBalanced {
 					log.Debug().Msgf("Load balancing request")
 
-					tunnelAddr = fs.SelectLeastUsedServer()
-					if tunnelAddr == "" {
+					workerID = fs.SelectLeastUsedServer()
+					if workerID == "" {
 						log.Debug().Msgf("Least used server not found, selecting random")
-						tunnelAddr = fs.RandomServer()
+						workerID = fs.RandomServer()
 					}
-
 				} else {
-					tunnelAddr = fs.RandomServer()
+					workerID = fs.RandomServer()
 				}
 
-				if tunnelAddr == "" {
+				if workerID == "" {
 					log.Error().Msg("No available nodes yet")
 					return
 				}
 
-				log.Debug().Msgf("Selected tunnel %s", tunnelAddr)
-
-				tunnelConn, err := net.Dial("tcp", tunnelAddr)
-				if err != nil {
-					log.Error().Err(err).Msg("Error connecting to tunnel")
+				log.Debug().Msgf("Selected node %s", workerID)
+
+				nodeData, exists := GetNode(fs.service, workerID)
+				if !exists {
+					log.Error().Msgf("Node %s not found", workerID)
 					return
 				}
 
-				log.Info().Msgf("Redirecting %s to %s", conn.LocalAddr().String(), tunnelConn.RemoteAddr().String())
-				closer := make(chan struct{}, 2)
-				go copyStream(closer, tunnelConn, conn)
-				go copyStream(closer, conn, tunnelConn)
-				<-closer
-
-				tunnelConn.Close()
-				conn.Close()
-
+				proxyP2PConnection(ctx, node, nodeData.ServiceID, conn)
 				if fs.loadBalanced {
-					fs.RecordRequest(tunnelAddr)
+					fs.RecordRequest(workerID)
 				}
 			}()
 		}
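The selection logic is unchanged in spirit but now yields a worker ID. Restated as a hypothetical helper (pickWorker is not in the diff; it only paraphrases the branch order above):

    // Precedence: explicit worker target, then least-used (when load
    // balancing is enabled), then random.
    func (fs *FederatedServer) pickWorker() string {
        if fs.workerTarget != "" {
            return fs.workerTarget
        }
        if fs.loadBalanced {
            if id := fs.SelectLeastUsedServer(); id != "" {
                return id
            }
        }
        return fs.RandomServer()
    }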
@@ -14,6 +14,7 @@ type NodeData struct {
 	Name          string
 	ID            string
 	TunnelAddress string
+	ServiceID     string
 	LastSeen      time.Time
 }
 
@@ -39,6 +40,19 @@ func GetAvailableNodes(serviceID string) []NodeData {
 	return availableNodes
 }
 
+func GetNode(serviceID, nodeID string) (NodeData, bool) {
+	if serviceID == "" {
+		serviceID = defaultServicesID
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	if _, ok := nodes[serviceID]; !ok {
+		return NodeData{}, false
+	}
+	nd, exists := nodes[serviceID][nodeID]
+	return nd, exists
+}
+
 func AddNode(serviceID string, node NodeData) {
 	if serviceID == "" {
 		serviceID = defaultServicesID
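GetNode is the new read path that replaces scanning GetAvailableNodes for a matching ID. A small usage sketch (resolveWorker is hypothetical; GetNode, NodeData, and the empty-serviceID fallback are from the hunk above):

    // resolveWorker maps a worker's node ID to the p2p service to dial.
    // Passing "" for serviceID falls back to defaultServicesID.
    func resolveWorker(serviceID, workerID string) (string, bool) {
        nd, exists := GetNode(serviceID, workerID)
        if !exists || !nd.IsOnline() {
            return "", false
        }
        return nd.ServiceID, true
    }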
@@ -66,22 +66,7 @@ func nodeID(s string) string {
 	return fmt.Sprintf("%s-%s", hostname, s)
 }
 
-func allocateLocalService(ctx context.Context, node *node.Node, listenAddr, service string) error {
-
-	zlog.Info().Msgf("Allocating service '%s' on: %s", service, listenAddr)
-	// Open local port for listening
-	l, err := net.Listen("tcp", listenAddr)
-	if err != nil {
-		zlog.Error().Err(err).Msg("Error listening")
-		return err
-	}
-	go func() {
-		<-ctx.Done()
-		l.Close()
-	}()
-
-	// ll.Info("Binding local port on", srcaddr)
-
+func nodeAnnounce(ctx context.Context, node *node.Node) {
 	ledger, _ := node.Ledger()
 
 	// Announce ourselves so nodes accepts our connection
@@ -97,25 +82,12 @@ func allocateLocalService(ctx context.Context, node *node.Node, listenAddr, serv
 			ledger.Add(protocol.UsersLedgerKey, updatedMap)
 		},
 	)
-
-	defer l.Close()
-	for {
-		select {
-		case <-ctx.Done():
-			return errors.New("context canceled")
-		default:
-			zlog.Debug().Msg("New for connection")
-			// Listen for an incoming connection.
-			conn, err := l.Accept()
-			if err != nil {
-				fmt.Println("Error accepting: ", err.Error())
-				continue
-			}
 }
 
-			// Handle connections in a new goroutine, forwarding to the p2p service
-			go func() {
+func proxyP2PConnection(ctx context.Context, node *node.Node, serviceID string, conn net.Conn) {
+	ledger, _ := node.Ledger()
 	// Retrieve current ID for ip in the blockchain
-	existingValue, found := ledger.GetKey(protocol.ServicesLedgerKey, service)
+	existingValue, found := ledger.GetKey(protocol.ServicesLedgerKey, serviceID)
 	service := &types.Service{}
 	existingValue.Unmarshal(service)
 	// If mismatch, update the blockchain
@@ -139,7 +111,7 @@ func allocateLocalService(ctx context.Context, node *node.Node, listenAddr, serv
 	// Open a stream
 	stream, err := node.Host().NewStream(ctx, d, protocol.ServiceProtocol.ID())
 	if err != nil {
-		zlog.Error().Msg("cannot open stream peer")
+		zlog.Error().Err(err).Msg("cannot open stream peer")
 
 		conn.Close()
 		// ll.Debugf("could not open stream '%s'", err.Error())
@@ -154,7 +126,40 @@ func allocateLocalService(ctx context.Context, node *node.Node, listenAddr, serv
 
 	stream.Close()
 	conn.Close()
-	// ll.Infof("(service %s) Done handling %s", serviceID, l.Addr().String())
+}
+
+func allocateLocalService(ctx context.Context, node *node.Node, listenAddr, service string) error {
+	zlog.Info().Msgf("Allocating service '%s' on: %s", service, listenAddr)
+	// Open local port for listening
+	l, err := net.Listen("tcp", listenAddr)
+	if err != nil {
+		zlog.Error().Err(err).Msg("Error listening")
+		return err
+	}
+	go func() {
+		<-ctx.Done()
+		l.Close()
+	}()
+
+	nodeAnnounce(ctx, node)
+
+	defer l.Close()
+	for {
+		select {
+		case <-ctx.Done():
+			return errors.New("context canceled")
+		default:
+			zlog.Debug().Msg("New for connection")
+			// Listen for an incoming connection.
+			conn, err := l.Accept()
+			if err != nil {
+				fmt.Println("Error accepting: ", err.Error())
+				continue
+			}
+
+			// Handle connections in a new goroutine, forwarding to the p2p service
+			go func() {
+				proxyP2PConnection(ctx, node, service, conn)
 			}()
 		}
 	}
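proxyP2PConnection carries the same copy loop the old code ran against a TCP tunnel, now against a libp2p stream. The generic shape of that forwarding, as a self-contained sketch (pipe is hypothetical; libp2p streams satisfy io.ReadWriteCloser, which is all the pattern needs):

    import (
        "io"
        "net"
    )

    // pipe shuttles bytes both ways and tears down both ends as soon as
    // one direction finishes, mirroring the closer-channel idiom removed
    // from the federated proxy above.
    func pipe(conn net.Conn, stream io.ReadWriteCloser) {
        done := make(chan struct{}, 2)
        go func() { io.Copy(stream, conn); done <- struct{}{} }()
        go func() { io.Copy(conn, stream); done <- struct{}{} }()
        <-done
        conn.Close()
        stream.Close()
    }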
@@ -258,6 +263,7 @@ var muservice sync.Mutex
 func ensureService(ctx context.Context, n *node.Node, nd *NodeData, sserv string, allocate bool) {
 	muservice.Lock()
 	defer muservice.Unlock()
+	nd.ServiceID = sserv
 	if ndService, found := service[nd.Name]; !found {
 		if !nd.IsOnline() {
 			// if node is offline and not present, do nothing