Wire up peer announcement in cluster.

Adam Ierymenko 2015-10-20 16:24:21 -07:00
parent 57e29857cf
commit eb79d4a2f3
5 changed files with 87 additions and 30 deletions

node/Cluster.cpp

@@ -263,8 +263,6 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
 				remotePeerAddress.appendTo(rendezvousForDest);
 				Buffer<2048> rendezvousForOtherEnd;
-				rendezvousForOtherEnd.addSize(2); // leave room for payload size
-				rendezvousForOtherEnd.append((uint8_t)STATE_MESSAGE_PROXY_SEND);
 				remotePeerAddress.appendTo(rendezvousForOtherEnd);
 				rendezvousForOtherEnd.append((uint8_t)Packet::VERB_RENDEZVOUS);
 				const unsigned int rendezvousForOtherEndPayloadSizePtr = rendezvousForOtherEnd.size();
@@ -298,9 +296,8 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
 				}
 				if (haveMatch) {
+					_send(fromMemberId,STATE_MESSAGE_PROXY_SEND,rendezvousForOtherEnd.data(),rendezvousForOtherEnd.size());
 					RR->sw->send(rendezvousForDest,true,0);
-					rendezvousForOtherEnd.setAt<uint16_t>(0,(uint16_t)(rendezvousForOtherEnd.size() - 2));
-					_send(fromMemberId,rendezvousForOtherEnd.data(),rendezvousForOtherEnd.size());
 				}
 			}
 		}
@@ -331,14 +328,64 @@ void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
 
 void Cluster::replicateHavePeer(const Identity &peerId)
 {
+	{ // Use peer affinity table to track our own last announce time for peers
+		_PeerAffinity pa(peerId.address(),_id,RR->node->now());
+		Mutex::Lock _l2(_peerAffinities_m);
+		std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),pa)); // O(log(n))
+		if ((i != _peerAffinities.end())&&(i->key == pa.key)) {
+			if ((pa.timestamp - i->timestamp) >= ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD) {
+				i->timestamp = pa.timestamp;
+				// continue to announcement
+			} else {
+				// we've already announced this peer recently, so skip
+				return;
+			}
+		} else {
+			_peerAffinities.push_back(pa);
+			std::sort(_peerAffinities.begin(),_peerAffinities.end()); // probably a more efficient way to insert but okay for now
+			// continue to announcement
+		}
+	}
+
+	// announcement
+	Buffer<4096> buf;
+	peerId.serialize(buf,false);
+	{
+		Mutex::Lock _l(_memberIds_m);
+		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+			Mutex::Lock _l2(_members[*mid].lock);
+			_send(*mid,STATE_MESSAGE_HAVE_PEER,buf.data(),buf.size());
+		}
+	}
 }
 
 void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group)
 {
+	Buffer<4096> buf;
+	buf.append((uint64_t)nwid);
+	peerAddress.appendTo(buf);
+	group.mac().appendTo(buf);
+	buf.append((uint32_t)group.adi());
+	{
+		Mutex::Lock _l(_memberIds_m);
+		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+			Mutex::Lock _l2(_members[*mid].lock);
+			_send(*mid,STATE_MESSAGE_MULTICAST_LIKE,buf.data(),buf.size());
+		}
+	}
 }
 
 void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembership &com)
 {
+	Buffer<4096> buf;
+	com.serialize(buf);
+	{
+		Mutex::Lock _l(_memberIds_m);
+		for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
+			Mutex::Lock _l2(_members[*mid].lock);
+			_send(*mid,STATE_MESSAGE_COM,buf.data(),buf.size());
+		}
+	}
 }
 
 void Cluster::doPeriodicTasks()
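
Note: replicateHavePeer() piggybacks on the _PeerAffinity table (kept sorted so std::lower_bound can binary-search it) to rate-limit its own announcements to one per ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD per peer. A minimal standalone sketch of that throttling idiom; Entry and shouldAnnounce are illustrative names, not ZeroTier types:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Entry
{
	uint64_t key;       // e.g. packed peer address + member id, as in _PeerAffinity
	uint64_t timestamp; // last announce time in milliseconds
	inline bool operator<(const Entry &e) const { return (key < e.key); }
};

// Returns true (and records "now") if this key has not been announced within "period"
static bool shouldAnnounce(std::vector<Entry> &tab,uint64_t key,uint64_t now,uint64_t period)
{
	Entry probe;
	probe.key = key;
	probe.timestamp = now;
	std::vector<Entry>::iterator i(std::lower_bound(tab.begin(),tab.end(),probe)); // O(log(n))
	if ((i != tab.end())&&(i->key == key)) {
		if ((now - i->timestamp) < period)
			return false; // announced recently, skip
		i->timestamp = now;
		return true;
	}
	tab.insert(i,probe); // lower_bound already found the sorted insertion point
	return true;
}

Inserting at the iterator lower_bound returns keeps the vector sorted without the full std::sort the commit falls back on; its own comment ("probably a more efficient way to insert") flags the same opportunity.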
@@ -371,7 +418,7 @@ void Cluster::doPeriodicTasks()
 				alive.append((uint8_t)_zeroTierPhysicalEndpoints.size());
 				for(std::vector<InetAddress>::const_iterator pe(_zeroTierPhysicalEndpoints.begin());pe!=_zeroTierPhysicalEndpoints.end();++pe)
 					pe->serialize(alive);
-				_send(*mid,alive.data(),alive.size());
+				_send(*mid,STATE_MESSAGE_ALIVE,alive.data(),alive.size());
 				_members[*mid].lastAnnouncedAliveTo = now;
 			}
@@ -498,18 +545,15 @@ bool Cluster::redirectPeer(const SharedPtr<Peer> &peer,const InetAddress &peerPhysicalAddress,bool offload)
 	}
 }
 
-void Cluster::_send(uint16_t memberId,const void *msg,unsigned int len)
+void Cluster::_send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len)
 {
 	_Member &m = _members[memberId];
 	// assumes m.lock is locked!
-	for(;;) {
-		if ((m.q.size() + len) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
-			_flush(memberId);
-		else {
-			m.q.append(msg,len);
-			break;
-		}
-	}
+	if ((m.q.size() + len + 3) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
+		_flush(memberId);
+	m.q.append((uint16_t)(len + 1));
+	m.q.append((uint8_t)type);
+	m.q.append(msg,len);
 }
 
 void Cluster::_flush(uint16_t memberId)

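Note: the _send() rewrite centralizes wire framing. Callers used to reserve two size bytes themselves (addSize(2) ... setAt<uint16_t>(0,...)) and prepend the type byte; now every queued record is [uint16 size][uint8 type][payload] with size = len + 1 to cover the type byte, which is why the capacity check reserves len + 3. A rough sketch of that record format over a plain std::vector, assuming the big-endian integer encoding ZeroTier's Buffer uses; frameAppend and frameParse are illustrative helpers, not the actual API:

#include <cstdint>
#include <vector>

// Append one framed record: 2-byte size (type byte + payload), 1-byte type, payload
static void frameAppend(std::vector<uint8_t> &q,uint8_t type,const void *msg,unsigned int len)
{
	const uint16_t sz = (uint16_t)(len + 1); // +1 for the type byte
	q.push_back((uint8_t)(sz >> 8)); // big-endian size, as in Buffer::append<uint16_t>
	q.push_back((uint8_t)sz);
	q.push_back(type);
	const uint8_t *p = (const uint8_t *)msg;
	q.insert(q.end(),p,p + len);
}

// Walk a received batch, invoking handler(type,payload,payloadLen) per record;
// returns false on malformed or truncated input
template<typename F>
static bool frameParse(const uint8_t *data,unsigned int len,F handler)
{
	unsigned int ptr = 0;
	while ((ptr + 2) <= len) {
		const unsigned int sz = ((unsigned int)data[ptr] << 8) | (unsigned int)data[ptr + 1];
		ptr += 2;
		if ((sz < 1)||((ptr + sz) > len))
			return false;
		handler((uint8_t)data[ptr],data + ptr + 1,sz - 1);
		ptr += sz;
	}
	return (ptr == len);
}

With framing in one place, call sites like the PROXY_SEND path above shrink to a single _send() call and can no longer get the length prefix wrong.
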
node/Cluster.hpp

@@ -42,12 +42,18 @@
 #include "Buffer.hpp"
 #include "Mutex.hpp"
 #include "SharedPtr.hpp"
+#include "Hashtable.hpp"
 
 /**
  * Timeout for cluster members being considered "alive"
  */
 #define ZT_CLUSTER_TIMEOUT 30000
 
+/**
+ * How often should we announce that we have a peer?
+ */
+#define ZT_CLUSTER_HAVE_PEER_ANNOUNCE_PERIOD 60000
+
 /**
  * Desired period between doPeriodicTasks() in milliseconds
  */
@@ -238,7 +244,7 @@
 	bool redirectPeer(const SharedPtr<Peer> &peer,const InetAddress &peerPhysicalAddress,bool offload);
 
 private:
-	void _send(uint16_t memberId,const void *msg,unsigned int len);
+	void _send(uint16_t memberId,StateMessageType type,const void *msg,unsigned int len);
 	void _flush(uint16_t memberId);
 
 	// These are initialized in the constructor and remain static
@@ -292,7 +298,7 @@
 	std::vector<uint16_t> _memberIds;
 	Mutex _memberIds_m;
 
-	// Record tracking which members have which peers and how recently they claimed this
+	// Record tracking which members have which peers and how recently they claimed this -- also used to track our last claimed time
 	struct _PeerAffinity
 	{
 		_PeerAffinity(const Address &a,uint16_t mid,uint64_t ts) :

node/IncomingPacket.cpp

@@ -272,7 +272,6 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
 		TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
 		return true;
 	}
-
 	peer = RR->topology->addPeer(newPeer);
 	// Continue at // VALID

node/Peer.cpp

@@ -34,6 +34,7 @@
 #include "Network.hpp"
 #include "AntiRecursion.hpp"
 #include "SelfAwareness.hpp"
+#include "Cluster.hpp"
 
 #include <algorithm>
@@ -107,7 +108,6 @@ void Peer::received(
 			// Learn paths if they've been confirmed via a HELLO or an ECHO
 			RemotePath *slot = (RemotePath *)0;
 			if (np < ZT_MAX_PEER_NETWORK_PATHS) {
-				// Add new path
 				slot = &(_paths[np++]);
 			} else {
 				uint64_t slotLRmin = 0xffffffffffffffffULL;
@@ -141,6 +141,11 @@ void Peer::received(
 					}
 				}
 			}
+
+#ifdef ZT_ENABLE_CLUSTER
+			if ((pathIsConfirmed)&&(RR->cluster))
+				RR->cluster->replicateHavePeer(_id);
+#endif
 		}
 
 		if ((now - _lastAnnouncedTo) >= ((ZT_MULTICAST_LIKE_EXPIRE / 2) - 1000)) {

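Note: this hunk is where peer announcement actually gets wired up: the moment Peer::received() confirms a new direct path, it notifies the cluster layer, and replicateHavePeer()'s throttle (above) keeps the resulting HAVE_PEER broadcasts to at most one per period. The guard is both compile-time and run-time, since RR->cluster is null when clustering is not active. A hypothetical standalone rendering of the pattern; ClusterLike, Env, and onPathConfirmed are stand-ins, not codebase names:

#include <cstdint>

struct Identity { uint64_t address; };

struct ClusterLike
{
	// Stands in for Cluster::replicateHavePeer(), which is rate-limited internally
	void replicateHavePeer(const Identity &peerId) { (void)peerId; }
};

struct Env { ClusterLike *cluster; }; // stands in for RuntimeEnvironment

static inline void onPathConfirmed(const Env *RR,const Identity &id,bool pathIsConfirmed)
{
#ifdef ZT_ENABLE_CLUSTER
	// Compile-time guard: non-cluster builds drop this code entirely
	if ((pathIsConfirmed)&&(RR->cluster)) // run-time guard: cluster mode may be off
		RR->cluster->replicateHavePeer(id);
#else
	(void)RR; (void)id; (void)pathIsConfirmed;
#endif
}
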
node/Topology.cpp

@@ -122,18 +122,22 @@ Topology::~Topology()
 SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
 {
 	if (peer->address() == RR->identity.address()) {
-		TRACE("BUG: addNewPeer() caught and ignored attempt to add peer for self");
+		TRACE("BUG: addPeer() caught and ignored attempt to add peer for self");
 		throw std::logic_error("cannot add peer for self");
 	}
 
-	const uint64_t now = RR->node->now();
-	Mutex::Lock _l(_lock);
-
-	SharedPtr<Peer> &p = _peers.set(peer->address(),peer);
-	p->use(now);
-	saveIdentity(p->identity());
-
-	return p;
+	SharedPtr<Peer> np;
+	{
+		Mutex::Lock _l(_lock);
+		SharedPtr<Peer> &hp = _peers[peer->address()];
+		if (!hp)
+			hp = peer;
+		np = hp;
+	}
+	np->use(RR->node->now());
+	saveIdentity(np->identity());
+
+	return np;
 }
 
 SharedPtr<Peer> Topology::getPeer(const Address &zta)
@@ -143,13 +147,12 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
 		return SharedPtr<Peer>();
 	}
 
-	const uint64_t now = RR->node->now();
 	Mutex::Lock _l(_lock);
 
 	SharedPtr<Peer> &ap = _peers[zta];
 	if (ap) {
-		ap->use(now);
+		ap->use(RR->node->now());
 		return ap;
 	}
@@ -157,13 +160,13 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
 	if (id) {
 		try {
 			ap = SharedPtr<Peer>(new Peer(RR->identity,id));
-			ap->use(now);
+			ap->use(RR->node->now());
 			return ap;
 		} catch ( ... ) {} // invalid identity?
 	}
 
-
+	// If we get here it means we read an invalid cache identity or had some other error
 	_peers.erase(zta);
 
 	return SharedPtr<Peer>();
 }
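
Note: besides dropping the now local in favor of fetching the time at each call site, the addPeer() rewrite changes semantics: _peers.set() unconditionally replaced any existing Peer object, while the new code keeps the instance already in the table (Hashtable's operator[] creates an empty slot, like std::map) and only installs the caller's object if the slot was empty. It also moves the potentially blocking saveIdentity() disk write outside the lock. A standalone sketch of the idiom using std::map and std::shared_ptr in place of ZeroTier's Hashtable and SharedPtr; all names illustrative:

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>

struct Peer { /* ... peer state ... */ };

class Registry
{
public:
	// Returns the canonical instance for this address: the one already present
	// if any, otherwise the one supplied by the caller
	std::shared_ptr<Peer> addPeer(uint64_t addr,const std::shared_ptr<Peer> &peer)
	{
		std::shared_ptr<Peer> np;
		{
			std::lock_guard<std::mutex> l(_lock);
			std::shared_ptr<Peer> &hp = _peers[addr]; // default-constructs an empty slot
			if (!hp)
				hp = peer; // insert only if absent
			np = hp; // copy out under the lock
		}
		// ... slow work (e.g. persisting the identity to disk) happens here,
		// after the lock is released ...
		return np;
	}

private:
	std::map< uint64_t,std::shared_ptr<Peer> > _peers;
	std::mutex _lock;
};

Returning the canonical instance means two threads racing to add the same peer both end up holding the same object, instead of one silently orphaning the other's.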