Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-02-01 00:45:27 +00:00)
Root server terminology cleanup, and tighten a security check by verifying the full identity of peers instead of just their address.
commit 7bae95836c (parent 07f84a99b4)
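
The security tightening is easiest to see in the Topology hunks below: the old `isRootserver(const Address &)` matched peers against a list of 40-bit ZeroTier addresses, while the new `isRoot(const Identity &)` keys the lookup on the full identity, i.e. the address together with its public key. A minimal stand-alone sketch of the distinction, using hypothetical stand-in types and function names rather than the real `Address`/`Identity` classes:

```cpp
#include <algorithm>
#include <cstdint>
#include <set>
#include <string>
#include <vector>

// Hypothetical stand-ins for ZeroTier's Address and Identity classes.
struct Address {
	uint64_t a; // 40-bit ZeroTier address
	bool operator==(const Address &o) const { return a == o.a; }
};
struct Identity {
	Address address;
	std::string publicKey; // full identity = address + public key
	bool operator<(const Identity &o) const
	{
		if (address.a != o.address.a) return address.a < o.address.a;
		return publicKey < o.publicKey;
	}
};

// Before: root membership tested by 40-bit address alone, so any peer
// claiming a root's address would pass the check.
static bool isRootserverByAddress(const std::vector<Address> &rootAddresses,const Address &zta)
{
	return std::find(rootAddresses.begin(),rootAddresses.end(),zta) != rootAddresses.end();
}

// After: membership tested against the full identity, so the peer must
// also present the matching public key, ownership of which is proven by
// packet authentication.
static bool isRootByIdentity(const std::set<Identity> &roots,const Identity &id)
{
	return roots.count(id) != 0;
}
```

The `Topology::isRoot()` added in this commit (visible in the Topology hunks below) performs the equivalent `_roots.count(id) != 0` lookup under the topology lock.
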
@@ -559,8 +559,8 @@ typedef struct
  */
 enum ZT1_PeerRole {
 	ZT1_PEER_ROLE_LEAF = 0, // ordinary node
-	ZT1_PEER_ROLE_HUB = 1, // locally federated hub
-	ZT1_PEER_ROLE_ROOTSERVER = 2 // planetary rootserver
+	ZT1_PEER_ROLE_RELAY = 1, // relay node
+	ZT1_PEER_ROLE_ROOT = 2 // root server
 };
 
 /**
@@ -254,7 +254,7 @@
 /**
  * Delay between scans of the topology active peer DB for peers that need ping
  *
- * This is also how often pings will be retried to upstream peers (rootservers)
+ * This is also how often pings will be retried to upstream peers (relays, roots)
  * constantly until something is heard.
  */
 #define ZT_PING_CHECK_INVERVAL 6250
@@ -110,7 +110,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
 
 		case Packet::ERROR_OBJ_NOT_FOUND:
 			if (inReVerb == Packet::VERB_WHOIS) {
-				if (RR->topology->isRootserver(peer->address()))
+				if (RR->topology->isRoot(peer->identity()))
 					RR->sw->cancelWhoisRequest(Address(field(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH));
 			} else if (inReVerb == Packet::VERB_NETWORK_CONFIG_REQUEST) {
 				SharedPtr<Network> network(RR->node->network(at<uint64_t>(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD)));
@@ -128,7 +128,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
 			break;
 
 		case Packet::ERROR_IDENTITY_COLLISION:
-			if (RR->topology->isRootserver(peer->address()))
+			if (RR->topology->isRoot(peer->identity()))
 				RR->node->postEvent(ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION);
 			break;
 
@@ -268,7 +268,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
 	peer->setRemoteVersion(protoVersion,vMajor,vMinor,vRevision);
 
 	bool trusted = false;
-	if (RR->topology->isRootserver(id.address())) {
+	if (RR->topology->isRoot(id)) {
 		RR->node->postNewerVersionIfNewer(vMajor,vMinor,vRevision);
 		trusted = true;
 	}
@@ -353,7 +353,7 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
 	peer->setRemoteVersion(vProto,vMajor,vMinor,vRevision);
 
 	bool trusted = false;
-	if (RR->topology->isRootserver(peer->address())) {
+	if (RR->topology->isRoot(peer->identity())) {
 		RR->node->postNewerVersionIfNewer(vMajor,vMinor,vRevision);
 		trusted = true;
 	}
@@ -362,10 +362,10 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
 		} break;
 
 		case Packet::VERB_WHOIS: {
-			// Right now only rootservers are allowed to send OK(WHOIS) to prevent
-			// poisoning attacks. Further decentralization will require some other
-			// kind of trust mechanism.
-			if (RR->topology->isRootserver(peer->address())) {
+			/* Right now only root servers are allowed to send OK(WHOIS) to prevent
+			 * poisoning attacks. Further decentralization will require some other
+			 * kind of trust mechanism. */
+			if (RR->topology->isRoot(peer->identity())) {
 				const Identity id(*this,ZT_PROTO_VERB_WHOIS__OK__IDX_IDENTITY);
 				if (id.locallyValidate())
 					RR->sw->doAnythingWaitingForPeer(RR->topology->addPeer(SharedPtr<Peer>(new Peer(RR->identity,id))));
@@ -216,7 +216,7 @@ void Multicaster::send(
 
 	if ((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY) {
 		gs.lastExplicitGather = now;
-		SharedPtr<Peer> sn(RR->topology->getBestRootserver());
+		SharedPtr<Peer> sn(RR->topology->getBestRoot());
 		if (sn) {
 			TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
 
@@ -269,51 +269,6 @@ void Multicaster::send(
 	// Free allocated memory buffer if any
 	if (indexes != idxbuf)
 		delete [] indexes;
 
-#ifdef ZT_SUPPORT_LEGACY_MULTICAST
-	// This sends a P5 multicast up to our rootserver, who then
-	// redistributes it manually down to all <1.0.0 peers for
-	// legacy support. These peers don't support the new multicast
-	// frame type, so even if they receive it they will ignore it.
-	{
-		SharedPtr<Peer> sn(RR->topology->getBestRootserver());
-		if (sn) {
-			uint32_t rn = RR->prng->next32();
-			Packet outp(sn->address(),RR->identity.address(),Packet::VERB_P5_MULTICAST_FRAME);
-
-			outp.append((uint16_t)0xffff); // do not forward
-			outp.append((unsigned char)0,320 + 1024); // empty queue and bloom filter
-
-			outp.append((unsigned char)((com) ? ZT_PROTO_VERB_P5_MULTICAST_FRAME_FLAGS_HAS_MEMBERSHIP_CERTIFICATE : 0));
-			outp.append((uint64_t)nwid);
-			outp.append((uint16_t)0);
-			outp.append((unsigned char)0);
-			outp.append((unsigned char)0);
-			RR->identity.address().appendTo(outp);
-			outp.append((const void *)&rn,3); // random multicast ID
-			if (src)
-				src.appendTo(outp);
-			else MAC(RR->identity.address(),nwid).appendTo(outp);
-			mg.mac().appendTo(outp);
-			outp.append((uint32_t)mg.adi());
-			outp.append((uint16_t)etherType);
-			outp.append((uint16_t)len);
-			outp.append(data,len);
-			unsigned int signedPortionLen = outp.size() - ZT_PROTO_VERB_P5_MULTICAST_FRAME_IDX__START_OF_SIGNED_PORTION;
-
-			C25519::Signature sig(RR->identity.sign(outp.field(ZT_PROTO_VERB_P5_MULTICAST_FRAME_IDX__START_OF_SIGNED_PORTION,signedPortionLen),signedPortionLen));
-
-			outp.append((uint16_t)sig.size());
-			outp.append(sig.data,(unsigned int)sig.size());
-
-			if (com) com->serialize(outp);
-
-			outp.compress();
-			outp.armor(sn->key(),true);
-			sn->send(RR,outp.data(),outp.size(),now);
-		}
-	}
-#endif // ZT_SUPPORT_LEGACY_MULTICAST
 }
 
 void Multicaster::clean(uint64_t now)
@@ -518,13 +518,13 @@ public:
 		RR(renv),
 		_now(renv->node->now()),
 		_network(nw),
-		_rootserverAddresses(renv->topology->rootserverAddresses()),
+		_rootAddresses(renv->topology->rootAddresses()),
 		_allMulticastGroups(nw->_allMulticastGroups())
 	{}
 
 	inline void operator()(Topology &t,const SharedPtr<Peer> &p)
 	{
-		if ( ( (p->hasActiveDirectPath(_now)) && (_network->_isAllowed(p->address())) ) || (std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),p->address()) != _rootserverAddresses.end()) ) {
+		if ( ( (p->hasActiveDirectPath(_now)) && (_network->_isAllowed(p->address())) ) || (std::find(_rootAddresses.begin(),_rootAddresses.end(),p->address()) != _rootAddresses.end()) ) {
 			Packet outp(p->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
 
 			for(std::vector<MulticastGroup>::iterator mg(_allMulticastGroups.begin());mg!=_allMulticastGroups.end();++mg) {
@@ -551,7 +551,7 @@ private:
 	const RuntimeEnvironment *RR;
 	uint64_t _now;
 	Network *_network;
-	std::vector<Address> _rootserverAddresses;
+	std::vector<Address> _rootAddresses;
 	std::vector<MulticastGroup> _allMulticastGroups;
 };
 
@@ -133,9 +133,7 @@ Node::Node(
 		if (!rt.size())
 			rt.fromString(ZT_DEFAULTS.defaultRootTopology);
 	}
-	Dictionary rootservers(rt.get("rootservers",""));
-	rootservers.update(rt.get("supernodes",""));
-	RR->topology->setRootservers(rootservers);
+	RR->topology->setRootServers(Dictionary(rt.get("rootservers","")));
 
 	postEvent(ZT1_EVENT_UP);
 }
@@ -191,7 +189,7 @@ public:
 		RR(renv),
 		_now(now),
 		_relays(relays),
-		_rootservers(RR->topology->rootserverAddresses())
+		_rootAddresses(RR->topology->rootAddresses())
 	{
 	}
 
@@ -207,7 +205,7 @@ public:
 			}
 		}
 
-		if ((isRelay)||(std::find(_rootservers.begin(),_rootservers.end(),p->address()) != _rootservers.end())) {
+		if ((isRelay)||(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->address()) != _rootAddresses.end())) {
 			p->doPingAndKeepalive(RR,_now);
 			if (p->lastReceive() > lastReceiveFromUpstream)
 				lastReceiveFromUpstream = p->lastReceive();
@@ -221,7 +219,7 @@ private:
 	const RuntimeEnvironment *RR;
 	uint64_t _now;
 	const std::vector< std::pair<Address,InetAddress> > &_relays;
-	std::vector<Address> _rootservers;
+	std::vector<Address> _rootAddresses;
 };
 
 ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
@@ -386,7 +384,7 @@ ZT1_PeerList *Node::peers() const
 			p->versionRev = -1;
 		}
 		p->latency = pi->second->latency();
-		p->role = RR->topology->isRootserver(pi->second->address()) ? ZT1_PEER_ROLE_ROOTSERVER : ZT1_PEER_ROLE_LEAF;
+		p->role = RR->topology->isRoot(pi->second->identity()) ? ZT1_PEER_ROLE_ROOT : ZT1_PEER_ROLE_LEAF;
 
 		std::vector<Path> paths(pi->second->paths());
 		Path *bestPath = pi->second->getBestPath(_now);
@@ -126,12 +126,12 @@ void Peer::received(
 	if ((pathIsConfirmed)&&((now - _lastAnnouncedTo) >= ((ZT_MULTICAST_LIKE_EXPIRE / 2) - 1000))) {
 		_lastAnnouncedTo = now;
 
-		const bool isRootserver = RR->topology->isRootserver(_id.address());
+		const bool isRoot = RR->topology->isRoot(_id);
 
 		Packet outp(_id.address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
 		const std::vector< SharedPtr<Network> > networks(RR->node->allNetworks());
 		for(std::vector< SharedPtr<Network> >::const_iterator n(networks.begin());n!=networks.end();++n) {
-			if ( (isRootserver) || ((*n)->isAllowed(_id.address())) ) {
+			if ( (isRoot) || ((*n)->isAllowed(_id.address())) ) {
 				const std::vector<MulticastGroup> mgs((*n)->allMulticastGroups());
 				for(std::vector<MulticastGroup>::const_iterator mg(mgs.begin());mg!=mgs.end();++mg) {
 					if ((outp.size() + 18) > ZT_UDP_DEFAULT_PAYLOAD_MTU) {
@@ -118,7 +118,7 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
 
 	// For all peers for whom we forgot an address, send a packet indirectly if
 	// they are still considered alive so that we will re-establish direct links.
-	SharedPtr<Peer> sn(RR->topology->getBestRootserver());
+	SharedPtr<Peer> sn(RR->topology->getBestRoot());
 	if (sn) {
 		Path *snp = sn->getBestPath(now);
 		if (snp) {
@@ -320,8 +320,8 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
 	 * P2 in randomized order in terms of which gets sent first. This is done
 	 * since in a few cases NAT-t can be sensitive to slight timing differences
 	 * in terms of when the two peers initiate. Normally this is accounted for
-	 * by the nearly-simultaneous RENDEZVOUS kickoff from the rootserver, but
-	 * given that rootservers are hosted on cloud providers this can in some
+	 * by the nearly-simultaneous RENDEZVOUS kickoff from the relay, but
+	 * given that relays are hosted on cloud providers this can in some
 	 * cases have a few ms of latency between packet departures. By randomizing
 	 * the order we make each attempted NAT-t favor one or the other going
 	 * first, meaning if it doesn't succeed the first time it might the second
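
The randomization this comment describes amounts to a coin flip per unite() attempt over which peer is kicked off first. A hypothetical stand-alone sketch (the real code builds and sends the two RENDEZVOUS packets; the random bit could come from the node PRNG, e.g. the `RR->prng->next32()` call seen elsewhere in this commit):

```cpp
#include <cstdint>
#include <utility>

// Order the two NAT-t targets randomly so that neither peer always
// initiates first; if traversal fails one way on this attempt, the
// next attempt is likely to try the opposite ordering.
template<typename PeerAddr>
static std::pair<PeerAddr,PeerAddr> randomizedKickoffOrder(const PeerAddr &p1,const PeerAddr &p2,uint32_t rnd)
{
	if ((rnd & 1) != 0)
		return std::make_pair(p2,p1);
	return std::make_pair(p1,p2);
}
```
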
@@ -566,7 +566,7 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void
 			SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
 			if ((!relayTo)||(!relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now()))) {
 				// Don't know peer or no direct path -- so relay via root server
-				relayTo = RR->topology->getBestRootserver();
+				relayTo = RR->topology->getBestRoot();
 				if (relayTo)
 					relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now());
 			}
@@ -642,7 +642,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
 				unite(source,destination,false);
 			} else {
 				// Don't know peer or no direct path -- so relay via root server
-				relayTo = RR->topology->getBestRootserver(&source,1,true);
+				relayTo = RR->topology->getBestRoot(&source,1,true);
 				if (relayTo)
 					relayTo->send(RR,packet->data(),packet->size(),RR->node->now());
 			}
@@ -712,13 +712,13 @@ void Switch::_handleBeacon(const InetAddress &fromAddr,const Buffer<ZT_PROTO_BEA
 
 Address Switch::_sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted)
 {
-	SharedPtr<Peer> rootserver(RR->topology->getBestRootserver(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
-	if (rootserver) {
-		Packet outp(rootserver->address(),RR->identity.address(),Packet::VERB_WHOIS);
+	SharedPtr<Peer> root(RR->topology->getBestRoot(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
+	if (root) {
+		Packet outp(root->address(),RR->identity.address(),Packet::VERB_WHOIS);
 		addr.appendTo(outp);
-		outp.armor(rootserver->key(),true);
-		if (rootserver->send(RR,outp.data(),outp.size(),RR->node->now()))
-			return rootserver->address();
+		outp.armor(root->key(),true);
+		if (root->send(RR,outp.data(),outp.size(),RR->node->now()))
+			return root->address();
 	}
 	return Address();
 }
@@ -752,7 +752,7 @@ bool Switch::_trySend(const Packet &packet,bool encrypt,uint64_t nwid)
 	}
 
 	if (!relay)
-		relay = RR->topology->getBestRootserver();
+		relay = RR->topology->getBestRoot();
 
 	if (!(relay)||(!(viaPath = relay->getBestPath(now))))
 		return false;
@@ -36,7 +36,7 @@ namespace ZeroTier {
 
 Topology::Topology(const RuntimeEnvironment *renv) :
 	RR(renv),
-	_amRootserver(false)
+	_amRoot(false)
 {
 }
 
@@ -44,16 +44,16 @@ Topology::~Topology()
 {
 }
 
-void Topology::setRootservers(const std::map< Identity,std::vector<InetAddress> > &sn)
+void Topology::setRootServers(const std::map< Identity,std::vector<InetAddress> > &sn)
 {
 	Mutex::Lock _l(_lock);
 
-	if (_rootservers == sn)
+	if (_roots == sn)
 		return; // no change
 
-	_rootservers = sn;
-	_rootserverAddresses.clear();
-	_rootserverPeers.clear();
+	_roots = sn;
+	_rootAddresses.clear();
+	_rootPeers.clear();
 	const uint64_t now = RR->node->now();
 
 	for(std::map< Identity,std::vector<InetAddress> >::const_iterator i(sn.begin());i!=sn.end();++i) {
@@ -64,17 +64,17 @@ void Topology::setRootservers(const std::map< Identity,std::vector<InetAddress>
 			for(std::vector<InetAddress>::const_iterator j(i->second.begin());j!=i->second.end();++j)
 				p->addPath(Path(*j,true));
 			p->use(now);
-			_rootserverPeers.push_back(p);
+			_rootPeers.push_back(p);
 		}
-		_rootserverAddresses.push_back(i->first.address());
+		_rootAddresses.push_back(i->first.address());
 	}
 
-	std::sort(_rootserverAddresses.begin(),_rootserverAddresses.end());
+	std::sort(_rootAddresses.begin(),_rootAddresses.end());
 
-	_amRootserver = (_rootservers.find(RR->identity) != _rootservers.end());
+	_amRoot = (_roots.find(RR->identity) != _roots.end());
 }
 
-void Topology::setRootservers(const Dictionary &sn)
+void Topology::setRootServers(const Dictionary &sn)
 {
 	std::map< Identity,std::vector<InetAddress> > m;
 	for(Dictionary::const_iterator d(sn.begin());d!=sn.end();++d) {
@@ -90,7 +90,7 @@ void Topology::setRootservers(const Dictionary &sn)
 			}
 		}
 	}
-	this->setRootservers(m);
+	this->setRootServers(m);
 }
 
 SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
@@ -141,28 +141,28 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
 	return SharedPtr<Peer>();
 }
 
-SharedPtr<Peer> Topology::getBestRootserver(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
+SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
 {
-	SharedPtr<Peer> bestRootserver;
+	SharedPtr<Peer> bestRoot;
 	const uint64_t now = RR->node->now();
 	Mutex::Lock _l(_lock);
 
-	if (_amRootserver) {
+	if (_amRoot) {
 		/* If I am a root server, the "best" root server is the one whose address
 		 * is numerically greater than mine (with wrap at top of list). This
 		 * causes packets searching for a route to pretty much literally
 		 * circumnavigate the globe rather than bouncing between just two. */
 
-		if (_rootserverAddresses.size() > 1) { // gotta be one other than me for this to work
-			std::vector<Address>::const_iterator sna(std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),RR->identity.address()));
-			if (sna != _rootserverAddresses.end()) { // sanity check -- _amRootserver should've been false in this case
+		if (_rootAddresses.size() > 1) { // gotta be one other than me for this to work
+			std::vector<Address>::const_iterator sna(std::find(_rootAddresses.begin(),_rootAddresses.end(),RR->identity.address()));
+			if (sna != _rootAddresses.end()) { // sanity check -- _amRoot should've been false in this case
 				for(;;) {
-					if (++sna == _rootserverAddresses.end())
-						sna = _rootserverAddresses.begin(); // wrap around at end
+					if (++sna == _rootAddresses.end())
+						sna = _rootAddresses.begin(); // wrap around at end
 					if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
 						std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.find(*sna));
 						if ((p != _activePeers.end())&&(p->second->hasActiveDirectPath(now))) {
-							bestRootserver = p->second;
+							bestRoot = p->second;
 							break;
 						}
 					}
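
The wrap-around selection in the hunk above makes root-to-root forwarding walk the whole sorted address list instead of bouncing between two roots. A compact, stand-alone sketch of that ring step, with `uint64_t` standing in for ZeroTier's `Address` type (illustrative only, not the actual Topology code):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Pick the next root after `me` in sorted address order, wrapping at
// the end of the list. Returns 0 if `me` is not in the root list or
// there is no other root to choose.
static uint64_t nextRootInRing(const std::vector<uint64_t> &sortedRoots,uint64_t me)
{
	if (sortedRoots.size() < 2)
		return 0; // gotta be one other than me for this to work
	std::vector<uint64_t>::const_iterator sna(std::find(sortedRoots.begin(),sortedRoots.end(),me));
	if (sna == sortedRoots.end())
		return 0; // not in the root list
	if (++sna == sortedRoots.end())
		sna = sortedRoots.begin(); // wrap around at end
	return *sna;
}
```
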
@@ -173,77 +173,84 @@ SharedPtr<Peer> Topology::getBestRootserver(const Address *avoid,unsigned int av
 		/* If I am not a root server, the best root server is the active one with
 		 * the lowest latency. */
 
-		unsigned int l,bestRootserverLatency = 65536;
+		unsigned int l,bestLatency = 65536;
 		uint64_t lds,ldr;
 
-		// First look for a best rootserver by comparing latencies, but exclude
+		// First look for a best root by comparing latencies, but exclude
 		// root servers that have not responded to direct messages in order to
 		// try to exclude any that are dead or unreachable.
-		for(std::vector< SharedPtr<Peer> >::const_iterator sn(_rootserverPeers.begin());sn!=_rootserverPeers.end();) {
+		for(std::vector< SharedPtr<Peer> >::const_iterator sn(_rootPeers.begin());sn!=_rootPeers.end();) {
 			// Skip explicitly avoided relays
 			for(unsigned int i=0;i<avoidCount;++i) {
 				if (avoid[i] == (*sn)->address())
-					goto keep_searching_for_rootservers;
+					goto keep_searching_for_roots;
 			}
 
 			// Skip possibly comatose or unreachable relays
 			lds = (*sn)->lastDirectSend();
 			ldr = (*sn)->lastDirectReceive();
 			if ((lds)&&(lds > ldr)&&((lds - ldr) > ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD))
-				goto keep_searching_for_rootservers;
+				goto keep_searching_for_roots;
 
 			if ((*sn)->hasActiveDirectPath(now)) {
 				l = (*sn)->latency();
-				if (bestRootserver) {
-					if ((l)&&(l < bestRootserverLatency)) {
-						bestRootserverLatency = l;
-						bestRootserver = *sn;
+				if (bestRoot) {
+					if ((l)&&(l < bestLatency)) {
+						bestLatency = l;
+						bestRoot = *sn;
 					}
 				} else {
 					if (l)
-						bestRootserverLatency = l;
-					bestRootserver = *sn;
+						bestLatency = l;
+					bestRoot = *sn;
 				}
 			}
 
-keep_searching_for_rootservers:
+keep_searching_for_roots:
 			++sn;
 		}
 
-		if (bestRootserver) {
-			bestRootserver->use(now);
-			return bestRootserver;
+		if (bestRoot) {
+			bestRoot->use(now);
+			return bestRoot;
 		} else if (strictAvoid)
 			return SharedPtr<Peer>();
 
 		// If we have nothing from above, just pick one without avoidance criteria.
-		for(std::vector< SharedPtr<Peer> >::const_iterator sn=_rootserverPeers.begin();sn!=_rootserverPeers.end();++sn) {
+		for(std::vector< SharedPtr<Peer> >::const_iterator sn=_rootPeers.begin();sn!=_rootPeers.end();++sn) {
 			if ((*sn)->hasActiveDirectPath(now)) {
 				unsigned int l = (*sn)->latency();
-				if (bestRootserver) {
-					if ((l)&&(l < bestRootserverLatency)) {
-						bestRootserverLatency = l;
-						bestRootserver = *sn;
+				if (bestRoot) {
+					if ((l)&&(l < bestLatency)) {
+						bestLatency = l;
+						bestRoot = *sn;
 					}
 				} else {
 					if (l)
-						bestRootserverLatency = l;
-					bestRootserver = *sn;
+						bestLatency = l;
+					bestRoot = *sn;
 				}
 			}
 		}
 	}
 
-	if (bestRootserver)
-		bestRootserver->use(now);
-	return bestRootserver;
+	if (bestRoot)
+		bestRoot->use(now);
+	return bestRoot;
 }
 
+bool Topology::isRoot(const Identity &id) const
+	throw()
+{
+	Mutex::Lock _l(_lock);
+	return (_roots.count(id) != 0);
+}
+
 void Topology::clean(uint64_t now)
 {
 	Mutex::Lock _l(_lock);
 	for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();) {
-		if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),p->first) == _rootserverAddresses.end())) {
+		if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->first) == _rootAddresses.end())) {
 			_activePeers.erase(p++);
 		} else ++p;
 	}
@@ -59,11 +59,9 @@ public:
 	~Topology();
 
 	/**
-	 * Set up rootservers for this network
-	 *
-	 * @param sn Rootservers for this network
+	 * @param sn Root server identities and addresses
 	 */
-	void setRootservers(const std::map< Identity,std::vector<InetAddress> > &sn);
+	void setRootServers(const std::map< Identity,std::vector<InetAddress> > &sn);
 
 	/**
 	 * Set up root servers for this network
@@ -71,9 +69,9 @@ public:
 	 * This performs no signature verification of any kind. The caller must
 	 * check the signature of the root topology dictionary first.
 	 *
-	 * @param sn Rootservers dictionary from root-topology
+	 * @param sn 'rootservers' key from root-topology Dictionary (deserialized as Dictionary)
 	 */
-	void setRootservers(const Dictionary &sn);
+	void setRootServers(const Dictionary &sn);
 
 	/**
 	 * Add a peer to database
@@ -97,19 +95,10 @@ public:
 	/**
 	 * @return Vector of peers that are root servers
 	 */
-	inline std::vector< SharedPtr<Peer> > rootserverPeers() const
+	inline std::vector< SharedPtr<Peer> > rootPeers() const
 	{
 		Mutex::Lock _l(_lock);
-		return _rootserverPeers;
-	}
-
-	/**
-	 * @return Number of rootservers
-	 */
-	inline unsigned int numRootservers() const
-	{
-		Mutex::Lock _l(_lock);
-		return (unsigned int)_rootserverPeers.size();
+		return _rootPeers;
 	}
 
 	/**
@@ -117,9 +106,9 @@ public:
 	 *
 	 * @return Root server with lowest latency or NULL if none
 	 */
-	inline SharedPtr<Peer> getBestRootserver()
+	inline SharedPtr<Peer> getBestRoot()
 	{
-		return getBestRootserver((const Address *)0,0,false);
+		return getBestRoot((const Address *)0,0,false);
 	}
 
 	/**
@@ -132,28 +121,24 @@ public:
 	 * @param avoid Nodes to avoid
 	 * @param avoidCount Number of nodes to avoid
 	 * @param strictAvoid If false, consider avoided root servers anyway if no non-avoid root servers are available
-	 * @return Rootserver or NULL if none
+	 * @return Root server or NULL if none available
 	 */
-	SharedPtr<Peer> getBestRootserver(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
+	SharedPtr<Peer> getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
 
 	/**
-	 * @param zta ZeroTier address
+	 * @param id Identity to check
 	 * @return True if this is a designated root server
 	 */
-	inline bool isRootserver(const Address &zta) const
-		throw()
-	{
-		Mutex::Lock _l(_lock);
-		return (std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),zta) != _rootserverAddresses.end());
-	}
+	bool isRoot(const Identity &id) const
+		throw();
 
 	/**
 	 * @return Vector of root server addresses
 	 */
-	inline std::vector<Address> rootserverAddresses() const
+	inline std::vector<Address> rootAddresses() const
 	{
 		Mutex::Lock _l(_lock);
-		return _rootserverAddresses;
+		return _rootAddresses;
 	}
 
 	/**
@@ -206,13 +191,13 @@ private:
 	const RuntimeEnvironment *RR;
 
 	std::map< Address,SharedPtr<Peer> > _activePeers;
-	std::map< Identity,std::vector<InetAddress> > _rootservers;
-	std::vector< Address > _rootserverAddresses;
-	std::vector< SharedPtr<Peer> > _rootserverPeers;
+	std::map< Identity,std::vector<InetAddress> > _roots;
+	std::vector< Address > _rootAddresses;
+	std::vector< SharedPtr<Peer> > _rootPeers;
 
 	Mutex _lock;
 
-	bool _amRootserver;
+	bool _amRoot;
 };
 
 } // namespace ZeroTier
@@ -212,8 +212,8 @@ static void _jsonAppend(unsigned int depth,std::string &buf,const ZT1_Peer *peer
 	const char *prole = "";
 	switch(peer->role) {
 		case ZT1_PEER_ROLE_LEAF: prole = "LEAF"; break;
-		case ZT1_PEER_ROLE_HUB: prole = "HUB"; break;
-		case ZT1_PEER_ROLE_ROOTSERVER: prole = "ROOT"; break;
+		case ZT1_PEER_ROLE_RELAY: prole = "RELAY"; break;
+		case ZT1_PEER_ROLE_ROOT: prole = "ROOT"; break;
 	}
 
 	Utils::snprintf(json,sizeof(json),