Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-01-18 18:56:24 +00:00)

Merge branch 'adamierymenko-dev' into android-jni

Commit dfdd46db80
@@ -164,7 +164,7 @@ Users behind certain types of firewalls and "symmetric" NAT devices may not able
 
 If you're interested, there's a [technical deep dive about NAT traversal on our blog](https://www.zerotier.com/blog/?p=226). A troubleshooting tool to help you diagnose NAT issues is planned for the future as are uPnP/IGD/NAT-PMP and IPv6 transport.
 
-If a firewall between you and the Internet blocks ZeroTier's UDP traffic, you will fall back to last-resort TCP tunneling to supernodes over port 443 (https impersonation). This will work almost anywhere but is *very slow* compared to UDP or direct peer to peer connectivity.
+If a firewall between you and the Internet blocks ZeroTier's UDP traffic, you will fall back to last-resort TCP tunneling to rootservers over port 443 (https impersonation). This will work almost anywhere but is *very slow* compared to UDP or direct peer to peer connectivity.
 
 ### License
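The connectivity cascade this README describes has three tiers: direct UDP, UDP relayed through a rootserver, and last-resort TCP to a rootserver on port 443. A hypothetical distillation of that ordering; the enum and function are illustrative, not ZeroTier's actual API:

    #include <cstdio>

    enum Transport { DIRECT_UDP, RELAYED_UDP, TCP_443 };

    // Prefer a direct UDP path; fall back to relaying UDP through a
    // rootserver; as a last resort, tunnel TCP to a rootserver on 443.
    static Transport pickTransport(bool haveDirectUdp, bool haveRootserverUdp)
    {
        if (haveDirectUdp) return DIRECT_UDP;
        if (haveRootserverUdp) return RELAYED_UDP;
        return TCP_443; // works almost anywhere, but very slow
    }

    int main()
    {
        std::printf("%d\n", (int)pickTransport(false, true)); // prints 1 (RELAYED_UDP)
        return 0;
    }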
@@ -164,8 +164,8 @@ SqliteNetworkController::SqliteNetworkController(const char *dbPath) :
 			||(sqlite3_prepare_v2(_db,"DELETE FROM IpAssignment WHERE networkId = ? AND nodeId = ?",-1,&_sDeleteIpAllocations,(const char **)0) != SQLITE_OK)
 			||(sqlite3_prepare_v2(_db,"SELECT nodeId,phyAddress FROM Relay WHERE networkId = ? ORDER BY nodeId ASC",-1,&_sGetRelays,(const char **)0) != SQLITE_OK)
 			||(sqlite3_prepare_v2(_db,"SELECT id FROM Network ORDER BY id ASC",-1,&_sListNetworks,(const char **)0) != SQLITE_OK)
-			||(sqlite3_prepare_v2(_db,"SELECT n.id FROM Member AS m,Node AS n WHERE m.networkId = ? AND n.id = m.nodeId ORDER BY n.id ASC",-1,&_sListNetworkMembers,(const char **)0) != SQLITE_OK)
-			||(sqlite3_prepare_v2(_db,"SELECT m.authorized,m.activeBridge,n.identity,n.lastAt,n.lastSeen,n.firstSeen FROM Member AS m,Node AS n WHERE m.networkId = ? AND m.nodeId = ?",-1,&_sGetMember2,(const char **)0) != SQLITE_OK)
+			||(sqlite3_prepare_v2(_db,"SELECT m.nodeId FROM Member AS m WHERE m.networkId = ? ORDER BY m.nodeId ASC",-1,&_sListNetworkMembers,(const char **)0) != SQLITE_OK)
+			||(sqlite3_prepare_v2(_db,"SELECT m.authorized,m.activeBridge,n.identity,n.lastAt,n.lastSeen,n.firstSeen FROM Member AS m JOIN Node AS n ON n.id = m.nodeId WHERE m.networkId = ? AND m.nodeId = ?",-1,&_sGetMember2,(const char **)0) != SQLITE_OK)
 			||(sqlite3_prepare_v2(_db,"SELECT ipNetwork,ipNetmaskBits,ipVersion FROM IpAssignmentPool WHERE networkId = ? ORDER BY ipNetwork ASC",-1,&_sGetIpAssignmentPools2,(const char **)0) != SQLITE_OK)
 			||(sqlite3_prepare_v2(_db,"SELECT ruleNo,nodeId,vlanId,vlanPcp,etherType,macSource,macDest,ipSource,ipDest,ipTos,ipProtocol,ipSourcePort,ipDestPort,\"flags\",invFlags,\"action\" FROM Rule WHERE networkId = ? ORDER BY ruleNo ASC",-1,&_sListRules,(const char **)0) != SQLITE_OK)
 			||(sqlite3_prepare_v2(_db,"INSERT INTO Rule (networkId,ruleNo,nodeId,vlanId,vlanPcP,etherType,macSource,macDest,ipSource,ipDest,ipTos,ipProtocol,ipSourcePort,ipDestPort,\"action\") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",-1,&_sCreateRule,(const char **)0) != SQLITE_OK)
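Two of these statements change behavior: `_sListNetworkMembers` now reads from `Member` alone, so members that lack a matching `Node` row are no longer silently dropped, and `_sGetMember2` gains the `ON n.id = m.nodeId` predicate that the old comma-style join was missing. For readers unfamiliar with the pattern, a minimal self-contained sketch of the prepare-once/bind-step-reset lifecycle this constructor uses (the in-memory table and the IDs are invented for illustration):

    #include <sqlite3.h>
    #include <cstdio>

    int main()
    {
        sqlite3 *db = (sqlite3 *)0;
        if (sqlite3_open(":memory:", &db) != SQLITE_OK)
            return 1;
        sqlite3_exec(db, "CREATE TABLE Member (networkId TEXT, nodeId TEXT)", 0, 0, 0);
        sqlite3_exec(db, "INSERT INTO Member VALUES ('8056c2e21c000001','89e92ceee5')", 0, 0, 0);

        // Prepare once (as the constructor above does for every statement)...
        sqlite3_stmt *s = (sqlite3_stmt *)0;
        if (sqlite3_prepare_v2(db, "SELECT m.nodeId FROM Member AS m WHERE m.networkId = ? ORDER BY m.nodeId ASC", -1, &s, (const char **)0) != SQLITE_OK)
            return 1;

        // ...then bind, step, and reset on every use.
        sqlite3_bind_text(s, 1, "8056c2e21c000001", -1, SQLITE_STATIC);
        while (sqlite3_step(s) == SQLITE_ROW)
            std::printf("member: %s\n", (const char *)sqlite3_column_text(s, 0));
        sqlite3_reset(s);

        sqlite3_finalize(s);
        sqlite3_close(db);
        return 0;
    }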
@@ -747,6 +747,7 @@ unsigned int SqliteNetworkController::handleControlPlaneHttpPOST(
 			} // else 404
 
 		} else {
+			std::vector<std::string> path_copy(path);
 
 			if (!networkExists) {
 				if (path[1].substr(10) == "______") {
@@ -786,6 +787,7 @@ unsigned int SqliteNetworkController::handleControlPlaneHttpPOST(
 					sqlite3_bind_int64(_sCreateNetwork,3,(long long)OSUtils::now());
 					if (sqlite3_step(_sCreateNetwork) != SQLITE_DONE)
 						return 500;
+					path_copy[1].assign(nwids);
 				}
 
 				json_value *j = json_parse(body.c_str(),body.length());
@@ -1041,7 +1043,7 @@ unsigned int SqliteNetworkController::handleControlPlaneHttpPOST(
 				sqlite3_bind_text(_sSetNetworkRevision,2,nwids,16,SQLITE_STATIC);
 				sqlite3_step(_sSetNetworkRevision);
 
-				return _doCPGet(path,urlArgs,headers,body,responseBody,responseContentType);
+				return _doCPGet(path_copy,urlArgs,headers,body,responseBody,responseContentType);
 			}
 
 		} // else 404
@@ -1169,7 +1171,7 @@ unsigned int SqliteNetworkController::_doCPGet(
 					sqlite3_bind_text(_sGetIpAssignmentsForNode2,1,nwids,16,SQLITE_STATIC);
 					sqlite3_bind_text(_sGetIpAssignmentsForNode2,2,addrs,10,SQLITE_STATIC);
 					bool firstIp = true;
-					while (sqlite3_step(_sGetIpAssignmentPools2) == SQLITE_ROW) {
+					while (sqlite3_step(_sGetIpAssignmentsForNode2) == SQLITE_ROW) {
 						InetAddress ip((const void *)sqlite3_column_blob(_sGetIpAssignmentsForNode2,0),(sqlite3_column_int(_sGetIpAssignmentsForNode2,2) == 6) ? 16 : 4,(unsigned int)sqlite3_column_int(_sGetIpAssignmentPools2,1));
 						responseBody.append(firstIp ? "\"" : ",\"");
 						firstIp = false;
@@ -251,7 +251,7 @@ enum ZT1_Event
 	/**
 	 * A more recent version was observed on the network
 	 *
-	 * Right now this is only triggered if a hub or supernode reports a
+	 * Right now this is only triggered if a hub or rootserver reports a
 	 * more recent version, and only once. It can be used to trigger a
 	 * software update check.
 	 *
@@ -560,7 +560,7 @@ typedef struct
 enum ZT1_PeerRole {
 	ZT1_PEER_ROLE_LEAF = 0,      // ordinary node
 	ZT1_PEER_ROLE_HUB = 1,       // locally federated hub
-	ZT1_PEER_ROLE_SUPERNODE = 2  // planetary supernode
+	ZT1_PEER_ROLE_ROOTSERVER = 2 // planetary rootserver
 };
 
 /**
@@ -174,8 +174,8 @@ jobject createPeerRole(JNIEnv *env, ZT1_PeerRole role)
 	case ZT1_PEER_ROLE_HUB:
 		fieldName = "PEER_ROLE_HUB";
 		break;
-	case ZT1_PEER_ROLE_SUPERNODE:
-		fieldName = "PEER_ROLE_SUPERNODE";
+	case ZT1_PEER_ROLE_ROOTSERVER:
+		fieldName = "PEER_ROLE_ROOTSERVER";
 		break;
 	}
 
@@ -90,7 +90,7 @@ public enum Event {
     /**
      * A more recent version was observed on the network
      *
-     * <p>Right now this is only triggered if a hub or supernode reports a
+     * <p>Right now this is only triggered if a hub or rootserver reports a
      * more recent version, and only once. It can be used to trigger a
      * software update check.</p>
      *
@@ -39,7 +39,7 @@ public enum PeerRole {
     PEER_ROLE_HUB,
 
     /**
-     * planetary supernode
+     * planetary rootserver
      */
-    PEER_ROLE_SUPERNODE
+    PEER_ROLE_ROOTSERVER
 }
@@ -254,7 +254,7 @@
 /**
  * Delay between scans of the topology active peer DB for peers that need ping
  *
- * This is also how often pings will be retried to upstream peers (supernodes)
+ * This is also how often pings will be retried to upstream peers (rootservers)
  * constantly until something is heard.
  */
 #define ZT_PING_CHECK_INVERVAL 6250
@@ -279,9 +279,9 @@
  *
  * When we send something (including frames), we generally expect a response.
  * Switching relays if no response in a short period of time causes more
- * rapid failover if a supernode goes down or becomes unreachable. In the
+ * rapid failover if a rootserver goes down or becomes unreachable. In the
  * mistaken case, little harm is done as it'll pick the next-fastest
- * supernode and will switch back eventually.
+ * rootserver and will switch back eventually.
  */
 #define ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD 10000
 
@@ -32,9 +32,8 @@
 
 namespace ZeroTier {
 
-void Dictionary::fromString(const char *s,unsigned int maxlen)
+void Dictionary::updateFromString(const char *s,unsigned int maxlen)
 {
-	clear();
 	bool escapeState = false;
 	std::string keyBuf;
 	std::string *element = &keyBuf;
@@ -75,6 +74,12 @@ void Dictionary::fromString(const char *s,unsigned int maxlen)
 		(*this)[keyBuf];
 }
 
+void Dictionary::fromString(const char *s,unsigned int maxlen)
+{
+	clear();
+	updateFromString(s,maxlen);
+}
+
 bool Dictionary::sign(const Identity &id,uint64_t now)
 {
 	try {
@@ -259,6 +259,9 @@ public:
 	 */
 	void fromString(const char *s,unsigned int maxlen);
 	inline void fromString(const std::string &s) { fromString(s.c_str(),(unsigned int)s.length()); }
+	void updateFromString(const char *s,unsigned int maxlen);
+	inline void update(const char *s,unsigned int maxlen) { updateFromString(s, maxlen); }
+	inline void update(const std::string &s) { updateFromString(s.c_str(),(unsigned int)s.length()); }
 
 	/**
 	 * @return True if this dictionary is cryptographically signed
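The split above gives Dictionary two parsing modes: fromString() clears and replaces, while updateFromString()/update() overlay parsed keys onto the existing contents. A minimal sketch of those semantics, under the assumption (true of this era of the code) that Dictionary derives from std::map<std::string,std::string>; the helper names here are illustrative:

    #include <cassert>
    #include <map>
    #include <string>

    typedef std::map<std::string,std::string> Dict;

    // update(): overlay semantics -- keys absent from 'incoming' survive
    static void overlay(Dict &d, const Dict &incoming)
    {
        for (Dict::const_iterator i(incoming.begin()); i != incoming.end(); ++i)
            d[i->first] = i->second;
    }

    // fromString(): replace semantics -- clear first, then overlay
    static void replaceAll(Dict &d, const Dict &incoming)
    {
        d.clear();
        overlay(d, incoming);
    }

    int main()
    {
        Dict d, incoming;
        d["rootservers"] = "new-style entries";
        incoming["supernodes"] = "legacy entries";

        overlay(d, incoming);    // both keys present afterward
        assert(d.size() == 2);

        replaceAll(d, incoming); // only the incoming key remains
        assert(d.size() == 1);
        return 0;
    }

Node.cpp below relies on exactly this: it builds the rootserver list from the new "rootservers" key and then overlays any legacy "supernodes" entries so old root topology files keep working.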
@@ -110,7 +110,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
 
 		case Packet::ERROR_OBJ_NOT_FOUND:
 			if (inReVerb == Packet::VERB_WHOIS) {
-				if (RR->topology->isSupernode(peer->address()))
+				if (RR->topology->isRootserver(peer->address()))
 					RR->sw->cancelWhoisRequest(Address(field(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH));
 			} else if (inReVerb == Packet::VERB_NETWORK_CONFIG_REQUEST) {
 				SharedPtr<Network> network(RR->node->network(at<uint64_t>(ZT_PROTO_VERB_ERROR_IDX_PAYLOAD)));
@@ -128,7 +128,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
 			break;
 
 		case Packet::ERROR_IDENTITY_COLLISION:
-			if (RR->topology->isSupernode(peer->address()))
+			if (RR->topology->isRootserver(peer->address()))
 				RR->node->postEvent(ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION);
 			break;
 
@@ -268,7 +268,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
 		peer->setRemoteVersion(protoVersion,vMajor,vMinor,vRevision);
 
 		bool trusted = false;
-		if (RR->topology->isSupernode(id.address())) {
+		if (RR->topology->isRootserver(id.address())) {
 			RR->node->postNewerVersionIfNewer(vMajor,vMinor,vRevision);
 			trusted = true;
 		}
@@ -353,7 +353,7 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
 			peer->setRemoteVersion(vProto,vMajor,vMinor,vRevision);
 
 			bool trusted = false;
-			if (RR->topology->isSupernode(peer->address())) {
+			if (RR->topology->isRootserver(peer->address())) {
 				RR->node->postNewerVersionIfNewer(vMajor,vMinor,vRevision);
 				trusted = true;
 			}
@@ -362,10 +362,10 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
 		} break;
 
 		case Packet::VERB_WHOIS: {
-			// Right now only supernodes are allowed to send OK(WHOIS) to prevent
+			// Right now only rootservers are allowed to send OK(WHOIS) to prevent
 			// poisoning attacks. Further decentralization will require some other
 			// kind of trust mechanism.
-			if (RR->topology->isSupernode(peer->address())) {
+			if (RR->topology->isRootserver(peer->address())) {
 				const Identity id(*this,ZT_PROTO_VERB_WHOIS__OK__IDX_IDENTITY);
 				if (id.locallyValidate())
 					RR->sw->doAnythingWaitingForPeer(RR->topology->addPeer(SharedPtr<Peer>(new Peer(RR->identity,id))));
@@ -689,6 +689,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
 					outp.append((uint16_t)netconfStr.length());
 					outp.append(netconfStr.data(),(unsigned int)netconfStr.length());
 					outp.compress();
+					outp.armor(peer->key(),true);
 					if (outp.size() > ZT_PROTO_MAX_PACKET_LENGTH) {
 						TRACE("NETWORK_CONFIG_REQUEST failed: internal error: netconf size %u is too large",(unsigned int)netconfStr.length());
 					} else {
@@ -216,7 +216,7 @@ void Multicaster::send(
 
 			if ((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY) {
 				gs.lastExplicitGather = now;
-				SharedPtr<Peer> sn(RR->topology->getBestSupernode());
+				SharedPtr<Peer> sn(RR->topology->getBestRootserver());
 				if (sn) {
 					TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
 
@@ -271,12 +271,12 @@ void Multicaster::send(
 		delete [] indexes;
 
 #ifdef ZT_SUPPORT_LEGACY_MULTICAST
-		// This sends a P5 multicast up to our supernode, who then
+		// This sends a P5 multicast up to our rootserver, who then
 		// redistributes it manually down to all <1.0.0 peers for
 		// legacy support. These peers don't support the new multicast
 		// frame type, so even if they receive it they will ignore it.
 		{
-			SharedPtr<Peer> sn(RR->topology->getBestSupernode());
+			SharedPtr<Peer> sn(RR->topology->getBestRootserver());
 			if (sn) {
 				uint32_t rn = RR->prng->next32();
 				Packet outp(sn->address(),RR->identity.address(),Packet::VERB_P5_MULTICAST_FRAME);
@@ -518,13 +518,13 @@ public:
 		RR(renv),
 		_now(renv->node->now()),
 		_network(nw),
-		_supernodeAddresses(renv->topology->supernodeAddresses()),
+		_rootserverAddresses(renv->topology->rootserverAddresses()),
 		_allMulticastGroups(nw->_allMulticastGroups())
 	{}
 
 	inline void operator()(Topology &t,const SharedPtr<Peer> &p)
 	{
-		if ( ( (p->hasActiveDirectPath(_now)) && (_network->_isAllowed(p->address())) ) || (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->address()) != _supernodeAddresses.end()) ) {
+		if ( ( (p->hasActiveDirectPath(_now)) && (_network->_isAllowed(p->address())) ) || (std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),p->address()) != _rootserverAddresses.end()) ) {
 			Packet outp(p->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
 
 			for(std::vector<MulticastGroup>::iterator mg(_allMulticastGroups.begin());mg!=_allMulticastGroups.end();++mg) {
@@ -551,7 +551,7 @@ private:
 	const RuntimeEnvironment *RR;
 	uint64_t _now;
 	Network *_network;
-	std::vector<Address> _supernodeAddresses;
+	std::vector<Address> _rootserverAddresses;
 	std::vector<MulticastGroup> _allMulticastGroups;
 };
 
@@ -133,7 +133,9 @@ Node::Node(
 			if (!rt.size())
 				rt.fromString(ZT_DEFAULTS.defaultRootTopology);
 		}
-		RR->topology->setSupernodes(Dictionary(rt.get("supernodes","")));
+		Dictionary rootservers(rt.get("rootservers",""));
+		rootservers.update(rt.get("supernodes",""));
+		RR->topology->setRootservers(rootservers);
 
 		postEvent(ZT1_EVENT_UP);
 	}
@@ -189,7 +191,7 @@ public:
 		RR(renv),
 		_now(now),
 		_relays(relays),
-		_supernodes(RR->topology->supernodeAddresses())
+		_rootservers(RR->topology->rootserverAddresses())
 	{
 	}
 
@@ -205,7 +207,7 @@ public:
 			}
 		}
 
-		if ((isRelay)||(std::find(_supernodes.begin(),_supernodes.end(),p->address()) != _supernodes.end())) {
+		if ((isRelay)||(std::find(_rootservers.begin(),_rootservers.end(),p->address()) != _rootservers.end())) {
 			p->doPingAndKeepalive(RR,_now);
 			if (p->lastReceive() > lastReceiveFromUpstream)
 				lastReceiveFromUpstream = p->lastReceive();
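A hedged reduction of the keepalive policy in this functor: upstream peers (per-network relays and rootservers) are always pinged so a path to the infrastructure stays warm, while ordinary peers would only be pinged while a conversation is active. Names and types here are illustrative:

    #include <algorithm>
    #include <vector>

    typedef unsigned long long Addr; // stands in for ZeroTier's 40-bit Address

    // Always keep upstream links (relays, rootservers) alive; ping ordinary
    // peers only if they have been active recently.
    static bool shouldPing(Addr peer, bool recentlyActive,
                           const std::vector<Addr> &relays,
                           const std::vector<Addr> &rootservers)
    {
        const bool isRelay = std::find(relays.begin(), relays.end(), peer) != relays.end();
        const bool isRoot = std::find(rootservers.begin(), rootservers.end(), peer) != rootservers.end();
        return isRelay || isRoot || recentlyActive;
    }

    int main()
    {
        std::vector<Addr> relays, roots;
        roots.push_back(0x7e19876abaULL);
        return shouldPing(0x7e19876abaULL, false, relays, roots) ? 0 : 1; // pinged: it's a root
    }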
@@ -219,7 +221,7 @@ private:
 	const RuntimeEnvironment *RR;
 	uint64_t _now;
 	const std::vector< std::pair<Address,InetAddress> > &_relays;
-	std::vector<Address> _supernodes;
+	std::vector<Address> _rootservers;
 };
 
 ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
@@ -260,7 +262,7 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
 			}
 		}
 
-		// Ping living or supernode/relay peers
+		// Ping living or rootserver/relay peers
 		_PingPeersThatNeedPing pfunc(RR,now,networkRelays);
 		RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);
 
@@ -384,7 +386,7 @@ ZT1_PeerList *Node::peers() const
 			p->versionRev = -1;
 		}
 		p->latency = pi->second->latency();
-		p->role = RR->topology->isSupernode(pi->second->address()) ? ZT1_PEER_ROLE_SUPERNODE : ZT1_PEER_ROLE_LEAF;
+		p->role = RR->topology->isRootserver(pi->second->address()) ? ZT1_PEER_ROLE_ROOTSERVER : ZT1_PEER_ROLE_LEAF;
 
 		std::vector<Path> paths(pi->second->paths());
 		Path *bestPath = pi->second->getBestPath(_now);
@@ -626,7 +626,7 @@ public:
 	 *   [... additional tuples of network/address/adi ...]
 	 *
 	 * LIKEs are sent to peers with whom you have a direct peer to peer
-	 * connection, and always including supernodes.
+	 * connection, and always including rootservers.
 	 *
 	 * OK/ERROR are not generated.
 	 */
@@ -122,16 +122,16 @@ void Peer::received(
 
 	/* Announce multicast groups of interest to direct peers if they are
 	 * considered authorized members of a given network. Also announce to
-	 * supernodes and network controllers. */
+	 * rootservers and network controllers. */
 	if ((pathIsConfirmed)&&((now - _lastAnnouncedTo) >= ((ZT_MULTICAST_LIKE_EXPIRE / 2) - 1000))) {
 		_lastAnnouncedTo = now;
 
-		const bool isSupernode = RR->topology->isSupernode(_id.address());
+		const bool isRootserver = RR->topology->isRootserver(_id.address());
 
 		Packet outp(_id.address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
 		const std::vector< SharedPtr<Network> > networks(RR->node->allNetworks());
 		for(std::vector< SharedPtr<Network> >::const_iterator n(networks.begin());n!=networks.end();++n) {
-			if ( (isSupernode) || ((*n)->isAllowed(_id.address())) ) {
+			if ( (isRootserver) || ((*n)->isAllowed(_id.address())) ) {
 				const std::vector<MulticastGroup> mgs((*n)->allMulticastGroups());
 				for(std::vector<MulticastGroup>::const_iterator mg(mgs.begin());mg!=mgs.end();++mg) {
 					if ((outp.size() + 18) > ZT_UDP_DEFAULT_PAYLOAD_MTU) {
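Each MULTICAST_LIKE entry in the loop above is 18 bytes (an 8-byte network ID, 6-byte multicast MAC, and 4-byte ADI), and the packet is flushed and restarted whenever the next entry would push it past the UDP payload MTU. A hedged, self-contained sketch of that chunking pattern; the constants and the printf stand-in for the real send are illustrative:

    #include <cstdio>
    #include <vector>

    static const size_t kEntrySize = 18;    // 8B network ID + 6B multicast MAC + 4B ADI
    static const size_t kPayloadMtu = 1444; // illustrative UDP payload budget

    // Append fixed-size records, flushing before the buffer would overflow.
    static void sendLikes(size_t entryCount, const unsigned char *entries)
    {
        std::vector<unsigned char> packet;
        for (size_t i = 0; i < entryCount; ++i) {
            if (packet.size() + kEntrySize > kPayloadMtu) {
                std::printf("flush %zu bytes\n", packet.size());
                packet.clear(); // real code starts a fresh MULTICAST_LIKE packet here
            }
            packet.insert(packet.end(), entries + (i * kEntrySize), entries + ((i + 1) * kEntrySize));
        }
        if (!packet.empty())
            std::printf("flush %zu bytes\n", packet.size());
    }

    int main()
    {
        unsigned char raw[18 * 100] = {0}; // 100 dummy entries
        sendLikes(100, raw);
        return 0;
    }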
@@ -118,7 +118,7 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
 
 		// For all peers for whom we forgot an address, send a packet indirectly if
 		// they are still considered alive so that we will re-establish direct links.
-		SharedPtr<Peer> sn(RR->topology->getBestSupernode());
+		SharedPtr<Peer> sn(RR->topology->getBestRootserver());
 		if (sn) {
 			Path *snp = sn->getBestPath(now);
 			if (snp) {
@@ -320,8 +320,8 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
 	 * P2 in randomized order in terms of which gets sent first. This is done
 	 * since in a few cases NAT-t can be sensitive to slight timing differences
 	 * in terms of when the two peers initiate. Normally this is accounted for
-	 * by the nearly-simultaneous RENDEZVOUS kickoff from the supernode, but
-	 * given that supernodes are hosted on cloud providers this can in some
+	 * by the nearly-simultaneous RENDEZVOUS kickoff from the rootserver, but
+	 * given that rootservers are hosted on cloud providers this can in some
 	 * cases have a few ms of latency between packet departures. By randomizing
 	 * the order we make each attempted NAT-t favor one or the other going
 	 * first, meaning if it doesn't succeed the first time it might the second
@@ -565,8 +565,8 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void
 			// It wouldn't hurt anything, just redundant and unnecessary.
 			SharedPtr<Peer> relayTo = RR->topology->getPeer(destination);
 			if ((!relayTo)||(!relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now()))) {
-				// Don't know peer or no direct path -- so relay via supernode
-				relayTo = RR->topology->getBestSupernode();
+				// Don't know peer or no direct path -- so relay via rootserver
+				relayTo = RR->topology->getBestRootserver();
 				if (relayTo)
 					relayTo->send(RR,fragment.data(),fragment.size(),RR->node->now());
 			}
@@ -641,8 +641,8 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
 			if ((relayTo)&&((relayTo->send(RR,packet->data(),packet->size(),RR->node->now())))) {
 				unite(source,destination,false);
 			} else {
-				// Don't know peer or no direct path -- so relay via supernode
-				relayTo = RR->topology->getBestSupernode(&source,1,true);
+				// Don't know peer or no direct path -- so relay via rootserver
+				relayTo = RR->topology->getBestRootserver(&source,1,true);
 				if (relayTo)
 					relayTo->send(RR,packet->data(),packet->size(),RR->node->now());
 			}
@@ -712,13 +712,13 @@ void Switch::_handleBeacon(const InetAddress &fromAddr,const Buffer<ZT_PROTO_BEA
 
 Address Switch::_sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted)
 {
-	SharedPtr<Peer> supernode(RR->topology->getBestSupernode(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
-	if (supernode) {
-		Packet outp(supernode->address(),RR->identity.address(),Packet::VERB_WHOIS);
+	SharedPtr<Peer> rootserver(RR->topology->getBestRootserver(peersAlreadyConsulted,numPeersAlreadyConsulted,false));
+	if (rootserver) {
+		Packet outp(rootserver->address(),RR->identity.address(),Packet::VERB_WHOIS);
 		addr.appendTo(outp);
-		outp.armor(supernode->key(),true);
-		if (supernode->send(RR,outp.data(),outp.size(),RR->node->now()))
-			return supernode->address();
+		outp.armor(rootserver->key(),true);
+		if (rootserver->send(RR,outp.data(),outp.size(),RR->node->now()))
+			return rootserver->address();
 	}
 	return Address();
 }
@@ -752,7 +752,7 @@ bool Switch::_trySend(const Packet &packet,bool encrypt,uint64_t nwid)
 		}
 
 		if (!relay)
-			relay = RR->topology->getBestSupernode();
+			relay = RR->topology->getBestRootserver();
 
 		if (!(relay)||(!(viaPath = relay->getBestPath(now))))
 			return false;
@@ -36,7 +36,7 @@ namespace ZeroTier {
 
 Topology::Topology(const RuntimeEnvironment *renv) :
 	RR(renv),
-	_amSupernode(false)
+	_amRootserver(false)
 {
 }
 
@@ -44,16 +44,16 @@ Topology::~Topology()
 {
 }
 
-void Topology::setSupernodes(const std::map< Identity,std::vector<InetAddress> > &sn)
+void Topology::setRootservers(const std::map< Identity,std::vector<InetAddress> > &sn)
 {
 	Mutex::Lock _l(_lock);
 
-	if (_supernodes == sn)
+	if (_rootservers == sn)
 		return; // no change
 
-	_supernodes = sn;
-	_supernodeAddresses.clear();
-	_supernodePeers.clear();
+	_rootservers = sn;
+	_rootserverAddresses.clear();
+	_rootserverPeers.clear();
 	const uint64_t now = RR->node->now();
 
 	for(std::map< Identity,std::vector<InetAddress> >::const_iterator i(sn.begin());i!=sn.end();++i) {
@@ -64,17 +64,17 @@ void Topology::setSupernodes(const std::map< Identity,std::vector<InetAddress> >
 			for(std::vector<InetAddress>::const_iterator j(i->second.begin());j!=i->second.end();++j)
 				p->addPath(Path(*j,true));
 			p->use(now);
-			_supernodePeers.push_back(p);
+			_rootserverPeers.push_back(p);
 		}
-		_supernodeAddresses.push_back(i->first.address());
+		_rootserverAddresses.push_back(i->first.address());
 	}
 
-	std::sort(_supernodeAddresses.begin(),_supernodeAddresses.end());
+	std::sort(_rootserverAddresses.begin(),_rootserverAddresses.end());
 
-	_amSupernode = (_supernodes.find(RR->identity) != _supernodes.end());
+	_amRootserver = (_rootservers.find(RR->identity) != _rootservers.end());
 }
 
-void Topology::setSupernodes(const Dictionary &sn)
+void Topology::setRootservers(const Dictionary &sn)
 {
 	std::map< Identity,std::vector<InetAddress> > m;
 	for(Dictionary::const_iterator d(sn.begin());d!=sn.end();++d) {
@@ -86,11 +86,11 @@ void Topology::setSupernodes(const Dictionary &sn)
 				if (udp.length() > 0)
 					a.push_back(InetAddress(udp));
 			} catch ( ... ) {
-				TRACE("supernode list contained invalid entry for: %s",d->first.c_str());
+				TRACE("rootserver list contained invalid entry for: %s",d->first.c_str());
 			}
 		}
 	}
-	this->setSupernodes(m);
+	this->setRootservers(m);
 }
 
 SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
@@ -141,28 +141,28 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
 	return SharedPtr<Peer>();
 }
 
-SharedPtr<Peer> Topology::getBestSupernode(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
+SharedPtr<Peer> Topology::getBestRootserver(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
 {
-	SharedPtr<Peer> bestSupernode;
+	SharedPtr<Peer> bestRootserver;
 	const uint64_t now = RR->node->now();
 	Mutex::Lock _l(_lock);
 
-	if (_amSupernode) {
-		/* If I am a supernode, the "best" supernode is the one whose address
+	if (_amRootserver) {
+		/* If I am a rootserver, the "best" rootserver is the one whose address
 		 * is numerically greater than mine (with wrap at top of list). This
 		 * causes packets searching for a route to pretty much literally
 		 * circumnavigate the globe rather than bouncing between just two. */
 
-		if (_supernodeAddresses.size() > 1) { // gotta be one other than me for this to work
-			std::vector<Address>::const_iterator sna(std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),RR->identity.address()));
-			if (sna != _supernodeAddresses.end()) { // sanity check -- _amSupernode should've been false in this case
+		if (_rootserverAddresses.size() > 1) { // gotta be one other than me for this to work
+			std::vector<Address>::const_iterator sna(std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),RR->identity.address()));
+			if (sna != _rootserverAddresses.end()) { // sanity check -- _amRootserver should've been false in this case
 				for(;;) {
-					if (++sna == _supernodeAddresses.end())
-						sna = _supernodeAddresses.begin(); // wrap around at end
+					if (++sna == _rootserverAddresses.end())
+						sna = _rootserverAddresses.begin(); // wrap around at end
 					if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
 						std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.find(*sna));
 						if ((p != _activePeers.end())&&(p->second->hasActiveDirectPath(now))) {
-							bestSupernode = p->second;
+							bestRootserver = p->second;
 							break;
 						}
 					}
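The wrap-around selection described in the comment above is just "successor in a sorted ring". A hypothetical standalone rendering, using uint64_t in place of ZeroTier's Address type:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Pick the first root whose address is numerically greater than mine,
    // wrapping to the smallest one when mine is at the end of the sorted list.
    static uint64_t nextRootAfter(const std::vector<uint64_t> &sortedRoots, uint64_t self)
    {
        std::vector<uint64_t>::const_iterator it(std::upper_bound(sortedRoots.begin(), sortedRoots.end(), self));
        if (it == sortedRoots.end())
            it = sortedRoots.begin(); // wrap around at end
        return *it;
    }

    int main()
    {
        std::vector<uint64_t> roots;
        roots.push_back(0x11); roots.push_back(0x22); roots.push_back(0x33);
        assert(nextRootAfter(roots, 0x22) == 0x33);
        assert(nextRootAfter(roots, 0x33) == 0x11); // wrapped
        return 0;
    }

Under this rule relayed traffic hops from each root to its successor rather than ping-ponging between two of them, which is the circumnavigation behavior the comment describes.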
@@ -170,80 +170,80 @@ SharedPtr<Peer> Topology::getBestSupernode(const Address *avoid,unsigned int avo
 				}
 			}
 	} else {
-		/* If I am not a supernode, the best supernode is the active one with
+		/* If I am not a rootserver, the best rootserver is the active one with
 		 * the lowest latency. */
 
-		unsigned int l,bestSupernodeLatency = 65536;
+		unsigned int l,bestRootserverLatency = 65536;
 		uint64_t lds,ldr;
 
-		// First look for a best supernode by comparing latencies, but exclude
-		// supernodes that have not responded to direct messages in order to
+		// First look for a best rootserver by comparing latencies, but exclude
+		// rootservers that have not responded to direct messages in order to
 		// try to exclude any that are dead or unreachable.
-		for(std::vector< SharedPtr<Peer> >::const_iterator sn(_supernodePeers.begin());sn!=_supernodePeers.end();) {
+		for(std::vector< SharedPtr<Peer> >::const_iterator sn(_rootserverPeers.begin());sn!=_rootserverPeers.end();) {
 			// Skip explicitly avoided relays
 			for(unsigned int i=0;i<avoidCount;++i) {
 				if (avoid[i] == (*sn)->address())
-					goto keep_searching_for_supernodes;
+					goto keep_searching_for_rootservers;
 			}
 
 			// Skip possibly comatose or unreachable relays
 			lds = (*sn)->lastDirectSend();
 			ldr = (*sn)->lastDirectReceive();
 			if ((lds)&&(lds > ldr)&&((lds - ldr) > ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD))
-				goto keep_searching_for_supernodes;
+				goto keep_searching_for_rootservers;
 
 			if ((*sn)->hasActiveDirectPath(now)) {
 				l = (*sn)->latency();
-				if (bestSupernode) {
-					if ((l)&&(l < bestSupernodeLatency)) {
-						bestSupernodeLatency = l;
-						bestSupernode = *sn;
+				if (bestRootserver) {
+					if ((l)&&(l < bestRootserverLatency)) {
+						bestRootserverLatency = l;
+						bestRootserver = *sn;
 					}
 				} else {
 					if (l)
-						bestSupernodeLatency = l;
-					bestSupernode = *sn;
+						bestRootserverLatency = l;
+					bestRootserver = *sn;
 				}
 			}
 
-keep_searching_for_supernodes:
+keep_searching_for_rootservers:
 			++sn;
 		}
 
-		if (bestSupernode) {
-			bestSupernode->use(now);
-			return bestSupernode;
+		if (bestRootserver) {
+			bestRootserver->use(now);
+			return bestRootserver;
 		} else if (strictAvoid)
 			return SharedPtr<Peer>();
 
 		// If we have nothing from above, just pick one without avoidance criteria.
-		for(std::vector< SharedPtr<Peer> >::const_iterator sn=_supernodePeers.begin();sn!=_supernodePeers.end();++sn) {
+		for(std::vector< SharedPtr<Peer> >::const_iterator sn=_rootserverPeers.begin();sn!=_rootserverPeers.end();++sn) {
 			if ((*sn)->hasActiveDirectPath(now)) {
 				unsigned int l = (*sn)->latency();
-				if (bestSupernode) {
-					if ((l)&&(l < bestSupernodeLatency)) {
-						bestSupernodeLatency = l;
-						bestSupernode = *sn;
+				if (bestRootserver) {
+					if ((l)&&(l < bestRootserverLatency)) {
+						bestRootserverLatency = l;
+						bestRootserver = *sn;
 					}
 				} else {
 					if (l)
-						bestSupernodeLatency = l;
-					bestSupernode = *sn;
+						bestRootserverLatency = l;
+					bestRootserver = *sn;
 				}
 			}
 		}
 	}
 
-	if (bestSupernode)
-		bestSupernode->use(now);
-	return bestSupernode;
+	if (bestRootserver)
+		bestRootserver->use(now);
+	return bestRootserver;
 }
 
 void Topology::clean(uint64_t now)
 {
 	Mutex::Lock _l(_lock);
 	for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();) {
-		if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->first) == _supernodeAddresses.end())) {
+		if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),p->first) == _rootserverAddresses.end())) {
 			_activePeers.erase(p++);
 		} else ++p;
 	}
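For ordinary nodes the selection above reduces to "lowest measured latency among reachable roots, honoring an avoid list unless strictAvoid is off and nothing else exists". A hypothetical distillation over plain structs, not the actual Topology code:

    #include <vector>

    struct Root {
        unsigned long long addr; // stands in for ZeroTier's Address
        unsigned int latencyMs;  // 0 means "not yet measured"
        bool reachable;
    };

    // Two-pass pick: first honor the avoid list, then (unless strictAvoid)
    // reconsider avoided roots. Unmeasured latency sorts last but still
    // beats returning nothing, mirroring the loop above.
    static int pickLowestLatency(const std::vector<Root> &roots,
                                 const std::vector<unsigned long long> &avoid,
                                 bool strictAvoid)
    {
        for (int pass = 0; pass < 2; ++pass) {
            int best = -1;
            unsigned int bestLatency = 65536;
            for (size_t i = 0; i < roots.size(); ++i) {
                bool avoided = false;
                for (size_t a = 0; a < avoid.size(); ++a) {
                    if (avoid[a] == roots[i].addr)
                        avoided = true;
                }
                if ((pass == 0) && avoided)
                    continue;
                if (!roots[i].reachable)
                    continue;
                const unsigned int l = roots[i].latencyMs ? roots[i].latencyMs : 65535;
                if (l < bestLatency) {
                    bestLatency = l;
                    best = (int)i;
                }
            }
            if (best >= 0)
                return best;
            if (strictAvoid)
                return -1; // caller asked us not to fall back to avoided roots
        }
        return -1;
    }

    int main()
    {
        std::vector<Root> roots;
        Root a = { 0x11, 40, true }, b = { 0x22, 25, true };
        roots.push_back(a); roots.push_back(b);
        std::vector<unsigned long long> avoid(1, 0x22ULL);
        return pickLowestLatency(roots, avoid, false); // 0: avoided root skipped while an alternative exists
    }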
@@ -59,21 +59,21 @@ public:
 	~Topology();
 
 	/**
-	 * Set up supernodes for this network
+	 * Set up rootservers for this network
 	 *
-	 * @param sn Supernodes for this network
+	 * @param sn Rootservers for this network
 	 */
-	void setSupernodes(const std::map< Identity,std::vector<InetAddress> > &sn);
+	void setRootservers(const std::map< Identity,std::vector<InetAddress> > &sn);
 
 	/**
-	 * Set up supernodes for this network
+	 * Set up rootservers for this network
 	 *
 	 * This performs no signature verification of any kind. The caller must
 	 * check the signature of the root topology dictionary first.
 	 *
-	 * @param sn Supernodes dictionary from root-topology
+	 * @param sn Rootservers dictionary from root-topology
 	 */
-	void setSupernodes(const Dictionary &sn);
+	void setRootservers(const Dictionary &sn);
 
 	/**
 	 * Add a peer to database
@@ -95,65 +95,65 @@ public:
 	SharedPtr<Peer> getPeer(const Address &zta);
 
 	/**
-	 * @return Vector of peers that are supernodes
+	 * @return Vector of peers that are rootservers
 	 */
-	inline std::vector< SharedPtr<Peer> > supernodePeers() const
+	inline std::vector< SharedPtr<Peer> > rootserverPeers() const
 	{
 		Mutex::Lock _l(_lock);
-		return _supernodePeers;
+		return _rootserverPeers;
 	}
 
 	/**
-	 * @return Number of supernodes
+	 * @return Number of rootservers
 	 */
-	inline unsigned int numSupernodes() const
+	inline unsigned int numRootservers() const
 	{
 		Mutex::Lock _l(_lock);
-		return (unsigned int)_supernodePeers.size();
+		return (unsigned int)_rootserverPeers.size();
 	}
 
 	/**
-	 * Get the current favorite supernode
+	 * Get the current favorite rootserver
 	 *
-	 * @return Supernode with lowest latency or NULL if none
+	 * @return Rootserver with lowest latency or NULL if none
 	 */
-	inline SharedPtr<Peer> getBestSupernode()
+	inline SharedPtr<Peer> getBestRootserver()
 	{
-		return getBestSupernode((const Address *)0,0,false);
+		return getBestRootserver((const Address *)0,0,false);
 	}
 
 	/**
-	 * Get the best supernode, avoiding supernodes listed in an array
+	 * Get the best rootserver, avoiding rootservers listed in an array
 	 *
-	 * This will get the best supernode (lowest latency, etc.) but will
-	 * try to avoid the listed supernodes, only using them if no others
+	 * This will get the best rootserver (lowest latency, etc.) but will
+	 * try to avoid the listed rootservers, only using them if no others
 	 * are available.
 	 *
 	 * @param avoid Nodes to avoid
 	 * @param avoidCount Number of nodes to avoid
-	 * @param strictAvoid If false, consider avoided supernodes anyway if no non-avoid supernodes are available
-	 * @return Supernode or NULL if none
+	 * @param strictAvoid If false, consider avoided rootservers anyway if no non-avoid rootservers are available
+	 * @return Rootserver or NULL if none
 	 */
-	SharedPtr<Peer> getBestSupernode(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
+	SharedPtr<Peer> getBestRootserver(const Address *avoid,unsigned int avoidCount,bool strictAvoid);
 
 	/**
 	 * @param zta ZeroTier address
-	 * @return True if this is a designated supernode
+	 * @return True if this is a designated rootserver
 	 */
-	inline bool isSupernode(const Address &zta) const
+	inline bool isRootserver(const Address &zta) const
 		throw()
 	{
 		Mutex::Lock _l(_lock);
-		return (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),zta) != _supernodeAddresses.end());
+		return (std::find(_rootserverAddresses.begin(),_rootserverAddresses.end(),zta) != _rootserverAddresses.end());
 	}
 
 	/**
-	 * @return Vector of supernode addresses
+	 * @return Vector of rootserver addresses
 	 */
-	inline std::vector<Address> supernodeAddresses() const
+	inline std::vector<Address> rootserverAddresses() const
 	{
 		Mutex::Lock _l(_lock);
-		return _supernodeAddresses;
+		return _rootserverAddresses;
 	}
 
 	/**
@@ -206,13 +206,13 @@ private:
 	const RuntimeEnvironment *RR;
 
 	std::map< Address,SharedPtr<Peer> > _activePeers;
-	std::map< Identity,std::vector<InetAddress> > _supernodes;
-	std::vector< Address > _supernodeAddresses;
-	std::vector< SharedPtr<Peer> > _supernodePeers;
+	std::map< Identity,std::vector<InetAddress> > _rootservers;
+	std::vector< Address > _rootserverAddresses;
+	std::vector< SharedPtr<Peer> > _rootserverPeers;
 
 	Mutex _lock;
 
-	bool _amSupernode;
+	bool _amRootserver;
 };
 
 } // namespace ZeroTier
@@ -2,9 +2,9 @@ This folder contains the source files to compile the signed network root topolog
 
 Keys in the root topology dictionary are:
 
-* **supernodes**: contains another Dictionary mapping supernode address to supernode definition
-  * **##########**: supernode address, contains supernode definition
-    * **id**: supernode identity (public) in string-serialized format
+* **rootservers**: contains another Dictionary mapping rootserver address to rootserver definition
+  * **##########**: rootserver address, contains rootserver definition
+    * **id**: rootserver identity (public) in string-serialized format
     * **udp**: comma-delimited list of ip/port UDP addresses of node
     * **tcp**: *DEPRECATED* comma-delimited list of ip/port TCP addresses of node
     * **desc**: human-readable description (optional)
@@ -1,4 +1,4 @@
-supernodes=7e19876aba\=id\\\=7e19876aba:0:2a6e2b2318930f60eb097f70d0f4b028b2cd6d3d0c63c014b9039ff35390e41181f216fb2e6fa8d95c1ee9667156411905c3dccfea78d8c6dfafba688170b3fa\\nudp\\\=198.199.97.220/9993\\ntcp\\\=198.199.97.220/443\\ndesc\\\=San Francisco, California, USA\\ndns\\\=nyarlathotep.zerotier.com\\n\n8841408a2e\=id\\\=8841408a2e:0:bb1d31f2c323e264e9e64172c1a74f77899555ed10751cd56e86405cde118d02dffe555d462ccf6a85b5631c12350c8d5dc409ba10b9025d0f445cf449d92b1c\\nudp\\\=107.191.46.210/9993\\ntcp\\\=107.191.46.210/443\\ndesc\\\=Paris, France\\ndns\\\=shoggoth.zerotier.com\\n\n8acf059fe3\=id\\\=8acf059fe3:0:482f6ee5dfe902319b419de5bdc765209c0ecda38c4d6e4fcf0d33658398b4527dcd22f93112fb9befd02fd78bf7261b333fc105d192a623ca9e50fc60b374a5\\nudp\\\=162.243.77.111/9993\\ntcp\\\=162.243.77.111/443\\ndesc\\\=New York, New York, USA\\ndns\\\=cthulhu.zerotier.com\\n\n9d219039f3\=id\\\=9d219039f3:0:01f0922a98e3b34ebcbff333269dc265d7a020aab69d72be4d4acc9c8c9294785771256cd1d942a90d1bd1d2dca3ea84ef7d85afe6611fb43ff0b74126d90a6e\\nudp\\\=128.199.197.217/9993\\ntcp\\\=128.199.197.217/443\\ndesc\\\=Singapore\\ndns\\\=mi-go.zerotier.com\\n\n
+rootservers=7e19876aba\=id\\\=7e19876aba:0:2a6e2b2318930f60eb097f70d0f4b028b2cd6d3d0c63c014b9039ff35390e41181f216fb2e6fa8d95c1ee9667156411905c3dccfea78d8c6dfafba688170b3fa\\nudp\\\=198.199.97.220/9993\\ntcp\\\=198.199.97.220/443\\ndesc\\\=San Francisco, California, USA\\ndns\\\=nyarlathotep.zerotier.com\\n\n8841408a2e\=id\\\=8841408a2e:0:bb1d31f2c323e264e9e64172c1a74f77899555ed10751cd56e86405cde118d02dffe555d462ccf6a85b5631c12350c8d5dc409ba10b9025d0f445cf449d92b1c\\nudp\\\=107.191.46.210/9993\\ntcp\\\=107.191.46.210/443\\ndesc\\\=Paris, France\\ndns\\\=shoggoth.zerotier.com\\n\n8acf059fe3\=id\\\=8acf059fe3:0:482f6ee5dfe902319b419de5bdc765209c0ecda38c4d6e4fcf0d33658398b4527dcd22f93112fb9befd02fd78bf7261b333fc105d192a623ca9e50fc60b374a5\\nudp\\\=162.243.77.111/9993\\ntcp\\\=162.243.77.111/443\\ndesc\\\=New York, New York, USA\\ndns\\\=cthulhu.zerotier.com\\n\n9d219039f3\=id\\\=9d219039f3:0:01f0922a98e3b34ebcbff333269dc265d7a020aab69d72be4d4acc9c8c9294785771256cd1d942a90d1bd1d2dca3ea84ef7d85afe6611fb43ff0b74126d90a6e\\nudp\\\=128.199.197.217/9993\\ntcp\\\=128.199.197.217/443\\ndesc\\\=Singapore\\ndns\\\=mi-go.zerotier.com\\n\n
 ~!ed25519=b7493f5a4b79a1dcc423fd25d2d8aa8d6293c490a12ceb6395417dd5868c17bfbcee685de58019d21f92576a78a45235d342efa2a00a544ded34766dd32d6f0e11809197f9baeedf4c6a0e8d2d657d280a579f2f2478b2f7c7a08089a5016b55
 ~!sigid=77792b1c02:0:b5c361e8e9c2154e82c3e902fdfc337468b092a7c4d8dc685c37eb10ee4f3c17cc0bb1d024167e8cb0824d12263428373582da3d0a9a14b36e4546c317e811e6
 ~!sigts=14ae42d0314
@@ -30,21 +30,21 @@ int main(int argc,char **argv)
 	if (OSUtils::readFile("template.dict",buf))
 		topology.fromString(buf);
 
-	// Read all entries in supernodes/ that correspond to supernode entry dictionaries
-	// and add them to topology under supernodes/ subkey.
-	Dictionary supernodes;
-	std::vector<std::string> supernodeDictionaries(OSUtils::listDirectory("supernodes"));
-	for(std::vector<std::string>::const_iterator sn(supernodeDictionaries.begin());sn!=supernodeDictionaries.end();++sn) {
+	// Read all entries in rootservers/ that correspond to rootserver entry dictionaries
+	// and add them to topology under rootservers/ subkey.
+	Dictionary rootservers;
+	std::vector<std::string> rootserverDictionaries(OSUtils::listDirectory("rootservers"));
+	for(std::vector<std::string>::const_iterator sn(rootserverDictionaries.begin());sn!=rootserverDictionaries.end();++sn) {
 		if (sn->length() == 10) {
 			buf.clear();
-			if (!OSUtils::readFile((std::string("supernodes/")+(*sn)).c_str(),buf)) {
-				std::cerr << "Cannot read supernodes/" << *sn << std::endl;
+			if (!OSUtils::readFile((std::string("rootservers/")+(*sn)).c_str(),buf)) {
+				std::cerr << "Cannot read rootservers/" << *sn << std::endl;
 				return 1;
 			}
-			supernodes[*sn] = buf;
+			rootservers[*sn] = buf;
 		}
 	}
-	topology["supernodes"] = supernodes.toString();
+	topology["rootservers"] = rootservers.toString();
 
 	if ((topologyAuthority)&&(topologyAuthority.hasPrivate())) {
 		// Sign topology with root-topology-authority.secret
@@ -1,6 +1,6 @@
 Test Root Topology Script
 ======
 
-This builds a test-root-topology from any number of running test-supernode-# Docker containers. This can then be used with the (undocumented) -T (override root topology) option to run test networks under Docker.
+This builds a test-root-topology from any number of running test-rootserver-# Docker containers. This can then be used with the (undocumented) -T (override root topology) option to run test networks under Docker.
 
 Once you have a local Docker test network running you can use iptables rules to simulate a variety of network pathologies, or you can just use it to test any new changes to the protocol or node behavior at some limited scale.
@@ -5,18 +5,18 @@ if [ ! -e ../mktopology ]; then
 	exit 1
 fi
 
-echo 'Populating supernodes/* with all Docker test-supernode-* container IPs and identities...'
+echo 'Populating rootservers/* with all Docker test-rootserver-* container IPs and identities...'
 
-rm -rf supernodes
-mkdir supernodes
+rm -rf rootservers
+mkdir rootservers
 
-for cid in `docker ps -f 'name=test-supernode-*' -q`; do
+for cid in `docker ps -f 'name=test-rootserver-*' -q`; do
 	id=`docker exec $cid cat /var/lib/zerotier-one/identity.secret | cut -d : -f 1-3`
 	ztaddr=`echo $id | cut -d : -f 1`
 	ip=`docker exec $cid ifconfig | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p'`
 	echo $cid $ztaddr $id $ip
-	echo "id=$id" >supernodes/$ztaddr
-	echo "udp=$ip/9993" >>supernodes/$ztaddr
+	echo "id=$id" >rootservers/$ztaddr
+	echo "udp=$ip/9993" >>rootservers/$ztaddr
done
 
 echo 'Creating test-root-topology...'
@@ -213,7 +213,7 @@ static void _jsonAppend(unsigned int depth,std::string &buf,const ZT1_Peer *peer
 	switch(peer->role) {
 		case ZT1_PEER_ROLE_LEAF: prole = "LEAF"; break;
 		case ZT1_PEER_ROLE_HUB: prole = "HUB"; break;
-		case ZT1_PEER_ROLE_SUPERNODE: prole = "SUPERNODE"; break;
+		case ZT1_PEER_ROLE_ROOTSERVER: prole = "ROOT"; break;
 	}
 
 	Utils::snprintf(json,sizeof(json),
@@ -106,7 +106,7 @@ Getting /peer returns an array of peer objects for all current peers. See below
 <tr><td>versionRev</td><td>integer</td><td>Revision of remote if known</td><td>no</td></tr>
 <tr><td>version</td><td>string</td><td>Version in major.minor.rev format</td><td>no</td></tr>
 <tr><td>latency</td><td>integer</td><td>Latency in milliseconds if known</td><td>no</td></tr>
-<tr><td>role</td><td>string</td><td>LEAF, HUB, or SUPERNODE</td><td>no</td></tr>
+<tr><td>role</td><td>string</td><td>LEAF, HUB, or ROOTSERVER</td><td>no</td></tr>
 <tr><td>paths</td><td>[object]</td><td>Array of path objects (see below)</td><td>no</td></tr>
 </table>
 
@@ -184,7 +184,7 @@ Relays, IP assignment pools, and rules are edited via direct POSTs to the networ
 
 **Relay object format:**
 
-Relay objects define network-specific preferred relay nodes. Traffic to peers on this network will preferentially use these relays if they are available, and otherwise will fall back to the global supernode infrastructure.
+Relay objects define network-specific preferred relay nodes. Traffic to peers on this network will preferentially use these relays if they are available, and otherwise will fall back to the global rootserver infrastructure.
 
 <table>
 <tr><td><b>Field</b></td><td><b>Type</b></td><td><b>Description</b></td></tr>
@@ -85,7 +85,7 @@ using namespace ZeroTier;
  * in which every encapsulated ZT packet is prepended by an IP address where
  * it should be forwarded (or where it came from for replies). This causes
  * this proxy to act as a remote UDP socket similar to a socks proxy, which
- * will allow us to move this function off the supernodes and onto dedicated
+ * will allow us to move this function off the rootservers and onto dedicated
  * proxy nodes.
  *
  * Older ZT clients that do not send this message get their packets relayed