Tons and tons of cleanup and cruft removal

Adam Ierymenko 2019-08-21 14:24:45 -07:00
parent 5280d28505
commit 0b5472f9fb
No known key found for this signature in database
GPG Key ID: 1657198823E52A61
17 changed files with 202 additions and 377 deletions

View File

@ -2017,7 +2017,7 @@ ZT_SDK_API void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node);
ZT_SDK_API int ZT_Node_sendUserMessage(ZT_Node *node,void *tptr,uint64_t dest,uint64_t typeId,const void *data,unsigned int len);
/**
* Set a network configuration master instance for this node
* Set a network controller instance for this node
*
* Normal nodes should not need to use this. This is for nodes with
* special compiled-in support for acting as network configuration
@ -2031,7 +2031,7 @@ ZT_SDK_API int ZT_Node_sendUserMessage(ZT_Node *node,void *tptr,uint64_t dest,ui
* @param networkConfigMasterInstance Instance of NetworkConfigMaster C++ class or NULL to disable
* @return OK (0) or error code if a fatal error condition has occurred
*/
ZT_SDK_API void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkConfigMasterInstance);
ZT_SDK_API void ZT_Node_setController(ZT_Node *node,void *networkConfigMasterInstance);
/**
* Set configuration for a given physical path

View File

@ -318,13 +318,6 @@
*/
#define ZT_MULTIPATH_PROPORTION_WIN_SZ 128
/**
* How often we will sample packet latency. Should be greater than ZT_PING_CHECK_INTERVAL
* since we will record a 0 bit/s measurement if no valid latency measurement was made within this
* window of time.
*/
#define ZT_PATH_LATENCY_SAMPLE_INTERVAL (ZT_MULTIPATH_PEER_PING_PERIOD * 2)
/**
* Interval used for rate-limiting the computation of path quality estimates.
*/
@ -453,11 +446,6 @@
*/
#define ZT_QOS_DEFAULT_BUCKET 0
/**
* How frequently to send heartbeats over in-use paths
*/
#define ZT_PATH_HEARTBEAT_PERIOD 14000
/**
* Do not accept HELLOs over a given path more often than this
*/
@ -465,18 +453,13 @@
/**
* Delay between full-fledged pings of directly connected peers
*/
#define ZT_PEER_PING_PERIOD 60000
/**
* Delay between full-fledged pings of directly connected peers.
*
* With multipath bonding enabled ping peers more often to measure
* packet loss and latency. This uses more bandwidth so is disabled
* by default to avoid increasing idle bandwidth use for regular
* links.
* See https://conferences.sigcomm.org/imc/2010/papers/p260.pdf for
* some real world data on NAT UDP timeouts. From the paper: "the
* lowest measured timeout when a binding has seen bidirectional
* traffic is 54 sec." We use 45 to be a bit under this.
*/
#define ZT_MULTIPATH_PEER_PING_PERIOD 5000
#define ZT_PEER_PING_PERIOD 45000
/**
* Paths are considered expired if they have not sent us a real packet in this long
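
The 45000 ms figure above is chosen to sit just under the ~54 s minimum bidirectional UDP NAT timeout cited in the linked paper. A minimal sketch of how such a period drives keepalives (the constant is taken from this diff; the helper name is hypothetical):

#include <cstdint>
#include <cstdio>

static const int64_t ZT_PEER_PING_PERIOD_MS = 45000; // value from this diff

// Hypothetical helper: a ping is due once the last outbound packet on a
// path is older than the ping period, keeping NAT/firewall state fresh.
static bool pingDue(const int64_t now, const int64_t lastOut)
{
    return (now - lastOut) >= ZT_PEER_PING_PERIOD_MS;
}

int main()
{
    std::printf("%d\n", pingDue(100000, 40000) ? 1 : 0); // 60 s since last send -> ping
    return 0;
}
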
@ -524,11 +507,6 @@
*/
#define ZT_MIN_UNITE_INTERVAL 30000
/**
* How often should peers try memorized or statically defined paths?
*/
#define ZT_TRY_MEMORIZED_PATH_INTERVAL 30000
/**
* Sanity limit on maximum bridge routes
*

View File

@ -223,6 +223,7 @@ bool IncomingPacket::_doQOS_MEASUREMENT(const RuntimeEnvironment *RR,void *tPtr,
{
if (!peer->rateGateQoS(RR->node->now()))
return true;
/* Dissect incoming QoS packet. From this we can compute latency values and their variance.
* The latency variance is used as a measure of "jitter". */
if (peer->localMultipathSupport()) {
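
The comment above describes deriving "jitter" from the variance of latency samples. A self-contained sketch of that idea, using Welford's running variance as a stand-in for whatever estimator ZeroTier actually applies:

#include <cstdio>
#include <initializer_list>

// Running mean/variance of latency samples; the variance is a simple
// stand-in for the "jitter" figure mentioned above.
struct LatencyStats {
    unsigned long n = 0;
    double mean = 0.0, m2 = 0.0;
    void add(double latencyMs) {
        ++n;
        const double d = latencyMs - mean;
        mean += d / (double)n;
        m2 += d * (latencyMs - mean);
    }
    double variance() const { return (n > 1) ? (m2 / (double)(n - 1)) : 0.0; }
};

int main()
{
    LatencyStats s;
    for (double l : {12.0, 15.0, 11.0, 30.0}) s.add(l);
    std::printf("mean=%.1f ms variance=%.1f\n", s.mean, s.variance());
    return 0;
}
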
@ -349,12 +350,14 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR,void *tPtr,const bool
// VALID -- if we made it here, packet passed identity and authenticity checks!
// Get external surface address if present (was not in old versions)
InetAddress externalSurfaceAddress;
if (ptr < size()) {
ptr += externalSurfaceAddress.deserialize(*this,ptr);
if ((externalSurfaceAddress)&&(hops() == 0))
RR->sa->iam(tPtr,id.address(),_path->localSocket(),_path->address(),externalSurfaceAddress,RR->topology->isRoot(id),now);
// Get address to which this packet was sent to learn our external surface address if packet was direct.
if (hops() == 0) {
InetAddress externalSurfaceAddress;
if (ptr < size()) {
ptr += externalSurfaceAddress.deserialize(*this,ptr);
if ((externalSurfaceAddress)&&(hops() == 0))
RR->sa->iam(tPtr,id.address(),_path->localSocket(),_path->address(),externalSurfaceAddress,RR->topology->isRoot(id),now);
}
}
// Send OK(HELLO) with an echo of the packet's timestamp and some of the same
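
The rewrite above only learns an external surface address from packets with hops() == 0, i.e. packets that arrived directly rather than via a relay. The guard pattern, reduced to a standalone sketch with placeholder types:

#include <cstdio>

struct Addr { unsigned int ip; };     // placeholder for InetAddress
static const Addr NIL_ADDR = {0};

// Only believe a reflected-address hint when the packet came in directly
// (zero hops); a relayed packet could carry a bogus hint.
static Addr learnSurfaceAddress(unsigned int hops, bool hintPresent, Addr hint)
{
    if ((hops == 0) && hintPresent)
        return hint;
    return NIL_ADDR;
}

int main()
{
    const Addr hint = {0x0a000001};
    std::printf("%u %u\n", learnSurfaceAddress(0, true, hint).ip, learnSurfaceAddress(2, true, hint).ip);
    return 0;
}
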
@ -398,20 +401,17 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,void *tPtr,const SharedP
if (vProto < ZT_PROTO_VERSION_MIN)
return true;
InetAddress externalSurfaceAddress;
unsigned int ptr = ZT_PROTO_VERB_HELLO__OK__IDX_REVISION + 2;
// Get reported external surface address if present
if (ptr < size())
ptr += externalSurfaceAddress.deserialize(*this,ptr);
if (!hops()) {
if (hops() == 0) {
_path->updateLatency((unsigned int)latency,RR->node->now());
if ((ZT_PROTO_VERB_HELLO__OK__IDX_REVISION + 2) < size()) {
InetAddress externalSurfaceAddress;
externalSurfaceAddress.deserialize(*this,ZT_PROTO_VERB_HELLO__OK__IDX_REVISION + 2);
if (externalSurfaceAddress)
RR->sa->iam(tPtr,peer->address(),_path->localSocket(),_path->address(),externalSurfaceAddress,RR->topology->isRoot(peer->identity()),RR->node->now());
}
}
peer->setRemoteVersion(vProto,vMajor,vMinor,vRevision);
if ((externalSurfaceAddress)&&(hops() == 0))
RR->sa->iam(tPtr,peer->address(),_path->localSocket(),_path->address(),externalSurfaceAddress,RR->topology->isRoot(peer->identity()),RR->node->now());
} break;
case Packet::VERB_WHOIS:
@ -528,16 +528,14 @@ bool IncomingPacket::_doRENDEZVOUS(const RuntimeEnvironment *RR,void *tPtr,const
if ((port > 0)&&((addrlen == 4)||(addrlen == 16))) {
InetAddress atAddr(field(ZT_PROTO_VERB_RENDEZVOUS_IDX_ADDRESS,addrlen),addrlen,port);
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr,with,_path->localSocket(),atAddr)) {
const uint64_t junk = RR->node->prng();
const uint64_t junk = Utils::random();
RR->node->putPacket(tPtr,_path->localSocket(),atAddr,&junk,4,2); // send low-TTL junk packet to 'open' local NAT(s) and stateful firewalls
rendezvousWith->attemptToContactAt(tPtr,_path->localSocket(),atAddr,RR->node->now(),false);
rendezvousWith->sendHELLO(tPtr,_path->localSocket(),atAddr,RR->node->now());
}
}
}
}
peer->received(tPtr,_path,hops(),packetId(),payloadLength(),Packet::VERB_RENDEZVOUS,0,Packet::VERB_NOP,0);
return true;
}
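
The RENDEZVOUS handler above sends a few random bytes with a low TTL before the real HELLO so that intervening NATs and stateful firewalls create state for the flow. A sketch of that sequence with a stubbed transport (in ZeroTier the actual send is Node::putPacket(); the names here are illustrative):

#include <cstdint>
#include <cstdio>

// Stub transport for illustration only.
static void sendUdp(const void *data, unsigned int len, int ttl)
{
    std::printf("udp send: %u bytes, ttl=%d\n", len, ttl);
    (void)data;
}

// Hole-punch sequence: low-TTL junk first to "open" local NAT state,
// then the real HELLO to the same address (omitted here).
static void rendezvousContact(uint64_t junk)
{
    sendUdp(&junk, 4, 2);
}

int main() { rendezvousContact(0x1234567890abcdefULL); return 0; }
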
@ -560,9 +558,7 @@ bool IncomingPacket::_doFRAME(const RuntimeEnvironment *RR,void *tPtr,const Shar
return false;
}
}
peer->received(tPtr,_path,hops(),packetId(),payloadLength(),Packet::VERB_FRAME,0,Packet::VERB_NOP,nwid);
return true;
}
@ -1003,8 +999,6 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,void *tPt
unsigned int ptr = ZT_PACKET_IDX_PAYLOAD + 2;
while (count--) { // if ptr overflows Buffer will throw
// TODO: some flags are not yet implemented
unsigned int flags = (*this)[ptr++];
unsigned int extLen = at<uint16_t>(ptr); ptr += 2;
ptr += extLen; // unused right now
@ -1014,26 +1008,20 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,void *tPt
switch(addrType) {
case 4: {
const InetAddress a(field(ptr,4),4,at<uint16_t>(ptr + 4));
if ((!( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) == 0) && (peer->hasActivePathTo(now,a)) )) && // not already known
(RR->node->shouldUsePathForZeroTierTraffic(tPtr,peer->address(),_path->localSocket(),a)) ) // should use path
if ((!peer->hasActivePathTo(now,a)) && // not already known
(RR->node->shouldUsePathForZeroTierTraffic(tPtr,peer->address(),-1,a)) ) // should use path
{
if ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) != 0) {
peer->clusterRedirect(tPtr,_path,a,now);
} else if (++countPerScope[(int)a.ipScope()][0] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
peer->attemptToContactAt(tPtr,InetAddress(),a,now,false);
}
if (++countPerScope[(int)a.ipScope()][0] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY)
peer->sendHELLO(tPtr,-1,a,now);
}
} break;
case 6: {
const InetAddress a(field(ptr,16),16,at<uint16_t>(ptr + 16));
if ((!( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) == 0) && (peer->hasActivePathTo(now,a)) )) && // not already known
(RR->node->shouldUsePathForZeroTierTraffic(tPtr,peer->address(),_path->localSocket(),a)) ) // should use path
if ((!peer->hasActivePathTo(now,a)) && // not already known
(RR->node->shouldUsePathForZeroTierTraffic(tPtr,peer->address(),-1,a)) ) // should use path
{
if ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) != 0) {
peer->clusterRedirect(tPtr,_path,a,now);
} else if (++countPerScope[(int)a.ipScope()][1] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
peer->attemptToContactAt(tPtr,InetAddress(),a,now,false);
}
if (++countPerScope[(int)a.ipScope()][1] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY)
peer->sendHELLO(tPtr,-1,a,now);
}
} break;
}
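
Both branches above cap new contact attempts per IP scope and per address family via countPerScope. The bookkeeping in isolation; the actual value of ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY is not shown in this diff, so the limit below is illustrative:

#include <cstdio>

static const int kMaxPerScopeAndFamily = 2; // illustrative limit only

struct PushPathLimiter {
    int countPerScope[8][2] = {}; // [ipScope][0 = IPv4, 1 = IPv6]
    bool allow(int scope, int family /* 0 or 1 */) {
        return ++countPerScope[scope][family] <= kMaxPerScopeAndFamily;
    }
};

int main()
{
    PushPathLimiter l;
    for (int i = 0; i < 4; ++i)
        std::printf("attempt %d allowed: %d\n", i, l.allow(3, 0) ? 1 : 0);
    return 0;
}
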

View File

@ -110,10 +110,10 @@ public:
return nconf.com.agreesWith(_com); // check timestamp agreement window
}
inline bool recentlyAssociated(const int64_t now) const
{
return ((_com)&&((now - _com.timestamp()) < ZT_PEER_ACTIVITY_TIMEOUT));
}
/**
* @return True if this peer has sent us a valid certificate within ZT_PEER_ACTIVITY_TIMEOUT
*/
inline bool recentlyAssociated(const int64_t now) const { return ((_com)&&((now - _com.timestamp()) < ZT_PEER_ACTIVITY_TIMEOUT)); }
/**
* Check whether the peer represented by this Membership owns a given address
@ -170,6 +170,26 @@ public:
*/
static uint64_t credentialKey(const Credential::Type &t,const uint32_t i) { return (((uint64_t)t << 32) | (uint64_t)i); }
/**
* @return Bytes received so far
*/
inline uint64_t receivedBytes() const { return _received; }
/**
* @return Bytes sent so far
*/
inline uint64_t sentBytes() const { return _sent; }
/**
* @param bytes Bytes received
*/
inline void logReceivedBytes(const unsigned int bytes) { _received = (uint64_t)bytes; }
/**
* @param bytes Bytes sent
*/
inline void logSentBytes(const unsigned int bytes) { _sent = (uint64_t)bytes; }
private:
// This returns true if a resource is an IPv6 NDP-emulated address. These embed the ZT
// address of the peer and therefore cannot be spoofed, causing peerOwnsAddress() to
@ -221,6 +241,12 @@ private:
// Time we last pushed credentials
int64_t _lastPushedCredentials;
// Number of Ethernet frame bytes received
uint64_t _received;
// Number of Ethernet frame bytes sent
uint64_t _sent;
// Remote member's latest network COM
CertificateOfMembership _com;

View File

@ -111,7 +111,7 @@ unsigned int Multicaster::gather(const Address &queryingPeer,uint64_t nwid,const
// will return different subsets of a large multicast group.
k = 0;
while ((added < limit)&&(k < s->members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_PROTO_MAX_PACKET_LENGTH)) {
rptr = (unsigned int)RR->node->prng();
rptr = (unsigned int)Utils::random();
restart_member_scan:
a = s->members[rptr % (unsigned int)s->members.size()].address.toInt();
@ -183,7 +183,7 @@ void Multicaster::send(
for(unsigned long i=0;i<gs.members.size();++i)
indexes[i] = i;
for(unsigned long i=(unsigned long)gs.members.size()-1;i>0;--i) {
unsigned long j = (unsigned long)RR->node->prng() % (i + 1);
unsigned long j = (unsigned long)Utils::random() % (i + 1);
unsigned long tmp = indexes[j];
indexes[j] = indexes[i];
indexes[i] = tmp;
@ -251,7 +251,7 @@ void Multicaster::send(
for(unsigned int i=0;i<accnt;++i)
shuffled[i] = i;
for(unsigned int i=0,k=accnt>>1;i<k;++i) {
const uint64_t x = RR->node->prng();
const uint64_t x = Utils::random();
const unsigned int x1 = shuffled[(unsigned int)x % accnt];
const unsigned int x2 = shuffled[(unsigned int)(x >> 32) % accnt];
const unsigned int tmp = shuffled[x1];
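
The loops above are Fisher-Yates shuffles now driven by Utils::random(). The same pattern standalone, with a local xorshift64* generator standing in for Utils::random() (whose internals are not shown in this diff):

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

// xorshift64* stand-in for Utils::random(); seed must be nonzero.
static uint64_t rand64(uint64_t &s)
{
    s ^= s >> 12; s ^= s << 25; s ^= s >> 27;
    return s * 0x2545F4914F6CDD1DULL;
}

// Fisher-Yates shuffle, matching the index-shuffling pattern above.
static void shuffleIndexes(std::vector<unsigned long> &idx, uint64_t seed)
{
    if (idx.size() < 2) return;
    uint64_t s = seed ? seed : 1;
    for (unsigned long i = (unsigned long)idx.size() - 1; i > 0; --i)
        std::swap(idx[i], idx[(unsigned long)(rand64(s) % (i + 1))]);
}

int main()
{
    std::vector<unsigned long> idx = {0, 1, 2, 3, 4, 5};
    shuffleIndexes(idx, 42);
    for (unsigned long v : idx) std::printf("%lu ", v);
    std::printf("\n");
    return 0;
}
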

View File

@ -51,17 +51,14 @@ public:
inline void lock() const
{
const uint16_t myTicket = __sync_fetch_and_add(&(const_cast<Mutex *>(this)->nextTicket),1);
const uint16_t myTicket = __sync_fetch_and_add(&(const_cast<Mutex *>(this)->nextTicket),1);
while (nowServing != myTicket) {
__asm__ __volatile__("rep;nop"::);
__asm__ __volatile__("":::"memory");
}
}
}
inline void unlock() const
{
++(const_cast<Mutex *>(this)->nowServing);
}
inline void unlock() const { ++(const_cast<Mutex *>(this)->nowServing); }
/**
* Uses C++ contexts and constructor/destructor to lock/unlock automatically
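
For reference, the lock above is a ticket spinlock built on GCC __sync builtins: lock() takes a ticket and spins until it is being served, unlock() serves the next ticket. The same idea expressed with C++11 atomics (equivalent behavior, not a drop-in replacement for the Mutex class):

#include <atomic>
#include <cstdint>

class TicketLock {
public:
    void lock() {
        const uint16_t myTicket = _nextTicket.fetch_add(1, std::memory_order_relaxed);
        while (_nowServing.load(std::memory_order_acquire) != myTicket) {
            // spin; the original issues a pause ("rep;nop") here
        }
    }
    void unlock() { _nowServing.fetch_add(1, std::memory_order_release); }
private:
    std::atomic<uint16_t> _nextTicket{0};
    std::atomic<uint16_t> _nowServing{0};
};

int main()
{
    TicketLock l;
    l.lock();   // ticket 0 is served immediately
    l.unlock();
    return 0;
}
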

View File

@ -432,7 +432,7 @@ static _doZtFilterResult _doZtFilter(
thisRuleMatches = (uint8_t)((frameLen >= (unsigned int)rules[rn].v.frameSize[0])&&(frameLen <= (unsigned int)rules[rn].v.frameSize[1]));
break;
case ZT_NETWORK_RULE_MATCH_RANDOM:
thisRuleMatches = (uint8_t)((uint32_t)(RR->node->prng() & 0xffffffffULL) <= rules[rn].v.randomProbability);
thisRuleMatches = (uint8_t)((uint32_t)(Utils::random() & 0xffffffffULL) <= rules[rn].v.randomProbability);
break;
case ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND:
@ -702,6 +702,9 @@ bool Network::filterOutgoingPacket(
}
if (accept) {
if (membership)
membership->logSentBytes(frameLen);
if ((!noTee)&&(cc)) {
Packet outp(cc,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(_id);
@ -820,6 +823,8 @@ int Network::filterIncomingPacket(
}
if (accept) {
membership.logReceivedBytes(frameLen);
if (cc) {
Packet outp(cc,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(_id);

View File

@ -64,9 +64,6 @@ Node::Node(void *uptr,void *tptr,const struct ZT_Node_Callbacks *callbacks,int64
{
memcpy(&_cb,callbacks,sizeof(ZT_Node_Callbacks));
// Initialize non-cryptographic PRNG from a good random source
Utils::getSecureRandom((void *)_prngState,sizeof(_prngState));
_online = false;
memset(_expectingRepliesToBucketPtr,0,sizeof(_expectingRepliesToBucketPtr));
@ -211,7 +208,7 @@ struct _PingPeersThatNeedPing
bool contacted = (sent != 0);
if ((sent & 0x1) == 0) { // bit 0x1 == IPv4 sent
for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)alwaysContactEndpoints->size();++k) {
for(unsigned long k=0,ptr=(unsigned long)Utils::random();k<(unsigned long)alwaysContactEndpoints->size();++k) {
const InetAddress &addr = (*alwaysContactEndpoints)[ptr++ % alwaysContactEndpoints->size()];
if (addr.ss_family == AF_INET) {
p->sendHELLO(_tPtr,-1,addr,_now);
@ -222,7 +219,7 @@ struct _PingPeersThatNeedPing
}
if ((sent & 0x2) == 0) { // bit 0x2 == IPv6 sent
for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)alwaysContactEndpoints->size();++k) {
for(unsigned long k=0,ptr=(unsigned long)Utils::random();k<(unsigned long)alwaysContactEndpoints->size();++k) {
const InetAddress &addr = (*alwaysContactEndpoints)[ptr++ % alwaysContactEndpoints->size()];
if (addr.ss_family == AF_INET6) {
p->sendHELLO(_tPtr,-1,addr,_now);
@ -427,10 +424,13 @@ void Node::status(ZT_NodeStatus *status) const
status->online = _online ? 1 : 0;
}
struct _sortPeerPtrsByAddress { inline bool operator()(const SharedPtr<Peer> &a,const SharedPtr<Peer> &b) const { return (a->address() < b->address()); } };
ZT_PeerList *Node::peers() const
{
std::vector< std::pair< Address,SharedPtr<Peer> > > peers(RR->topology->allPeers());
std::sort(peers.begin(),peers.end());
std::vector< SharedPtr<Peer> > peers;
RR->topology->getAllPeers(peers);
std::sort(peers.begin(),peers.end(),_sortPeerPtrsByAddress());
char *buf = (char *)::malloc(sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size()));
if (!buf)
@ -439,27 +439,27 @@ ZT_PeerList *Node::peers() const
pl->peers = (ZT_Peer *)(buf + sizeof(ZT_PeerList));
pl->peerCount = 0;
for(std::vector< std::pair< Address,SharedPtr<Peer> > >::iterator pi(peers.begin());pi!=peers.end();++pi) {
for(std::vector< SharedPtr<Peer> >::iterator pi(peers.begin());pi!=peers.end();++pi) {
ZT_Peer *p = &(pl->peers[pl->peerCount++]);
p->address = pi->second->address().toInt();
p->address = (*pi)->address().toInt();
p->hadAggregateLink = 0;
if (pi->second->remoteVersionKnown()) {
p->versionMajor = pi->second->remoteVersionMajor();
p->versionMinor = pi->second->remoteVersionMinor();
p->versionRev = pi->second->remoteVersionRevision();
if ((*pi)->remoteVersionKnown()) {
p->versionMajor = (*pi)->remoteVersionMajor();
p->versionMinor = (*pi)->remoteVersionMinor();
p->versionRev = (*pi)->remoteVersionRevision();
} else {
p->versionMajor = -1;
p->versionMinor = -1;
p->versionRev = -1;
}
p->latency = pi->second->latency(_now);
p->latency = (*pi)->latency(_now);
if (p->latency >= 0xffff)
p->latency = -1;
p->role = RR->topology->isRoot(pi->second->identity()) ? ZT_PEER_ROLE_PLANET : ZT_PEER_ROLE_LEAF;
p->role = RR->topology->isRoot((*pi)->identity()) ? ZT_PEER_ROLE_PLANET : ZT_PEER_ROLE_LEAF;
std::vector< SharedPtr<Path> > paths(pi->second->paths(_now));
SharedPtr<Path> bestp(pi->second->getAppropriatePath(_now,false));
p->hadAggregateLink |= pi->second->hasAggregateLink();
std::vector< SharedPtr<Path> > paths((*pi)->paths(_now));
SharedPtr<Path> bestp((*pi)->getAppropriatePath(_now,false));
p->hadAggregateLink |= (*pi)->hasAggregateLink();
p->pathCount = 0;
for(std::vector< SharedPtr<Path> >::iterator path(paths.begin());path!=paths.end();++path) {
memcpy(&(p->paths[p->pathCount].address),&((*path)->address()),sizeof(struct sockaddr_storage));
@ -557,7 +557,7 @@ int Node::sendUserMessage(void *tptr,uint64_t dest,uint64_t typeId,const void *d
return 0;
}
void Node::setNetconfMaster(void *networkControllerInstance)
void Node::setController(void *networkControllerInstance)
{
RR->localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
if (networkControllerInstance)
@ -589,18 +589,6 @@ bool Node::shouldUsePathForZeroTierTraffic(void *tPtr,const Address &ztaddr,cons
return ( (_cb.pathCheckFunction) ? (_cb.pathCheckFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ztaddr.toInt(),localSocket,reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0) : true);
}
uint64_t Node::prng()
{
// https://en.wikipedia.org/wiki/Xorshift#xorshift.2B
uint64_t x = _prngState[0];
const uint64_t y = _prngState[1];
_prngState[0] = y;
x ^= x << 23;
const uint64_t z = x ^ y ^ (x >> 17) ^ (y >> 26);
_prngState[1] = z;
return z + y;
}
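
The deleted Node::prng() above is the xorshift128+ algorithm (per the Wikipedia link in the removed code). For reference, the same generator as a free function; the state must be seeded from a good random source by the caller, just as the removed constructor code did:

#include <cstdint>
#include <cstdio>

// xorshift128+ as removed from Node; state[0] and state[1] must not both be zero.
static uint64_t xorshift128plus(uint64_t state[2])
{
    uint64_t x = state[0];
    const uint64_t y = state[1];
    state[0] = y;
    x ^= x << 23;
    const uint64_t z = x ^ y ^ (x >> 17) ^ (y >> 26);
    state[1] = z;
    return z + y;
}

int main()
{
    uint64_t st[2] = {0x123456789abcdef0ULL, 0x0fedcba987654321ULL}; // stand-in seed
    for (int i = 0; i < 3; ++i)
        std::printf("%016llx\n", (unsigned long long)xorshift128plus(st));
    return 0;
}
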
ZT_ResultCode Node::setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork, const ZT_PhysicalPathConfiguration *pathConfig)
{
RR->topology->setPhysicalPathConfiguration(pathNetwork,pathConfig);
@ -621,7 +609,7 @@ void Node::ncSendConfig(uint64_t nwid,uint64_t requestPacketId,const Address &de
Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY> *dconf = new Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY>();
try {
if (nc.toDictionary(*dconf,sendLegacyFormatConfig)) {
uint64_t configUpdateId = prng();
uint64_t configUpdateId = Utils::random();
if (!configUpdateId) ++configUpdateId;
const unsigned int totalSize = dconf->sizeBytes();
@ -913,10 +901,10 @@ int ZT_Node_sendUserMessage(ZT_Node *node,void *tptr,uint64_t dest,uint64_t type
}
}
void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkControllerInstance)
void ZT_Node_setController(ZT_Node *node,void *networkControllerInstance)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->setNetconfMaster(networkControllerInstance);
reinterpret_cast<ZeroTier::Node *>(node)->setController(networkControllerInstance);
} catch ( ... ) {}
}

View File

@ -106,7 +106,7 @@ public:
int addLocalInterfaceAddress(const struct sockaddr_storage *addr);
void clearLocalInterfaceAddresses();
int sendUserMessage(void *tptr,uint64_t dest,uint64_t typeId,const void *data,unsigned int len);
void setNetconfMaster(void *networkControllerInstance);
void setController(void *networkControllerInstance);
// Internal functions ------------------------------------------------------
@ -187,7 +187,6 @@ public:
bool shouldUsePathForZeroTierTraffic(void *tPtr,const Address &ztaddr,const int64_t localSocket,const InetAddress &remoteAddress);
inline bool externalPathLookup(void *tPtr,const Address &ztaddr,int family,InetAddress &addr) { return ( (_cb.pathLookupFunction) ? (_cb.pathLookupFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,tPtr,ztaddr.toInt(),family,reinterpret_cast<struct sockaddr_storage *>(&addr)) != 0) : false ); }
uint64_t prng();
ZT_ResultCode setPhysicalPathConfiguration(const struct sockaddr_storage *pathNetwork,const ZT_PhysicalPathConfiguration *pathConfig);
inline const Identity &identity() const { return _RR.identity; }
@ -309,7 +308,6 @@ private:
int64_t _lastPingCheck;
int64_t _lastHousekeepingRun;
int64_t _lastMemoizedTraceSettings;
volatile int64_t _prngState[2];
bool _online;
};

View File

@ -135,11 +135,6 @@
*/
#define ZT_PROTO_VERB_FLAG_COMPRESSED 0x80
/**
* PUSH_DIRECT_PATHS flag: cluster redirect
*/
#define ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT 0x02
// Field indexes in packet header
#define ZT_PACKET_IDX_IV 0
#define ZT_PACKET_IDX_DEST 8
@ -799,7 +794,6 @@ public:
*
* Path record flags:
* 0x01 - Forget this path if currently known (not implemented yet)
* 0x02 - Cluster redirect -- use this in preference to others
*
* The receiver may, upon receiving a push, attempt to establish a
* direct link to one or more of the indicated addresses. It is the

View File

@ -279,9 +279,9 @@ public:
*/
inline long quality(const int64_t now) const
{
const int l = (long)_latency;
const int age = (long)std::min((now - _lastIn),(int64_t)(ZT_PATH_HEARTBEAT_PERIOD * 10)); // set an upper sanity limit to avoid overflow
return (((age < (ZT_PATH_HEARTBEAT_PERIOD + 5000)) ? l : (l + 0xffff + age)) * (long)((ZT_INETADDRESS_MAX_SCOPE - _ipScope) + 1));
const long l = (long)_latency;
const long age = (long)std::min((long)(now - _lastIn),(long)(ZT_PEER_PING_PERIOD * 10)); // set an upper sanity limit to avoid overflow
return ( ( (age < (ZT_PEER_PING_PERIOD + 5000)) ? l : (l + 65535 + age) ) * (long)((ZT_INETADDRESS_MAX_SCOPE - _ipScope) + 1));
}
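
The revised quality() above scores a path by latency, adds a large penalty once the path has been silent longer than roughly one ping period, and weights the result by IP scope (lower scores are preferred). Restated standalone, with ZT_PEER_PING_PERIOD taken from this diff and the scope maximum as an assumed stand-in:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const long kPingPeriodMs = 45000; // ZT_PEER_PING_PERIOD in this diff
static const long kMaxScope = 7;         // assumed stand-in for ZT_INETADDRESS_MAX_SCOPE

static long pathQuality(long latencyMs, int64_t now, int64_t lastIn, long ipScope)
{
    const long age = (long)std::min(now - lastIn, (int64_t)(kPingPeriodMs * 10));
    const long base = (age < (kPingPeriodMs + 5000)) ? latencyMs : (latencyMs + 65535 + age);
    return base * ((kMaxScope - ipScope) + 1);
}

int main()
{
    std::printf("fresh: %ld  stale: %ld\n",
        pathQuality(20, 100000, 90000, 6),
        pathQuality(20, 600000, 100000, 6));
    return 0;
}
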
/**
@ -611,14 +611,9 @@ public:
}
/**
* @return True if this path is alive (receiving heartbeats)
* @return True if this path is alive (receiving data)
*/
inline bool alive(const int64_t now) const { return ((now - _lastIn) < (ZT_PATH_HEARTBEAT_PERIOD + 5000)); }
/**
* @return True if this path needs a heartbeat
*/
inline bool needsHeartbeat(const int64_t now) const { return ((now - _lastOut) >= ZT_PATH_HEARTBEAT_PERIOD); }
inline bool alive(const int64_t now) const { return ((now - _lastIn) < ((ZT_PEER_PING_PERIOD * 2) + 5000)); }
/**
* @return Last time we sent something

View File

@ -43,15 +43,12 @@ static unsigned char s_freeRandomByteCounter = 0;
Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity) :
RR(renv),
_lastReceive(0),
_lastNontrivialReceive(0),
_lastTriedMemorizedPath(0),
_lastDirectPathPushSent(0),
_lastDirectPathPushReceive(0),
_lastCredentialRequestSent(0),
_lastWhoisRequestReceived(0),
_lastEchoRequestReceived(0),
_lastCredentialsReceived(0),
_lastSentFullHello(0),
_lastACKWindowReset(0),
_lastQoSWindowReset(0),
_lastMultipathCompatibilityCheck(0),
@ -91,17 +88,6 @@ void Peer::received(
const int64_t now = RR->node->now();
_lastReceive = now;
switch (verb) {
case Packet::VERB_FRAME:
case Packet::VERB_EXT_FRAME:
case Packet::VERB_NETWORK_CONFIG_REQUEST:
case Packet::VERB_NETWORK_CONFIG:
case Packet::VERB_MULTICAST_FRAME:
_lastNontrivialReceive = now;
break;
default:
break;
}
{
Mutex::Lock _l(_paths_m);
@ -181,7 +167,6 @@ void Peer::received(
RR->t->peerLearnedNewPath(tPtr,networkId,*this,path,packetId);
_paths[replacePath].lr = now;
_paths[replacePath].p = path;
_paths[replacePath].priority = 1;
} else {
attemptToContact = true;
}
@ -189,7 +174,7 @@ void Peer::received(
}
if (attemptToContact) {
attemptToContactAt(tPtr,path->localSocket(),path->address(),now,true);
sendHELLO(tPtr,path->localSocket(),path->address(),now);
path->sent(now);
RR->t->peerConfirmingUnknownPath(tPtr,networkId,*this,path,packetId,verb);
}
@ -376,7 +361,7 @@ SharedPtr<Path> Peer::getAppropriatePath(int64_t now, bool includeExpired)
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if (_paths[i].p) {
if ((includeExpired)||((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION)) {
const long q = _paths[i].p->quality(now) / _paths[i].priority;
const long q = _paths[i].p->quality(now);
if (q <= bestPathQuality) {
bestPathQuality = q;
bestPath = i;
@ -525,7 +510,7 @@ void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &o
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if (_paths[i].p) {
const long q = _paths[i].p->quality(now) / _paths[i].priority;
const long q = _paths[i].p->quality(now);
const unsigned int s = (unsigned int)_paths[i].p->ipScope();
switch(_paths[i].p->address().ss_family) {
case AF_INET:
@ -548,7 +533,7 @@ void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &o
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if (other->_paths[i].p) {
const long q = other->_paths[i].p->quality(now) / other->_paths[i].priority;
const long q = other->_paths[i].p->quality(now);
const unsigned int s = (unsigned int)other->_paths[i].p->ipScope();
switch(other->_paths[i].p->address().ss_family) {
case AF_INET:
@ -584,7 +569,7 @@ void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &o
}
if (mine != ZT_MAX_PEER_NETWORK_PATHS) {
unsigned int alt = (unsigned int)RR->node->prng() & 1; // randomize which hint we send first for black magickal NAT-t reasons
unsigned int alt = (unsigned int)Utils::random() & 1; // randomize which hint we send first for black magickal NAT-t reasons
const unsigned int completed = alt + 2;
while (alt != completed) {
if ((alt & 1) == 0) {
@ -710,38 +695,13 @@ void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atA
}
}
void Peer::attemptToContactAt(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now,bool sendFullHello)
void Peer::ping(void *tPtr,int64_t now,unsigned int &v4SendCount,unsigned int &v6SendCount)
{
if ( (!sendFullHello) && (_vProto >= 5) && (!((_vMajor == 1)&&(_vMinor == 1)&&(_vRevision == 0))) ) {
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ECHO);
RR->node->expectReplyTo(outp.packetId());
outp.armor(_key,true);
RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
} else {
sendHELLO(tPtr,localSocket,atAddress,now);
}
}
v4SendCount = 0;
v6SendCount = 0;
void Peer::tryMemorizedPath(void *tPtr,int64_t now)
{
if ((now - _lastTriedMemorizedPath) >= ZT_TRY_MEMORIZED_PATH_INTERVAL) {
_lastTriedMemorizedPath = now;
InetAddress mp;
if (RR->node->externalPathLookup(tPtr,_id.address(),-1,mp))
attemptToContactAt(tPtr,-1,mp,now,true);
}
}
unsigned int Peer::doPingAndKeepalive(void *tPtr,int64_t now)
{
unsigned int sent = 0;
Mutex::Lock _l(_paths_m);
const bool sendFullHello = ((now - _lastSentFullHello) >= ZT_PEER_PING_PERIOD);
_lastSentFullHello = now;
processBackgroundPeerTasks(now);
// Emit traces regarding aggregate link status
if (_canUseMultipath) {
int alivePathCount = aggregateLinkPhysicalPathCount();
@ -759,90 +719,26 @@ unsigned int Peer::doPingAndKeepalive(void *tPtr,int64_t now)
}
}
// Right now we only keep pinging links that have the maximum priority. The
// priority is used to track cluster redirections, meaning that when a cluster
// redirects us its redirect target links override all other links and we
// let those old links expire.
long maxPriority = 0;
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if (_paths[i].p)
maxPriority = std::max(_paths[i].priority,maxPriority);
else break;
}
unsigned int j = 0;
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if (_paths[i].p) {
// Clean expired and reduced priority paths
if ( ((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION) && (_paths[i].priority == maxPriority) ) {
if ((sendFullHello)||(_paths[i].p->needsHeartbeat(now))) {
attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,sendFullHello);
_paths[i].p->sent(now);
sent |= (_paths[i].p->address().ss_family == AF_INET) ? 0x1 : 0x2;
}
if (i != j)
_paths[j] = _paths[i];
++j;
}
} else break;
}
if (canUseMultipath()) {
while(j < ZT_MAX_PEER_NETWORK_PATHS) {
_paths[j].lr = 0;
_paths[j].p.zero();
_paths[j].priority = 1;
if ((_paths[i].p)&&(_paths[i].p->alive(now))) {
sendHELLO(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now);
_paths[i].p->sent(now);
if (_paths[i].p->address().isV4())
++v4SendCount;
else if (_paths[i].p->address().isV6())
++v6SendCount;
if (i != j)
_paths[j] = _paths[i];
++j;
}
}
return sent;
}
void Peer::clusterRedirect(void *tPtr,const SharedPtr<Path> &originatingPath,const InetAddress &remoteAddress,const int64_t now)
{
SharedPtr<Path> np(RR->topology->getPath(originatingPath->localSocket(),remoteAddress));
RR->t->peerRedirected(tPtr,0,*this,np);
attemptToContactAt(tPtr,originatingPath->localSocket(),remoteAddress,now,true);
{
Mutex::Lock _l(_paths_m);
// New priority is higher than the priority of the originating path (if known)
long newPriority = 1;
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if (_paths[i].p) {
if (_paths[i].p == originatingPath) {
newPriority = _paths[i].priority;
break;
}
} else break;
}
newPriority += 2;
// Erase any paths with lower priority than this one or that are duplicate
// IPs and add this path.
unsigned int j = 0;
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if (_paths[i].p) {
if ((_paths[i].priority >= newPriority)&&(!_paths[i].p->address().ipsEqual2(remoteAddress))) {
if (i != j)
_paths[j] = _paths[i];
++j;
}
}
}
if (j < ZT_MAX_PEER_NETWORK_PATHS) {
_paths[j].lr = now;
_paths[j].p = np;
_paths[j].priority = newPriority;
++j;
while (j < ZT_MAX_PEER_NETWORK_PATHS) {
_paths[j].lr = 0;
_paths[j].p.zero();
_paths[j].priority = 1;
++j;
}
}
while(j < ZT_MAX_PEER_NETWORK_PATHS) {
_paths[j].lr = 0;
_paths[j].p.zero();
++j;
}
}
@ -852,7 +748,7 @@ void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddres
for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
if (_paths[i].p) {
if ((_paths[i].p->address().ss_family == inetAddressFamily)&&(_paths[i].p->ipScope() == scope)) {
attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,false);
sendHELLO(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now);
_paths[i].p->sent(now);
_paths[i].lr = 0; // path will not be used unless it speaks again
}
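
The new Peer::ping() above walks the fixed-size path array, sends HELLO on paths that are still alive, and compacts the survivors toward the front before zeroing the tail slots. The compaction pattern by itself, with a trivial stand-in for _PeerPath:

#include <cstdio>

struct Slot { bool alive; int value; }; // stand-in for _PeerPath

// Keep live entries, slide them to the front, clear the rest (same i/j
// two-index pattern as Peer::ping() above).
static void compact(Slot *slots, unsigned int n)
{
    unsigned int j = 0;
    for (unsigned int i = 0; i < n; ++i) {
        if (slots[i].alive) {
            if (i != j)
                slots[j] = slots[i];
            ++j;
        }
    }
    for (; j < n; ++j)
        slots[j] = Slot{false, 0};
}

int main()
{
    Slot s[5] = {{true,1},{false,2},{true,3},{false,4},{true,5}};
    compact(s, 5);
    for (const Slot &x : s) std::printf("%d%c ", x.value, x.alive ? '*' : ' ');
    std::printf("\n");
    return 0;
}
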

View File

@ -29,8 +29,6 @@
#include <vector>
#include "../include/ZeroTierOne.h"
#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "Node.hpp"
@ -95,7 +93,7 @@ public:
* @param verb Packet verb
* @param inRePacketId Packet ID in reply to (default: none)
* @param inReVerb Verb in reply to (for OK/ERROR, default: VERB_NOP)
* @param networkId Network ID if this pertains to a network, or 0 otherwise
* @param networkId Network ID if this packet is related to a network, 0 otherwise
*/
void received(
void *tPtr,
@ -247,39 +245,16 @@ public:
void sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now);
/**
* Send ECHO (or HELLO for older peers) to this peer at the given address
*
* No statistics or sent times are updated here.
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param localSocket Local source socket
* @param atAddress Destination address
* @param now Current time
* @param sendFullHello If true, always send a full HELLO instead of just an ECHO
*/
void attemptToContactAt(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now,bool sendFullHello);
/**
* Try a memorized or statically defined path if any are known
*
* Under the hood this is done periodically based on ZT_TRY_MEMORIZED_PATH_INTERVAL.
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param now Current time
*/
void tryMemorizedPath(void *tPtr,int64_t now);
/**
* Send pings or keepalives depending on configured timeouts
* Send pings to active paths
*
* This also cleans up some internal data structures. It's called periodically from Node.
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param now Current time
* @param inetAddressFamily Keep this address family alive, or -1 for any
* @return 0 if nothing sent or bit mask: bit 0x1 if IPv4 sent, bit 0x2 if IPv6 sent (0x3 means both sent)
* @param v4SendCount Number of IPv4 packets sent (result parameter)
* @param v6SendCount Number of IPv6 packets sent (result parameter)
*/
unsigned int doPingAndKeepalive(void *tPtr,int64_t now);
void ping(void *tPtr,int64_t now,unsigned int &v4SendCount,unsigned int &v6SendCount);
/**
* Clear paths whose localSocket(s) are in a CLOSED state or have an otherwise INVALID state.
@ -291,16 +266,6 @@ public:
*/
unsigned int prunePaths();
/**
* Process a cluster redirect sent by this peer
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param originatingPath Path from which redirect originated
* @param remoteAddress Remote address
* @param now Current time
*/
void clusterRedirect(void *tPtr,const SharedPtr<Path> &originatingPath,const InetAddress &remoteAddress,const int64_t now);
/**
* Reset paths within a given IP scope and address family
*
@ -341,11 +306,6 @@ public:
*/
inline bool isAlive(const int64_t now) const { return ((now - _lastReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }
/**
* @return True if this peer has sent us real network traffic recently
*/
inline int64_t isActive(int64_t now) const { return ((now - _lastNontrivialReceive) < ZT_PEER_ACTIVITY_TIMEOUT); }
/**
* @return Latency in milliseconds of best/aggregate path or 0xffff if unknown / no paths
*/
@ -417,7 +377,7 @@ public:
*
* @param now Current time
*/
inline void processBackgroundPeerTasks(const int64_t now);
void processBackgroundPeerTasks(const int64_t now);
/**
* Record that the remote peer does have multipath enabled. As is evident by the receipt of a VERB_ACK
@ -541,10 +501,9 @@ public:
private:
struct _PeerPath
{
_PeerPath() : lr(0),p(),priority(1) {}
_PeerPath() : lr(0),p() {}
int64_t lr; // time of last valid ZeroTier packet
SharedPtr<Path> p;
long priority; // >= 1, higher is better
};
uint8_t _key[ZT_PEER_SECRET_KEY_LENGTH];
@ -552,15 +511,12 @@ private:
const RuntimeEnvironment *RR;
int64_t _lastReceive; // direct or indirect
int64_t _lastNontrivialReceive; // frames, things like netconf, etc.
int64_t _lastTriedMemorizedPath;
int64_t _lastDirectPathPushSent;
int64_t _lastDirectPathPushReceive;
int64_t _lastCredentialRequestSent;
int64_t _lastWhoisRequestReceived;
int64_t _lastEchoRequestReceived;
int64_t _lastCredentialsReceived;
int64_t _lastSentFullHello;
int64_t _lastPathPrune;
int64_t _lastACKWindowReset;
int64_t _lastQoSWindowReset;

View File

@ -51,14 +51,11 @@ namespace ZeroTier {
*
* It's also possible to create a root with no DNS and no DNS validator public key. This root
* will be a static entry pointing to a single root identity and set of physical addresses.
*
* This object is thread-safe and may be concurrently accessed and updated.
*/
class Root
{
public:
inline Root() : _dnsPublicKeySize(0) {}
inline Root(const Root &r) { *this = r; }
/**
* Create a new root entry
@ -83,25 +80,11 @@ public:
}
}
inline Root &operator=(const Root &r)
{
Mutex::Lock l(_lock);
Mutex::Lock rl(r._lock);
_defaultIdentity = r._defaultIdentity;
_defaultAddresses = r._defaultAddresses;
_dnsName = r._dnsName;
_lastFetchedLocator = r._lastFetchedLocator;
_dnsPublicKeySize = r._dnsPublicKeySize;
memcpy(_dnsPublicKey,r._dnsPublicKey,_dnsPublicKeySize);
return *this;
}
/**
* @return Current identity (either default or latest locator)
*/
inline const Identity id() const
{
Mutex::Lock l(_lock);
if (_lastFetchedLocator.id())
return _lastFetchedLocator.id();
return _defaultIdentity;
@ -113,7 +96,6 @@ public:
*/
inline bool is(const Identity &id) const
{
Mutex::Lock l(_lock);
return ((_lastFetchedLocator.id()) ? (id == _lastFetchedLocator.id()) : (id == _defaultIdentity));
}
@ -122,7 +104,6 @@ public:
*/
inline const Address address() const
{
Mutex::Lock l(_lock);
if (_lastFetchedLocator.id())
return _lastFetchedLocator.id().address();
return _defaultIdentity.address();
@ -133,7 +114,6 @@ public:
*/
inline const Str dnsName() const
{
Mutex::Lock l(_lock);
return _dnsName;
}
@ -142,7 +122,6 @@ public:
*/
inline Locator locator() const
{
Mutex::Lock l(_lock);
return _lastFetchedLocator;
}
@ -151,7 +130,6 @@ public:
*/
inline int64_t locatorTimestamp() const
{
Mutex::Lock l(_lock);
return _lastFetchedLocator.timestamp();
}
@ -162,7 +140,6 @@ public:
{
if (!loc.verify())
return false;
Mutex::Lock l(_lock);
if ((loc.phy().size() > 0)&&(loc.timestamp() > _lastFetchedLocator.timestamp())) {
_lastFetchedLocator = loc;
return true;
@ -177,7 +154,6 @@ public:
inline bool updateLocatorFromTxt(I start,I end)
{
try {
Mutex::Lock l(_lock);
if (_dnsPublicKeySize != ZT_ECC384_PUBLIC_KEY_SIZE)
return false;
Locator loc;
@ -193,38 +169,25 @@ public:
}
/**
* Pick random IPv4 and IPv6 addresses for this root
* Pick a random physical IP for this root with the given address family
*
* @param v4 Filled with V4 address or NIL if none found
* @param v6 Filled with V6 address or NIL if none found
* @param addressFamily AF_INET or AF_INET6
* @return Address or InetAddress::NIL if no addresses exist for the given family
*/
inline void pickPhysical(InetAddress &v4,InetAddress &v6) const
inline const InetAddress &pickPhysical(const int addressFamily) const
{
v4.clear();
v6.clear();
std::vector<const InetAddress *> v4a,v6a;
Mutex::Lock l(_lock);
std::vector<const InetAddress *> pickList;
const std::vector<InetAddress> *const av = (_lastFetchedLocator) ? &(_lastFetchedLocator.phy()) : &_defaultAddresses;
for(std::vector<InetAddress>::const_iterator i(av->begin());i!=av->end();++i) {
switch(i->ss_family) {
case AF_INET:
v4a.push_back(&(*i));
break;
case AF_INET6:
v6a.push_back(&(*i));
break;
if (addressFamily == (int)i->ss_family) {
pickList.push_back(&(*i));
}
}
if (v4a.size() == 1) {
v4 = *v4a[0];
} else if (v4a.size() > 1) {
v4 = *v4a[(unsigned long)Utils::random() % (unsigned long)v4a.size()];
}
if (v6a.size() == 1) {
v6 = *v6a[0];
} else if (v6a.size() > 1) {
v6 = *v6a[(unsigned long)Utils::random() % (unsigned long)v6a.size()];
}
if (pickList.size() == 1)
return *pickList[0];
else if (pickList.size() > 1)
return *pickList[(unsigned long)Utils::random() % (unsigned long)pickList.size()];
return InetAddress::NIL;
}
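
pickPhysical() above now filters the root's address list by family and returns one match at random, or NIL when nothing fits. The same selection pattern with standard types; std::rand() stands in for Utils::random():

#include <cstdio>
#include <cstdlib>
#include <vector>

struct Addr { int family; const char *text; }; // stand-in for InetAddress

// Filter by family, then pick one of the matches at random; nullptr plays
// the role of InetAddress::NIL.
static const Addr *pickByFamily(const std::vector<Addr> &addrs, int family)
{
    std::vector<const Addr *> candidates;
    for (const Addr &a : addrs)
        if (a.family == family)
            candidates.push_back(&a);
    if (candidates.empty())
        return nullptr;
    return candidates[(unsigned long)std::rand() % candidates.size()];
}

int main()
{
    const std::vector<Addr> addrs = {{4, "192.0.2.1"}, {6, "2001:db8::1"}, {4, "198.51.100.7"}};
    const Addr *a = pickByFamily(addrs, 4);
    std::printf("%s\n", a ? a->text : "NIL");
    return 0;
}
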
private:
@ -234,7 +197,6 @@ private:
Locator _lastFetchedLocator;
unsigned int _dnsPublicKeySize;
uint8_t _dnsPublicKey[ZT_ECC384_PUBLIC_KEY_SIZE];
Mutex _lock;
};
} // namespace ZeroTier

View File

@ -470,7 +470,7 @@ void Switch::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const
while (numBridges < ZT_MAX_BRIDGE_SPAM) {
if (ab == activeBridges.end())
ab = activeBridges.begin();
if (((unsigned long)RR->node->prng() % (unsigned long)activeBridges.size()) == 0) {
if (((unsigned long)Utils::random() % (unsigned long)activeBridges.size()) == 0) {
bridges[numBridges++] = *ab;
++ab;
} else ++ab;
@ -519,7 +519,7 @@ void Switch::aqm_enqueue(void *tPtr, const SharedPtr<Network> &network, Packet &
// DEBUG_INFO("skipping, no QoS for this packet, verb=%x", packet.verb());
// just send packet normally, no QoS for ZT protocol traffic
send(tPtr, packet, encrypt);
}
}
_aqm_m.lock();
@ -527,7 +527,7 @@ void Switch::aqm_enqueue(void *tPtr, const SharedPtr<Network> &network, Packet &
const Address dest(packet.destination());
TXQueueEntry *txEntry = new TXQueueEntry(dest,RR->node->now(),packet,encrypt);
ManagedQueue *selectedQueue = nullptr;
for (size_t i=0; i<ZT_QOS_NUM_BUCKETS; i++) {
if (i < nqcb->oldQueues.size()) { // search old queues first (I think this is best since old would imply most recent usage of the queue)
@ -601,7 +601,7 @@ uint64_t Switch::control_law(uint64_t t, int count)
return (uint64_t)(t + ZT_QOS_INTERVAL / sqrt(count));
}
Switch::dqr Switch::dodequeue(ManagedQueue *q, uint64_t now)
Switch::dqr Switch::dodequeue(ManagedQueue *q, uint64_t now)
{
dqr r;
r.ok_to_drop = false;
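
control_law() above is the CoDel control law: each successive drop is scheduled INTERVAL / sqrt(count) after the previous one, so the drop rate rises gradually while a queue stays above target. A standalone restatement; the interval value below is illustrative, not ZT_QOS_INTERVAL's actual value:

#include <cmath>
#include <cstdint>
#include <cstdio>

static const uint64_t kIntervalUs = 100000; // illustrative stand-in for ZT_QOS_INTERVAL

static uint64_t controlLaw(uint64_t t, int count)
{
    if (count < 1) count = 1; // guard; the caller never passes 0 in practice
    return (uint64_t)(t + kIntervalUs / std::sqrt((double)count));
}

int main()
{
    uint64_t next = 0;
    for (int c = 1; c <= 4; ++c)
        std::printf("drop %d at %llu\n", c, (unsigned long long)(next = controlLaw(next, c)));
    return 0;
}
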

View File

@ -215,13 +215,16 @@ public:
/**
* Apply a function or function object to all peers
*
* This locks the peer map during execution, so calls to get() etc. during
* eachPeer() will deadlock.
*
* @param f Function to apply
* @tparam F Function or function object type
*/
template<typename F>
inline void eachPeer(F f)
{
Mutex::Lock _l(_peers_m);
Mutex::Lock l(_peers_m);
Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
Address *a = (Address *)0;
SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
@ -231,12 +234,51 @@ public:
}
/**
* @return All peers by address (unsorted)
* Apply a function or function object to all roots
*
* Arguments to the function are this topology object, the root in
* question, and a pointer to the peer corresponding to it.
*
* This locks the root list during execution but other operations
* are fine.
*
* @param f Function to apply
* @tparam F function or function object type
*/
inline std::vector< std::pair< Address,SharedPtr<Peer> > > allPeers() const
template<typename F>
inline void eachRoot(F f) const
{
Mutex::Lock _l(_peers_m);
return _peers.entries();
Mutex::Lock l(_roots_m);
SharedPtr<Peer> rp;
for(std::vector<Root>::const_iterator i(_roots.begin());i!=_roots.end();++i) {
{
Mutex::Lock l2(_peers_m);
const SharedPtr<Peer> *const ap = _peers.get(i->address());
if (ap) {
rp = *ap;
} else {
rp.set(new Peer(RR,_myIdentity,i->id()));
_peers.set(rp->address(),rp);
}
}
f(*this,*i,rp);
}
}
/**
* @param allPeers vector to fill with all current peers
*/
inline void getAllPeers(std::vector< SharedPtr<Peer> > &allPeers) const
{
Mutex::Lock l(_peers_m);
allPeers.clear();
allPeers.reserve(_peers.size());
Hashtable< Address,SharedPtr<Peer> >::Iterator i(*(const_cast<Hashtable< Address,SharedPtr<Peer> > *>(&_peers)));
Address *a = (Address *)0;
SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
while (i.next(a,p)) {
allPeers.push_back(*p);
}
}
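
getAllPeers() above copies the peer SharedPtrs out of the Hashtable while holding the lock, so callers such as the rewritten Node::peers() can sort and walk the snapshot without touching Topology's lock. The pattern with standard-library stand-ins for Hashtable and SharedPtr:

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <vector>

struct Peer { uint64_t address; };

class PeerMap {
public:
    // Snapshot all peers under the lock; callers iterate the copy lock-free.
    void getAll(std::vector< std::shared_ptr<Peer> > &out) const
    {
        std::lock_guard<std::mutex> l(_m);
        out.clear();
        out.reserve(_peers.size());
        for (const auto &kv : _peers)
            out.push_back(kv.second);
    }
private:
    mutable std::mutex _m;
    std::map< uint64_t, std::shared_ptr<Peer> > _peers;
};

int main()
{
    PeerMap pm;
    std::vector< std::shared_ptr<Peer> > peers;
    pm.getAll(peers);
    return (int)peers.size();
}
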
/**
@ -315,22 +357,22 @@ public:
std::map<InetAddress,ZT_PhysicalPathConfiguration> cpaths;
for(unsigned int i=0,j=_numConfiguredPhysicalPaths;i<j;++i)
cpaths[_physicalPathConfig[i].first] = _physicalPathConfig[i].second;
if (pathConfig) {
ZT_PhysicalPathConfiguration pc(*pathConfig);
if (pc.mtu <= 0)
pc.mtu = ZT_DEFAULT_PHYSMTU;
else if (pc.mtu < ZT_MIN_PHYSMTU)
pc.mtu = ZT_MIN_PHYSMTU;
else if (pc.mtu > ZT_MAX_PHYSMTU)
pc.mtu = ZT_MAX_PHYSMTU;
cpaths[*(reinterpret_cast<const InetAddress *>(pathNetwork))] = pc;
} else {
cpaths.erase(*(reinterpret_cast<const InetAddress *>(pathNetwork)));
}
unsigned int cnt = 0;
for(std::map<InetAddress,ZT_PhysicalPathConfiguration>::const_iterator i(cpaths.begin());((i!=cpaths.end())&&(cnt<ZT_MAX_CONFIGURABLE_PATHS));++i) {
_physicalPathConfig[cnt].first = i->first;

View File

@ -599,7 +599,7 @@ public:
// Network controller is now enabled by default for desktop and server
_controller = new EmbeddedNetworkController(_node,_homePath.c_str(),_controllerDbPath.c_str(),_ports[0], _mqc);
_node->setNetconfMaster((void *)_controller);
_node->setController((void *)_controller);
// Join existing networks in networks.d
{