From 0731f3f1a91244390d7cd6d58ecb449c07adc8cd Mon Sep 17 00:00:00 2001
From: Adam Ierymenko <adam.ierymenko@zerotier.com>
Date: Fri, 23 Aug 2019 07:04:20 -0700
Subject: [PATCH] wiring more stuff up, and simplification of timing loops

---
 node/Constants.hpp     |  33 +++++---
 node/Network.cpp       | 173 +++++++++++++++++++++--------------------
 node/Network.hpp       |  14 ++--
 node/NetworkConfig.hpp |  44 -----------
 node/Node.cpp          |  75 ++++--------------
 node/Node.hpp          |   2 +-
 node/Peer.cpp          |   1 +
 node/Peer.hpp          |  13 ++++
 node/Switch.cpp        |  48 ++++++------
 node/Topology.hpp      |  54 +++++++++++++
 10 files changed, 229 insertions(+), 228 deletions(-)

diff --git a/node/Constants.hpp b/node/Constants.hpp
index 7f3d226d4..4b3bf8a4f 100644
--- a/node/Constants.hpp
+++ b/node/Constants.hpp
@@ -221,12 +221,25 @@
 /**
  * Minimum delay between timer task checks to prevent thrashing
  */
-#define ZT_CORE_TIMER_TASK_GRANULARITY 500
+#define ZT_MIN_TIMER_TASK_INTERVAL 500
 
 /**
- * How often Topology::clean() and Network::clean() and similar are called, in ms
+ * Maximum delay between timer task checks (should be a fraction of smallest housekeeping interval)
  */
-#define ZT_HOUSEKEEPING_PERIOD 60000
+#define ZT_MAX_TIMER_TASK_INTERVAL 3000
+
+/**
+ * How often most internal cleanup and housekeeping tasks are performed
+ */
+#define ZT_HOUSEKEEPING_PERIOD 120000
+
+/**
+ * How often network housekeeping is performed
+ *
+ * Note that this affects how frequently we re-request network configurations
+ * from network controllers if we haven't received one yet.
+ */
+#define ZT_NETWORK_HOUSEKEEPING_PERIOD 12000
 
 /**
  * Delay between WHOIS retries in ms
@@ -256,7 +269,7 @@
 #define ZT_MULTICAST_LIKE_EXPIRE 600000
 
 /**
- * Period for multicast LIKE announcements
+ * Period for multicast LIKE re-announcements to connected nodes
  */
 #define ZT_MULTICAST_ANNOUNCE_PERIOD 120000
 
@@ -458,11 +471,6 @@
  */
 #define ZT_PEER_PING_PERIOD 45000
 
-/**
- * How often to retry expired paths that we're still remembering
- */
-#define ZT_PEER_EXPIRED_PATH_TRIAL_PERIOD (ZT_PEER_PING_PERIOD * 10)
-
 /**
  * Timeout for overall peer activity (measured from last receive)
  */
@@ -472,6 +480,11 @@
 #define ZT_PEER_ACTIVITY_TIMEOUT 30000
 #endif
 
+/**
+ * Rescan for best/fastest root every N milliseconds
+ */
+#define ZT_FIND_BEST_ROOT_PERIOD 2000
+
 /**
  * General rate limit timeout for multiple packet types (HELLO, etc.)
  */
@@ -508,7 +521,7 @@
  * physical LAN has anywhere even close to this many nodes. Note that this
  * does not limit the size of ZT virtual LANs, only bridge routing.
  */
-#define ZT_MAX_BRIDGE_ROUTES 67108864
+#define ZT_MAX_BRIDGE_ROUTES 16777216
 
 /**
  * If there is no known L2 bridging route, spam to up to this many active bridges
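The two renamed interval constants above pair with the deadline computation in Node.cpp later in this patch: Switch::doTimerTasks() reports how soon it wants to run again, and that request is clamped so the core never sleeps longer than ZT_MAX_TIMER_TASK_INTERVAL or wakes more often than ZT_MIN_TIMER_TASK_INTERVAL. A minimal sketch of the clamp, with switchDelay standing in for the value doTimerTasks() returns (only the two ZT_* constants come from this patch; the helper name is illustrative):

    #include <algorithm>
    #include <cstdint>

    #define ZT_MIN_TIMER_TASK_INTERVAL 500
    #define ZT_MAX_TIMER_TASK_INTERVAL 3000

    // Clamp the requested delay into [min,max] and convert it to an absolute deadline.
    static int64_t nextDeadline(const int64_t now,const unsigned long switchDelay)
    {
        return now + (int64_t)std::max(std::min((unsigned long)ZT_MAX_TIMER_TASK_INTERVAL,switchDelay),(unsigned long)ZT_MIN_TIMER_TASK_INTERVAL);
    }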
diff --git a/node/Network.cpp b/node/Network.cpp
index 399f9b2ce..e73c364dc 100644
--- a/node/Network.cpp
+++ b/node/Network.cpp
@@ -1036,7 +1036,94 @@ int Network::setConfiguration(void *tPtr,const NetworkConfig &nconf,bool saveToDisk)
 	return 0;
 }
 
-void Network::requestConfiguration(void *tPtr)
+bool Network::gate(void *tPtr,const SharedPtr<Peer> &peer)
+{
+	const int64_t now = RR->node->now();
+	Mutex::Lock l(_memberships_l);
+	try {
+		if (_config) {
+			Membership *m = _memberships.get(peer->address());
+			if ( (_config.isPublic()) || ((m)&&(m->isAllowedOnNetwork(_config))) ) {
+				if (!m)
+					m = &(_memberships[peer->address()]);
+				if (m->multicastLikeGate(now)) {
+					Mutex::Lock l2(_myMulticastGroups_l);
+					_announceMulticastGroupsTo(tPtr,peer->address(),_allMulticastGroups());
+				}
+				return true;
+			}
+		}
+	} catch ( ... ) {}
+	return false;
+}
+
+void Network::doPeriodicTasks(void *tPtr,const int64_t now)
+{
+	if (_destroyed)
+		return;
+
+	if ((now - _lastConfigUpdate) >= ZT_NETWORK_AUTOCONF_DELAY)
+		_requestConfiguration(tPtr);
+
+	{
+		Mutex::Lock l1(_memberships_l);
+
+		{
+			Address *a = (Address *)0;
+			Membership *m = (Membership *)0;
+			Hashtable<Address,Membership>::Iterator i(_memberships);
+			while (i.next(a,m))
+				m->clean(now,_config);
+		}
+
+		{
+			Mutex::Lock l2(_myMulticastGroups_l);
+
+			Hashtable< MulticastGroup,uint64_t >::Iterator i(_multicastGroupsBehindMe);
+			MulticastGroup *mg = (MulticastGroup *)0;
+			uint64_t *ts = (uint64_t *)0;
+			while (i.next(mg,ts)) {
+				if ((now - *ts) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
+					_multicastGroupsBehindMe.erase(*mg);
+			}
+
+			_announceMulticastGroups(tPtr,false);
+		}
+	}
+}
+
+Membership::AddCredentialResult Network::addCredential(void *tPtr,const Address &sentFrom,const Revocation &rev)
+{
+	if (rev.networkId() != _id)
+		return Membership::ADD_REJECTED;
+
+	Mutex::Lock l1(_memberships_l);
+	Membership &m = _memberships[rev.target()];
+
+	const Membership::AddCredentialResult result = m.addCredential(RR,tPtr,_config,rev);
+
+	if ((result == Membership::ADD_ACCEPTED_NEW)&&(rev.fastPropagate())) {
+		Address *a = (Address *)0;
+		Membership *m = (Membership *)0;
+		Hashtable<Address,Membership>::Iterator i(_memberships);
+		while (i.next(a,m)) {
+			if ((*a != sentFrom)&&(*a != rev.signer())) {
+				Packet outp(*a,RR->identity.address(),Packet::VERB_NETWORK_CREDENTIALS);
+				outp.append((uint8_t)0x00); // no COM
+				outp.append((uint16_t)0); // no capabilities
+				outp.append((uint16_t)0); // no tags
+				outp.append((uint16_t)1); // one revocation!
+				rev.serialize(outp);
+				outp.append((uint16_t)0); // no certificates of ownership
+				RR->sw->send(tPtr,outp,true);
+			}
+		}
+	}
+
+	return result;
+}
+
+void Network::_requestConfiguration(void *tPtr)
 {
 	if (_destroyed)
 		return;
@@ -1215,90 +1302,6 @@ void Network::requestConfiguration(void *tPtr)
 	RR->sw->send(tPtr,outp,true);
 }
 
-bool Network::gate(void *tPtr,const SharedPtr<Peer> &peer)
-{
-	const int64_t now = RR->node->now();
-	Mutex::Lock l(_memberships_l);
-	try {
-		if (_config) {
-			Membership *m = _memberships.get(peer->address());
-			if ( (_config.isPublic()) || ((m)&&(m->isAllowedOnNetwork(_config))) ) {
-				if (!m)
-					m = &(_memberships[peer->address()]);
-				if (m->multicastLikeGate(now)) {
-					Mutex::Lock l2(_myMulticastGroups_l);
-					_announceMulticastGroupsTo(tPtr,peer->address(),_allMulticastGroups());
-				}
-				return true;
-			}
-		}
-	} catch ( ... ) {}
-	return false;
-}
-
-void Network::doPeriodicTasks(void *tPtr)
-{
-	const int64_t now = RR->node->now();
-	Mutex::Lock l1(_memberships_l);
-
-	if (_destroyed)
-		return;
-
-	{
-		Address *a = (Address *)0;
-		Membership *m = (Membership *)0;
-		Hashtable<Address,Membership>::Iterator i(_memberships);
-		while (i.next(a,m)) {
-			m->clean(now,_config);
-		}
-	}
-
-	{
-		Mutex::Lock l2(_myMulticastGroups_l);
-
-		Hashtable< MulticastGroup,uint64_t >::Iterator i(_multicastGroupsBehindMe);
-		MulticastGroup *mg = (MulticastGroup *)0;
-		uint64_t *ts = (uint64_t *)0;
-		while (i.next(mg,ts)) {
-			if ((now - *ts) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
-				_multicastGroupsBehindMe.erase(*mg);
-		}
-
-		_announceMulticastGroups(tPtr,false);
-	}
-}
-
-Membership::AddCredentialResult Network::addCredential(void *tPtr,const Address &sentFrom,const Revocation &rev)
-{
-	if (rev.networkId() != _id)
-		return Membership::ADD_REJECTED;
-
-	Mutex::Lock l1(_memberships_l);
-	Membership &m = _memberships[rev.target()];
-
-	const Membership::AddCredentialResult result = m.addCredential(RR,tPtr,_config,rev);
-
-	if ((result == Membership::ADD_ACCEPTED_NEW)&&(rev.fastPropagate())) {
-		Address *a = (Address *)0;
-		Membership *m = (Membership *)0;
-		Hashtable<Address,Membership>::Iterator i(_memberships);
-		while (i.next(a,m)) {
-			if ((*a != sentFrom)&&(*a != rev.signer())) {
-				Packet outp(*a,RR->identity.address(),Packet::VERB_NETWORK_CREDENTIALS);
-				outp.append((uint8_t)0x00); // no COM
-				outp.append((uint16_t)0); // no capabilities
-				outp.append((uint16_t)0); // no tags
-				outp.append((uint16_t)1); // one revocation!
-				rev.serialize(outp);
-				outp.append((uint16_t)0); // no certificates of ownership
-				RR->sw->send(tPtr,outp,true);
-			}
-		}
-	}
-
-	return result;
-}
-
 ZT_VirtualNetworkStatus Network::_status() const
 {
 	if (_portError)
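Network::gate() above leans on Membership::multicastLikeGate(), and Peer gains rateGateTryStaticPath() later in this patch; both follow the same "open at most once per period" idiom built from a single remembered timestamp. A generic sketch of that idiom, assuming nothing beyond what the patch itself shows:

    #include <cstdint>

    // Returns true at most once per 'period' milliseconds; all other calls return false.
    class RateGate
    {
    public:
        explicit RateGate(const int64_t period) : _period(period),_last(0) {}
        inline bool gate(const int64_t now)
        {
            if ((now - _last) >= _period) {
                _last = now;
                return true;
            }
            return false;
        }
    private:
        int64_t _period;
        int64_t _last;
    };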
diff --git a/node/Network.hpp b/node/Network.hpp
index ce7bbce83..5e0666921 100644
--- a/node/Network.hpp
+++ b/node/Network.hpp
@@ -230,6 +230,10 @@ public:
 	/**
 	 * Set network configuration
 	 *
+	 * This is normally called internally when a configuration is received
+	 * and fully assembled, but it can also be called on Node startup when
+	 * cached configurations are re-read from the data store.
+	 *
 	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
 	 * @param nconf Network configuration
 	 * @param saveToDisk Save to disk? Used during loading, should usually be true otherwise.
@@ -247,13 +251,6 @@ public:
 	 */
 	inline void setNotFound() { _netconfFailure = NETCONF_FAILURE_NOT_FOUND; }
 
-	/**
-	 * Causes this network to request an updated configuration from its master node now
-	 *
-	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
-	 */
-	void requestConfiguration(void *tPtr);
-
 	/**
 	 * Determine whether this peer is permitted to communicate on this network
 	 *
@@ -265,7 +262,7 @@ public:
 	/**
 	 * Do periodic cleanup and housekeeping tasks
 	 */
-	void doPeriodicTasks(void *tPtr);
+	void doPeriodicTasks(void *tPtr,const int64_t now);
 
 	/**
 	 * Find the node on this network that has this MAC behind it (if any)
@@ -451,6 +448,7 @@ public:
 	inline void **userPtr() { return &_uPtr; }
 
 private:
+	void _requestConfiguration(void *tPtr);
 	ZT_VirtualNetworkStatus _status() const;
 	void _externalConfig(ZT_VirtualNetworkConfig *ec) const; // assumes _lock is locked
 	bool _gate(const SharedPtr<Peer> &peer);
diff --git a/node/NetworkConfig.hpp b/node/NetworkConfig.hpp
index d7099aca4..1eb4adbfc 100644
--- a/node/NetworkConfig.hpp
+++ b/node/NetworkConfig.hpp
@@ -93,11 +93,6 @@
  */
 #define ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE 0x0000020000000000ULL
 
-/**
- * Anchors are stable devices on this network that can act like roots when none are up
- */
-#define ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR 0x0000040000000000ULL
-
 namespace ZeroTier {
 
 // Dictionary capacity needed for max size network config
@@ -323,45 +318,6 @@ struct NetworkConfig
 		return false;
 	}
 
-	inline std::vector<Address> anchors() const
-	{
-		std::vector<Address> r;
-		for(unsigned int i=0;i<specialistCount;++i) {
-			if ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR) != 0)
-				r.push_back(Address(specialists[i]));
-		}
-		return r;
-	}
-
-	inline std::vector<Address> alwaysContactAddresses() const
-	{
-		std::vector<Address> r;
-		for(unsigned int i=0;i<specialistCount;++i) {
-			if ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR) != 0)
-				r.push_back(Address(specialists[i]));
-		}
-		return r;
-	}
-
-	inline void alwaysContactAddresses(Hashtable< Address,std::vector<InetAddress> > &a) const
-	{
-		for(unsigned int i=0;i<specialistCount;++i) {
-			if ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR) != 0) {
-				if (!a.contains(Address(specialists[i])))
-					a[Address(specialists[i])];
-			}
-		}
-	}
-
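The removed helpers work because each entry in specialists[] packs two things into one uint64_t: role flags in the upper bits (ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE is bit 41, and the deleted ANCHOR type was bit 42) and the member's 40-bit ZeroTier address in the low bits, which is why Address(specialists[i]) recovers the address directly. A sketch of that layout (the helper and mask names here are illustrative, not part of the codebase):

    #include <cstdint>

    static const uint64_t ZT_SPECIALIST_ADDRESS_MASK = 0xffffffffffULL; // low 40 bits

    // Combine role flags (high bits) with a 40-bit node address (low bits).
    static inline uint64_t makeSpecialist(const uint64_t typeFlags,const uint64_t address40)
    {
        return (typeFlags | (address40 & ZT_SPECIALIST_ADDRESS_MASK));
    }

    // Recover the node address, as Address(specialists[i]) does in the deleted code.
    static inline uint64_t specialistAddress(const uint64_t specialist)
    {
        return (specialist & ZT_SPECIALIST_ADDRESS_MASK);
    }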
diff --git a/node/Node.cpp b/node/Node.cpp
--- a/node/Node.cpp
+++ b/node/Node.cpp
@@ ... @@ ZT_ResultCode Node::processBackgroundTasks(void *tptr,int64_t now,volatile int64_t &nextBackgroundTaskDeadline)
-	if ((now - …) >= ZT_PEER_PING_PERIOD) {
+	if ((now - _lastPing) >= ZT_PEER_PING_PERIOD) {
 		_lastPing = now;
 		try {
 			_processBackgroundTasks_ping_eachRoot rf;
@@ -264,61 +267,17 @@ ZT_ResultCode Node::processBackgroundTasks(void *tptr,int64_t now,volatile int64_t &nextBackgroundTaskDeadline)
 		}
 	}
 
-	/*
-	if (timeSinceLastPingCheck >= ZT_PING_CHECK_INVERVAL) {
-		try {
-			_lastPingCheck = now;
-
-			// (1) Get peers we should remain connected to and (2) get networks that need config.
-			Hashtable< Address,std::vector<InetAddress> > alwaysContact;
-			RR->topology->getAlwaysContact(alwaysContact);
-			std::vector< std::pair< SharedPtr<Network>,bool > > networkConfigNeeded;
-			{
-				Mutex::Lock l(_networks_m);
-				Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(_networks);
-				uint64_t *nwid = (uint64_t *)0;
-				SharedPtr<Network> *network = (SharedPtr<Network> *)0;
-				while (i.next(nwid,network)) {
-					(*network)->config().alwaysContactAddresses(alwaysContact);
-					networkConfigNeeded.push_back( std::pair< SharedPtr<Network>,bool >(*network,(((now - (*network)->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!(*network)->hasConfig()))) );
-				}
+	if ((now - _lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
+		_lastNetworkHousekeepingRun = now;
+		{
+			Mutex::Lock l(_networks_m);
+			Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(_networks);
+			uint64_t *nwid = (uint64_t *)0;
+			SharedPtr<Network> *network = (SharedPtr<Network> *)0;
+			while (i.next(nwid,network)) {
+				(*network)->doPeriodicTasks(tptr,now);
 			}
-
-			// Ping active peers, upstreams, and others that we should always contact
-			_PingPeersThatNeedPing pfunc(RR,tptr,alwaysContact,now);
-			RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);
-
-			// Run WHOIS to create Peer for alwaysContact addresses that could not be contacted
-			{
-				Hashtable< Address,std::vector<InetAddress> >::Iterator i(alwaysContact);
-				Address *upstreamAddress = (Address *)0;
-				std::vector<InetAddress> *upstreamStableEndpoints = (std::vector<InetAddress> *)0;
-				while (i.next(upstreamAddress,upstreamStableEndpoints))
-					RR->sw->requestWhois(tptr,now,*upstreamAddress);
-			}
-
-			// Refresh network config or broadcast network updates to members as needed
-			for(std::vector< std::pair< SharedPtr<Network>,bool > >::const_iterator n(networkConfigNeeded.begin());n!=networkConfigNeeded.end();++n) {
-				if (n->second)
-					n->first->requestConfiguration(tptr);
-				n->first->sendUpdatesToMembers(tptr);
-			}
-
-			// Update online status, post status change as event
-			const bool oldOnline = _online;
-			_online = pfunc.online;
-			if (oldOnline != _online)
-				postEvent(tptr,_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
-		} catch ( ... ) {
-			return ZT_RESULT_FATAL_ERROR_INTERNAL;
-		}
-	} else {
-		timeUntilNextPingCheck -= (unsigned long)timeSinceLastPingCheck;
-	}
-	*/
-
-	if ((now - _lastMemoizedTraceSettings) >= (ZT_HOUSEKEEPING_PERIOD / 4)) {
-		_lastMemoizedTraceSettings = now;
+		}
 		RR->t->updateMemoizedSettings();
 	}
 
@@ -350,7 +309,7 @@ ZT_ResultCode Node::processBackgroundTasks(void *tptr,int64_t now,volatile int64_t &nextBackgroundTaskDeadline)
 	}
 
 	try {
-		*nextBackgroundTaskDeadline = now + (int64_t)std::max(std::min(timeUntilNextPing,RR->sw->doTimerTasks(tptr,now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
+		*nextBackgroundTaskDeadline = now + (int64_t)std::max(std::min((unsigned long)ZT_MAX_TIMER_TASK_INTERVAL,RR->sw->doTimerTasks(tptr,now)),(unsigned long)ZT_MIN_TIMER_TASK_INTERVAL);
 	} catch ( ... ) {
 		return ZT_RESULT_FATAL_ERROR_INTERNAL;
 	}
diff --git a/node/Node.hpp b/node/Node.hpp
index c7ea6f050..c7e33a5a5 100644
--- a/node/Node.hpp
+++ b/node/Node.hpp
@@ -307,7 +307,7 @@ private:
 	volatile int64_t _now;
 	int64_t _lastPing;
 	int64_t _lastHousekeepingRun;
-	int64_t _lastMemoizedTraceSettings;
+	int64_t _lastNetworkHousekeepingRun;
 	bool _online;
 };
diff --git a/node/Peer.cpp b/node/Peer.cpp
index cac16aa44..057f40958 100644
--- a/node/Peer.cpp
+++ b/node/Peer.cpp
@@ -50,6 +50,7 @@ Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity)
 	_lastACKWindowReset(0),
 	_lastQoSWindowReset(0),
 	_lastMultipathCompatibilityCheck(0),
+	_lastTriedStaticPath(0),
 	_uniqueAlivePathCount(0),
 	_localMultipathSupported(false),
 	_remoteMultipathSupported(false),
diff --git a/node/Peer.hpp b/node/Peer.hpp
index ccc96ba48..dbc4d3c89 100644
--- a/node/Peer.hpp
+++ b/node/Peer.hpp
@@ -480,6 +480,18 @@ public:
 		return (_QoSCutoffCount < ZT_PATH_QOS_ACK_CUTOFF_LIMIT);
 	}
 
+	/**
+	 * Rate limit gate for trying an externally defined or static path
+	 */
+	inline bool rateGateTryStaticPath(const int64_t now)
+	{
+		if ((now - _lastTriedStaticPath) >= ZT_PEER_PING_PERIOD) {
+			_lastTriedStaticPath = now;
+			return true;
+		}
+		return false;
+	}
+
 	/**
 	 * @return Whether this peer is reachable via an aggregate link
 	 */
@@ -503,6 +515,7 @@ private:
 	int64_t _lastACKWindowReset;
 	int64_t _lastQoSWindowReset;
 	int64_t _lastMultipathCompatibilityCheck;
+	int64_t _lastTriedStaticPath;
 
 	int _uniqueAlivePathCount;
diff --git a/node/Switch.cpp b/node/Switch.cpp
index 98fb20f38..9138955f0 100644
--- a/node/Switch.cpp
+++ b/node/Switch.cpp
@@ -99,10 +99,9 @@ void Switch::onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddress &fromAddr,const void *data,unsigned int len)
 			SharedPtr<Peer> relayTo = RR->topology->get(destination);
 			if ((!relayTo)||(!relayTo->sendDirect(tPtr,fragment.data(),fragment.size(),now,false))) {
 				// Don't know peer or no direct path -- so relay via someone upstream
-				// TODO
-				//relayTo = RR->topology->getUpstreamPeer();
-				//if (relayTo)
-				//	relayTo->sendDirect(tPtr,fragment.data(),fragment.size(),now,true);
+				relayTo = RR->topology->findRelayTo(now,destination);
+				if (relayTo)
+					relayTo->sendDirect(tPtr,fragment.data(),fragment.size(),now,true);
 			}
 		}
 	} else {
@@ -172,9 +171,7 @@ void Switch::onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddress &fromAddr,const void *data,unsigned int len)
 						relayTo->introduce(tPtr,now,sourcePeer);
 				}
 			} else {
-				// TODO
-				/*
-				relayTo = RR->topology->getUpstreamPeer();
+				relayTo = RR->topology->findRelayTo(now,destination);
 				if ((relayTo)&&(relayTo->address() != source)) {
 					if (relayTo->sendDirect(tPtr,packet.data(),packet.size(),now,true)) {
 						const SharedPtr<Peer> sourcePeer(RR->topology->get(source));
@@ -182,7 +179,6 @@ void Switch::onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddress &fromAddr,const void *data,unsigned int len)
 						relayTo->introduce(tPtr,now,sourcePeer);
 					}
 				}
-				*/
 			}
 		}
 	} else if ((reinterpret_cast<const uint8_t *>(data)[ZT_PACKET_IDX_FLAGS] & ZT_PROTO_FLAG_FRAGMENTED) != 0) {
@@ -785,16 +781,13 @@ void Switch::requestWhois(void *tPtr,const int64_t now,const Address &addr)
 		else last = now;
 	}
 
-	// TODO
-	/*
-	const SharedPtr<Peer> upstream(RR->topology->getUpstreamPeer());
-	if (upstream) {
-		Packet outp(upstream->address(),RR->identity.address(),Packet::VERB_WHOIS);
+	const SharedPtr<Peer> root(RR->topology->root(now));
+	if (root) {
+		Packet outp(root->address(),RR->identity.address(),Packet::VERB_WHOIS);
 		addr.appendTo(outp);
 		RR->node->expectReplyTo(outp.packetId());
-		send(tPtr,outp,true);
+		root->sendDirect(tPtr,outp.data(),outp.size(),now,true);
 	}
-	*/
 }
 
 void Switch::doAnythingWaitingForPeer(void *tPtr,const SharedPtr<Peer> &peer)
@@ -916,15 +909,26 @@ bool Switch::_trySend(void *tPtr,Packet &packet,bool encrypt)
 	if (peer) {
 		viaPath = peer->getAppropriatePath(now,false);
 		if (!viaPath) {
-			// TODO
-			/*
-			peer->tryMemorizedPath(tPtr,now); // periodically attempt memorized or statically defined paths, if any are known
-			const SharedPtr<Peer> relay(RR->topology->getUpstreamPeer());
-			if ( (!relay) || (!(viaPath = relay->getAppropriatePath(now,false))) ) {
-				if (!(viaPath = peer->getAppropriatePath(now,true)))
-					return false;
-			}
-			*/
+			if (peer->rateGateTryStaticPath(now)) {
+				InetAddress tryAddr;
+				bool gotPath = RR->node->externalPathLookup(tPtr,peer->address(),AF_INET6,tryAddr);
+				if ((gotPath)&&(tryAddr)) {
+					peer->sendHELLO(tPtr,-1,tryAddr,now);
+				} else {
+					gotPath = RR->node->externalPathLookup(tPtr,peer->address(),AF_INET,tryAddr);
+					if ((gotPath)&&(tryAddr))
+						peer->sendHELLO(tPtr,-1,tryAddr,now);
+				}
+			}
+
+			const SharedPtr<Peer> relay(RR->topology->findRelayTo(now,destination));
+			if ((!relay)||(!(viaPath = relay->getAppropriatePath(now,true))))
+				return false;
 		}
 	} else {
 		return false;
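The new fallback in _trySend() asks the host (via Node::externalPathLookup()) for a statically or externally defined address before consulting a relay, preferring IPv6 and then IPv4, and sends a direct HELLO to any candidate so a real path can form. Condensed into a helper for clarity (the function name is hypothetical; the calls and the ZeroTier types it assumes are the ones used in the hunk above):

    // Try an externally defined static address: IPv6 first, then IPv4.
    // Returns true if a HELLO was sent toward a candidate address.
    static bool tryStaticPath(const RuntimeEnvironment *RR,void *tPtr,const SharedPtr<Peer> &peer,const int64_t now)
    {
        InetAddress tryAddr;
        if ((RR->node->externalPathLookup(tPtr,peer->address(),AF_INET6,tryAddr))&&(tryAddr)) {
            peer->sendHELLO(tPtr,-1,tryAddr,now);
            return true;
        }
        if ((RR->node->externalPathLookup(tPtr,peer->address(),AF_INET,tryAddr))&&(tryAddr)) {
            peer->sendHELLO(tPtr,-1,tryAddr,now);
            return true;
        }
        return false;
    }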
diff --git a/node/Topology.hpp b/node/Topology.hpp
index d50d306cc..e52c01af7 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -263,6 +263,57 @@ public:
 		}
 	}
 
+	/**
+	 * Get the best root, rescanning and re-ranking roots periodically
+	 *
+	 * @param now Current time
+	 * @return Best/fastest currently connected root or NULL if none
+	 */
+	inline SharedPtr<Peer> root(const int64_t now)
+	{
+		Mutex::Lock l(_bestRoot_m);
+		if ((!_bestRoot)||((now - _lastRankedBestRoot) >= ZT_FIND_BEST_ROOT_PERIOD)) {
+			_bestRoot.zero();
+			_lastRankedBestRoot = now;
+			Mutex::Lock l2(_roots_m);
+			SharedPtr<Peer> rp;
+			long bestQuality = 2147483647;
+			for(std::vector<Root>::const_iterator i(_roots.begin());i!=_roots.end();++i) {
+				{
+					Mutex::Lock l3(_peers_m);
+					const SharedPtr<Peer> *const ap = _peers.get(i->address());
+					if (ap) {
+						rp = *ap;
+					} else {
+						rp.set(new Peer(RR,_myIdentity,i->id()));
+						_peers.set(rp->address(),rp);
+					}
+				}
+				SharedPtr<Path> path(rp->getAppropriatePath(now,false));
+				if (path) {
+					const long pq = path->quality(now);
+					if (pq < bestQuality) {
+						bestQuality = pq;
+						_bestRoot = rp;
+					}
+				}
+			}
+		}
+		return _bestRoot;
+	}
+
+	/**
+	 * Get the best relay to a given address, which may or may not be a root
+	 *
+	 * @param now Current time
+	 * @param toAddr Destination address
+	 * @return Best current relay or NULL if none
+	 */
+	inline SharedPtr<Peer> findRelayTo(const int64_t now,const Address &toAddr)
+	{
+		// TODO: in the future this will check 'mesh-like' relays and, if enabled, consult LF for other roots (e.g. if this node is itself a root)
+		return root(now);
+	}
+
 	/**
 	 * @param allPeers vector to fill with all current peers
 	 */
@@ -387,9 +438,12 @@ private:
 	std::pair< InetAddress,ZT_PhysicalPathConfiguration > _physicalPathConfig[ZT_MAX_CONFIGURABLE_PATHS];
 	unsigned int _numConfiguredPhysicalPaths;
 	std::vector<Root> _roots;
+	SharedPtr<Peer> _bestRoot;
+	int64_t _lastRankedBestRoot;
 	Hashtable< Address,SharedPtr<Peer> > _peers;
 	Hashtable< Path::HashKey,SharedPtr<Path> > _paths;
 	Mutex _roots_m;
+	Mutex _bestRoot_m;
 	Mutex _peers_m;
 	Mutex _paths_m;
 };
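Topology::root() above is essentially a memoized argmin: at most every ZT_FIND_BEST_ROOT_PERIOD milliseconds it rescans the root list, scores each root by Path::quality() (lower is better), and caches the winner so hot paths like requestWhois() and relaying don't re-rank on every packet. The same pattern stripped of the ZeroTier types, as a sketch (all names here are illustrative):

    #include <cstdint>
    #include <limits>
    #include <vector>

    // Re-rank at most once per 'ttl' milliseconds; otherwise return the cached winner.
    template<typename T,typename ScoreFn>
    const T *bestWithTtl(const std::vector<T> &items,ScoreFn score,const int64_t now,const int64_t ttl,int64_t &lastRanked,const T *&cached)
    {
        if ((!cached)||((now - lastRanked) >= ttl)) {
            lastRanked = now;
            cached = nullptr;
            long best = std::numeric_limits<long>::max();
            for(typename std::vector<T>::const_iterator i(items.begin());i!=items.end();++i) {
                const long q = score(*i,now); // lower quality value is better, like Path::quality()
                if (q < best) {
                    best = q;
                    cached = &(*i);
                }
            }
        }
        return cached;
    }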