wiring more stuff up and simplifying timing loops

Adam Ierymenko 2019-08-23 07:04:20 -07:00
parent 37047a39f9
commit 0731f3f1a9
No known key found for this signature in database
GPG Key ID: 1657198823E52A61
10 changed files with 229 additions and 228 deletions

View File

@@ -221,12 +221,25 @@
/**
* Minimum delay between timer task checks to prevent thrashing
*/
#define ZT_CORE_TIMER_TASK_GRANULARITY 500
#define ZT_MIN_TIMER_TASK_INTERVAL 500
/**
* How often Topology::clean() and Network::clean() and similar are called, in ms
* Maximum delay between timer task checks (should be a fraction of the smallest housekeeping interval)
*/
#define ZT_HOUSEKEEPING_PERIOD 60000
#define ZT_MAX_TIMER_TASK_INTERVAL 3000
/**
* How often most internal cleanup and housekeeping tasks are performed
*/
#define ZT_HOUSEKEEPING_PERIOD 120000
/**
* How often network housekeeping is performed
*
* Note that this affects how frequently we re-request network configurations
* from network controllers if we haven't received one yet.
*/
#define ZT_NETWORK_HOUSEKEEPING_PERIOD 12000
/**
* Delay between WHOIS retries in ms
@@ -256,7 +269,7 @@
#define ZT_MULTICAST_LIKE_EXPIRE 600000
/**
* Period for multicast LIKE announcements
* Period for multicast LIKE re-announcements to connected nodes
*/
#define ZT_MULTICAST_ANNOUNCE_PERIOD 120000
@@ -458,11 +471,6 @@
*/
#define ZT_PEER_PING_PERIOD 45000
/**
* How often to retry expired paths that we're still remembering
*/
#define ZT_PEER_EXPIRED_PATH_TRIAL_PERIOD (ZT_PEER_PING_PERIOD * 10)
/**
* Timeout for overall peer activity (measured from last receive)
*/
@@ -472,6 +480,11 @@
#define ZT_PEER_ACTIVITY_TIMEOUT 30000
#endif
/**
* Rescan for best/fastest root every N milliseconds
*/
#define ZT_FIND_BEST_ROOT_PERIOD 2000
/**
* General rate limit timeout for multiple packet types (HELLO, etc.)
*/
@@ -508,7 +521,7 @@
* physical LAN has anywhere even close to this many nodes. Note that this
* does not limit the size of ZT virtual LANs, only bridge routing.
*/
#define ZT_MAX_BRIDGE_ROUTES 67108864
#define ZT_MAX_BRIDGE_ROUTES 16777216
/**
* If there is no known L2 bridging route, spam to up to this many active bridges

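A minimal sketch of how the two timer-task bounds above are meant to interact; this mirrors the deadline computation added to Node::processBackgroundTasks() later in this commit, with "switchDelay" standing in for whatever Switch::doTimerTasks() returns:

#include <algorithm>
#include <cstdint>

// Sketch: bound the delay until the next background-task pass. Never run sooner
// than the minimum (to avoid thrashing) and never later than the maximum (so the
// much larger housekeeping periods are still checked often enough).
static int64_t nextDeadline(const int64_t now, const unsigned long switchDelay)
{
	const unsigned long d = std::max(
		std::min((unsigned long)ZT_MAX_TIMER_TASK_INTERVAL, switchDelay),
		(unsigned long)ZT_MIN_TIMER_TASK_INTERVAL);
	return now + (int64_t)d;
}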
View File

@@ -1036,7 +1036,94 @@ int Network::setConfiguration(void *tPtr,const NetworkConfig &nconf,bool saveToD
return 0;
}
void Network::requestConfiguration(void *tPtr)
bool Network::gate(void *tPtr,const SharedPtr<Peer> &peer)
{
const int64_t now = RR->node->now();
Mutex::Lock l(_memberships_l);
try {
if (_config) {
Membership *m = _memberships.get(peer->address());
if ( (_config.isPublic()) || ((m)&&(m->isAllowedOnNetwork(_config))) ) {
if (!m)
m = &(_memberships[peer->address()]);
if (m->multicastLikeGate(now)) {
Mutex::Lock l2(_myMulticastGroups_l);
_announceMulticastGroupsTo(tPtr,peer->address(),_allMulticastGroups());
}
return true;
}
}
} catch ( ... ) {}
return false;
}
void Network::doPeriodicTasks(void *tPtr,const int64_t now)
{
if (_destroyed)
return;
if ((now - _lastConfigUpdate) >= ZT_NETWORK_AUTOCONF_DELAY)
_requestConfiguration(tPtr);
{
Mutex::Lock l1(_memberships_l);
{
Address *a = (Address *)0;
Membership *m = (Membership *)0;
Hashtable<Address,Membership>::Iterator i(_memberships);
while (i.next(a,m))
m->clean(now,_config);
}
{
Mutex::Lock l2(_myMulticastGroups_l);
Hashtable< MulticastGroup,uint64_t >::Iterator i(_multicastGroupsBehindMe);
MulticastGroup *mg = (MulticastGroup *)0;
uint64_t *ts = (uint64_t *)0;
while (i.next(mg,ts)) {
if ((now - *ts) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
_multicastGroupsBehindMe.erase(*mg);
}
_announceMulticastGroups(tPtr,false);
}
}
}
Membership::AddCredentialResult Network::addCredential(void *tPtr,const Address &sentFrom,const Revocation &rev)
{
if (rev.networkId() != _id)
return Membership::ADD_REJECTED;
Mutex::Lock l1(_memberships_l);
Membership &m = _memberships[rev.target()];
const Membership::AddCredentialResult result = m.addCredential(RR,tPtr,_config,rev);
if ((result == Membership::ADD_ACCEPTED_NEW)&&(rev.fastPropagate())) {
Address *a = (Address *)0;
Membership *m = (Membership *)0;
Hashtable<Address,Membership>::Iterator i(_memberships);
while (i.next(a,m)) {
if ((*a != sentFrom)&&(*a != rev.signer())) {
Packet outp(*a,RR->identity.address(),Packet::VERB_NETWORK_CREDENTIALS);
outp.append((uint8_t)0x00); // no COM
outp.append((uint16_t)0); // no capabilities
outp.append((uint16_t)0); // no tags
outp.append((uint16_t)1); // one revocation!
rev.serialize(outp);
outp.append((uint16_t)0); // no certificates of ownership
RR->sw->send(tPtr,outp,true);
}
}
}
return result;
}
void Network::_requestConfiguration(void *tPtr)
{
if (_destroyed)
return;
@@ -1215,90 +1302,6 @@ void Network::requestConfiguration(void *tPtr)
RR->sw->send(tPtr,outp,true);
}
bool Network::gate(void *tPtr,const SharedPtr<Peer> &peer)
{
const int64_t now = RR->node->now();
Mutex::Lock l(_memberships_l);
try {
if (_config) {
Membership *m = _memberships.get(peer->address());
if ( (_config.isPublic()) || ((m)&&(m->isAllowedOnNetwork(_config))) ) {
if (!m)
m = &(_memberships[peer->address()]);
if (m->multicastLikeGate(now)) {
Mutex::Lock l2(_myMulticastGroups_l);
_announceMulticastGroupsTo(tPtr,peer->address(),_allMulticastGroups());
}
return true;
}
}
} catch ( ... ) {}
return false;
}
void Network::doPeriodicTasks(void *tPtr)
{
const int64_t now = RR->node->now();
Mutex::Lock l1(_memberships_l);
if (_destroyed)
return;
{
Address *a = (Address *)0;
Membership *m = (Membership *)0;
Hashtable<Address,Membership>::Iterator i(_memberships);
while (i.next(a,m)) {
m->clean(now,_config);
}
}
{
Mutex::Lock l2(_myMulticastGroups_l);
Hashtable< MulticastGroup,uint64_t >::Iterator i(_multicastGroupsBehindMe);
MulticastGroup *mg = (MulticastGroup *)0;
uint64_t *ts = (uint64_t *)0;
while (i.next(mg,ts)) {
if ((now - *ts) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
_multicastGroupsBehindMe.erase(*mg);
}
_announceMulticastGroups(tPtr,false);
}
}
Membership::AddCredentialResult Network::addCredential(void *tPtr,const Address &sentFrom,const Revocation &rev)
{
if (rev.networkId() != _id)
return Membership::ADD_REJECTED;
Mutex::Lock l1(_memberships_l);
Membership &m = _memberships[rev.target()];
const Membership::AddCredentialResult result = m.addCredential(RR,tPtr,_config,rev);
if ((result == Membership::ADD_ACCEPTED_NEW)&&(rev.fastPropagate())) {
Address *a = (Address *)0;
Membership *m = (Membership *)0;
Hashtable<Address,Membership>::Iterator i(_memberships);
while (i.next(a,m)) {
if ((*a != sentFrom)&&(*a != rev.signer())) {
Packet outp(*a,RR->identity.address(),Packet::VERB_NETWORK_CREDENTIALS);
outp.append((uint8_t)0x00); // no COM
outp.append((uint16_t)0); // no capabilities
outp.append((uint16_t)0); // no tags
outp.append((uint16_t)1); // one revocation!
rev.serialize(outp);
outp.append((uint16_t)0); // no certificates of ownership
RR->sw->send(tPtr,outp,true);
}
}
}
return result;
}
ZT_VirtualNetworkStatus Network::_status() const
{
if (_portError)

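For orientation, a hedged usage sketch of the relocated gate() above (a hypothetical caller, not part of this commit): it is the per-peer admission check for network traffic, and as a side effect it announces our multicast LIKEs to newly seen, authorized members.

// Illustrative only: how an inbound-frame handler might consult Network::gate().
// 'network', 'peer', and 'tPtr' are assumed to come from the packet context.
if (!network->gate(tPtr, peer)) {
	// No valid membership and the network is not public: drop the frame.
	return;
}
// Authorized: continue with rule evaluation / bridging for this frame.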
View File

@@ -230,6 +230,10 @@ public:
/**
* Set network configuration
*
* This is normally called internally when a configuration is received
* and fully assembled, but it can also be called on Node startup when
* cached configurations are re-read from the data store.
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param nconf Network configuration
* @param saveToDisk Save to disk? Should be false when a cached configuration is being loaded, true otherwise.
@@ -247,13 +251,6 @@ public:
*/
inline void setNotFound() { _netconfFailure = NETCONF_FAILURE_NOT_FOUND; }
/**
* Causes this network to request an updated configuration from its master node now
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
*/
void requestConfiguration(void *tPtr);
/**
* Determine whether this peer is permitted to communicate on this network
*
@@ -265,7 +262,7 @@
/**
* Do periodic cleanup and housekeeping tasks
*/
void doPeriodicTasks(void *tPtr);
void doPeriodicTasks(void *tPtr,const int64_t now);
/**
* Find the node on this network that has this MAC behind it (if any)
@@ -451,6 +448,7 @@ public:
inline void **userPtr() { return &_uPtr; }
private:
void _requestConfiguration(void *tPtr);
ZT_VirtualNetworkStatus _status() const;
void _externalConfig(ZT_VirtualNetworkConfig *ec) const; // assumes _lock is locked
bool _gate(const SharedPtr<Peer> &peer);

View File

@@ -93,11 +93,6 @@
*/
#define ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE 0x0000020000000000ULL
/**
* Anchors are stable devices on this network that can act like roots when none are up
*/
#define ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR 0x0000040000000000ULL
namespace ZeroTier {
// Dictionary capacity needed for max size network config
@@ -323,45 +318,6 @@ struct NetworkConfig
return false;
}
inline std::vector<Address> anchors() const
{
std::vector<Address> r;
for(unsigned int i=0;i<specialistCount;++i) {
if ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR) != 0)
r.push_back(Address(specialists[i]));
}
return r;
}
inline std::vector<Address> alwaysContactAddresses() const
{
std::vector<Address> r;
for(unsigned int i=0;i<specialistCount;++i) {
if ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR) != 0)
r.push_back(Address(specialists[i]));
}
return r;
}
inline unsigned int alwaysContactAddresses(Address ac[ZT_MAX_NETWORK_SPECIALISTS]) const
{
unsigned int c = 0;
for(unsigned int i=0;i<specialistCount;++i) {
if ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR) != 0)
ac[c++] = specialists[i];
}
return c;
}
inline void alwaysContactAddresses(Hashtable< Address,std::vector<InetAddress> > &a) const
{
for(unsigned int i=0;i<specialistCount;++i) {
if ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ANCHOR) != 0) {
a[Address(specialists[i])];
}
}
}
/**
* @param fromPeer Peer attempting to bridge other Ethernet peers onto network
* @return True if this network allows bridging

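For context on the anchor helpers removed above, a minimal sketch of how one 64-bit specialist entry is decoded. This mirrors the deleted loops and is illustrative only; 'nc' and 'i' are assumed context, and it assumes the Address constructor keeps only the low 40 bits (ZeroTier addresses are 40 bits wide).

// Each specialist entry packs a 40-bit ZeroTier address in its low bits and type
// flags (such as ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) in its high bits.
const uint64_t entry = nc.specialists[i];
const Address who(entry); // low 40 bits
const bool isActiveBridge = ((entry & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0);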
View File

@@ -60,7 +60,7 @@ Node::Node(void *uptr,void *tptr,const struct ZT_Node_Callbacks *callbacks,int64
_now(now),
_lastPing(0),
_lastHousekeepingRun(0),
_lastMemoizedTraceSettings(0)
_lastNetworkHousekeepingRun(0)
{
memcpy(&_cb,callbacks,sizeof(ZT_Node_Callbacks));
@@ -237,10 +237,13 @@ ZT_ResultCode Node::processBackgroundTasks(void *tptr,int64_t now,volatile int64
_now = now;
Mutex::Lock bl(_backgroundTasksLock);
unsigned long timeUntilNextPing = ZT_PEER_PING_PERIOD;
const int64_t timeSinceLastPing = now - _lastPing;
// Initialize these on the first call so that these tasks run a few seconds after
// startup, since right at startup the node is usually not yet ready to
// communicate at all.
if (_lastNetworkHousekeepingRun <= 0) _lastNetworkHousekeepingRun = now - (ZT_NETWORK_HOUSEKEEPING_PERIOD / 3);
if (_lastHousekeepingRun <= 0) _lastHousekeepingRun = now;
if (timeSinceLastPing >= ZT_PEER_PING_PERIOD) {
if ((now - _lastPing) >= ZT_PEER_PING_PERIOD) {
_lastPing = now;
try {
_processBackgroundTasks_ping_eachRoot rf;
@@ -264,61 +267,17 @@ ZT_ResultCode Node::processBackgroundTasks(void *tptr,int64_t now,volatile int64
}
}
/*
if (timeSinceLastPingCheck >= ZT_PING_CHECK_INVERVAL) {
try {
_lastPingCheck = now;
// (1) Get peers we should remain connected to and (2) get networks that need config.
Hashtable< Address,std::vector<InetAddress> > alwaysContact;
RR->topology->getAlwaysContact(alwaysContact);
std::vector< std::pair< SharedPtr<Network>,bool > > networkConfigNeeded;
{
Mutex::Lock l(_networks_m);
Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(_networks);
uint64_t *nwid = (uint64_t *)0;
SharedPtr<Network> *network = (SharedPtr<Network> *)0;
while (i.next(nwid,network)) {
(*network)->config().alwaysContactAddresses(alwaysContact);
networkConfigNeeded.push_back( std::pair< SharedPtr<Network>,bool >(*network,(((now - (*network)->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!(*network)->hasConfig()))) );
}
if ((now - _lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
_lastHousekeepingRun = now;
{
Mutex::Lock l(_networks_m);
Hashtable< uint64_t,SharedPtr<Network> >::Iterator i(_networks);
uint64_t *nwid = (uint64_t *)0;
SharedPtr<Network> *network = (SharedPtr<Network> *)0;
while (i.next(nwid,network)) {
(*network)->doPeriodicTasks(tptr,now);
}
// Ping active peers, upstreams, and others that we should always contact
_PingPeersThatNeedPing pfunc(RR,tptr,alwaysContact,now);
RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);
// Run WHOIS to create Peer for alwaysContact addresses that could not be contacted
{
Hashtable< Address,std::vector<InetAddress> >::Iterator i(alwaysContact);
Address *upstreamAddress = (Address *)0;
std::vector<InetAddress> *upstreamStableEndpoints = (std::vector<InetAddress> *)0;
while (i.next(upstreamAddress,upstreamStableEndpoints))
RR->sw->requestWhois(tptr,now,*upstreamAddress);
}
// Refresh network config or broadcast network updates to members as needed
for(std::vector< std::pair< SharedPtr<Network>,bool > >::const_iterator n(networkConfigNeeded.begin());n!=networkConfigNeeded.end();++n) {
if (n->second)
n->first->requestConfiguration(tptr);
n->first->sendUpdatesToMembers(tptr);
}
// Update online status, post status change as event
const bool oldOnline = _online;
_online = pfunc.online;
if (oldOnline != _online)
postEvent(tptr,_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
} else {
timeUntilNextPingCheck -= (unsigned long)timeSinceLastPingCheck;
}
*/
if ((now - _lastMemoizedTraceSettings) >= (ZT_HOUSEKEEPING_PERIOD / 4)) {
_lastMemoizedTraceSettings = now;
RR->t->updateMemoizedSettings();
}
@@ -350,7 +309,7 @@ ZT_ResultCode Node::processBackgroundTasks(void *tptr,int64_t now,volatile int64
}
try {
*nextBackgroundTaskDeadline = now + (int64_t)std::max(std::min(timeUntilNextPing,RR->sw->doTimerTasks(tptr,now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
*nextBackgroundTaskDeadline = now + (int64_t)std::max(std::min((unsigned long)ZT_MAX_TIMER_TASK_INTERVAL,RR->sw->doTimerTasks(tptr,now)),(unsigned long)ZT_MIN_TIMER_TASK_INTERVAL);
} catch ( ... ) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}

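A quick worked example of the startup staggering introduced above, using the ZT_NETWORK_HOUSEKEEPING_PERIOD of 12000 ms defined earlier in this commit:

// First processBackgroundTasks() call at t0: _lastNetworkHousekeepingRun == 0, so it
// is back-dated to t0 - (12000 / 3) = t0 - 4000 ms.
// The per-network housekeeping condition (now - last >= 12000 ms) therefore first
// fires around t0 + 8000 ms: a few seconds after startup, once the node is more
// likely to be able to communicate, rather than immediately.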
View File

@@ -307,7 +307,7 @@ private:
volatile int64_t _now;
int64_t _lastPing;
int64_t _lastHousekeepingRun;
int64_t _lastMemoizedTraceSettings;
int64_t _lastNetworkHousekeepingRun;
bool _online;
};

View File

@@ -50,6 +50,7 @@ Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Ident
_lastACKWindowReset(0),
_lastQoSWindowReset(0),
_lastMultipathCompatibilityCheck(0),
_lastTriedStaticPath(0),
_uniqueAlivePathCount(0),
_localMultipathSupported(false),
_remoteMultipathSupported(false),

View File

@@ -480,6 +480,18 @@ public:
return (_QoSCutoffCount < ZT_PATH_QOS_ACK_CUTOFF_LIMIT);
}
/**
* Rate limit gate for attempting an externally defined or statically configured path
*/
inline bool rateGateTryStaticPath(const int64_t now)
{
if ((now - _lastTriedStaticPath) >= ZT_PEER_PING_PERIOD) {
_lastTriedStaticPath = now;
return true;
}
return false;
}
/**
* @return Whether this peer is reachable via an aggregate link
*/
@@ -503,6 +515,7 @@ private:
int64_t _lastACKWindowReset;
int64_t _lastQoSWindowReset;
int64_t _lastMultipathCompatibilityCheck;
int64_t _lastTriedStaticPath;
int _uniqueAlivePathCount;

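A brief usage sketch of the new gate (the real caller added by this commit is in Switch::_trySend(), later in this diff):

// The gate opens at most once per ZT_PEER_PING_PERIOD per peer, so a sender can
// retry a statically or externally configured address without spamming HELLOs.
if (peer->rateGateTryStaticPath(now)) {
	// ... ask the host for an external/static address for this peer and HELLO it ...
}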
View File

@@ -99,10 +99,9 @@ void Switch::onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddre
SharedPtr<Peer> relayTo = RR->topology->get(destination);
if ((!relayTo)||(!relayTo->sendDirect(tPtr,fragment.data(),fragment.size(),now,false))) {
// Don't know peer or no direct path -- so relay via someone upstream
// TODO
//relayTo = RR->topology->getUpstreamPeer();
//if (relayTo)
// relayTo->sendDirect(tPtr,fragment.data(),fragment.size(),now,true);
relayTo = RR->topology->findRelayTo(now,destination);
if (relayTo)
relayTo->sendDirect(tPtr,fragment.data(),fragment.size(),now,true);
}
}
} else {
@@ -172,9 +171,7 @@ void Switch::onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddre
relayTo->introduce(tPtr,now,sourcePeer);
}
} else {
// TODO
/*
relayTo = RR->topology->getUpstreamPeer();
relayTo = RR->topology->findRelayTo(now,destination);
if ((relayTo)&&(relayTo->address() != source)) {
if (relayTo->sendDirect(tPtr,packet.data(),packet.size(),now,true)) {
const SharedPtr<Peer> sourcePeer(RR->topology->get(source));
@@ -182,7 +179,6 @@ void Switch::onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddre
relayTo->introduce(tPtr,now,sourcePeer);
}
}
*/
}
}
} else if ((reinterpret_cast<const uint8_t *>(data)[ZT_PACKET_IDX_FLAGS] & ZT_PROTO_FLAG_FRAGMENTED) != 0) {
@@ -785,16 +781,13 @@ void Switch::requestWhois(void *tPtr,const int64_t now,const Address &addr)
else last = now;
}
// TODO
/*
const SharedPtr<Peer> upstream(RR->topology->getUpstreamPeer());
if (upstream) {
Packet outp(upstream->address(),RR->identity.address(),Packet::VERB_WHOIS);
const SharedPtr<Peer> root(RR->topology->root(now));
if (root) {
Packet outp(root->address(),RR->identity.address(),Packet::VERB_WHOIS);
addr.appendTo(outp);
RR->node->expectReplyTo(outp.packetId());
send(tPtr,outp,true);
root->sendDirect(tPtr,outp.data(),outp.size(),now,true);
}
*/
}
void Switch::doAnythingWaitingForPeer(void *tPtr,const SharedPtr<Peer> &peer)
@@ -916,15 +909,26 @@ bool Switch::_trySend(void *tPtr,Packet &packet,bool encrypt)
if (peer) {
viaPath = peer->getAppropriatePath(now,false);
if (!viaPath) {
// TODO
/*
peer->tryMemorizedPath(tPtr,now); // periodically attempt memorized or statically defined paths, if any are known
const SharedPtr<Peer> relay(RR->topology->getUpstreamPeer());
if ( (!relay) || (!(viaPath = relay->getAppropriatePath(now,false))) ) {
if (!(viaPath = peer->getAppropriatePath(now,true)))
if (peer->rateGateTryStaticPath(now)) {
InetAddress tryAddr;
bool gotPath = RR->node->externalPathLookup(tPtr,peer->address(),AF_INET6,tryAddr);
if ((gotPath)&&(tryAddr)) {
peer->sendHELLO(tPtr,-1,tryAddr,now);
} else {
gotPath = RR->node->externalPathLookup(tPtr,peer->address(),AF_INET,tryAddr);
if ((gotPath)&&(tryAddr))
peer->sendHELLO(tPtr,-1,tryAddr,now);
}
}
const SharedPtr<Peer> relay(RR->topology->findRelayTo(now,destination));
if (relay) {
viaPath = relay->getAppropriatePath(now,true);
if (!viaPath)
return false;
}
*/
return false;
}
} else {
return false;

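The static-path attempt added to _trySend() above can be read as the following small helper: a hypothetical refactoring with the same logic, where 'tryStaticPath' is not a function in this commit.

// Prefer an externally defined/static IPv6 address, fall back to IPv4, and send
// a HELLO to whichever is found; callers gate this to once per ZT_PEER_PING_PERIOD.
static void tryStaticPath(void *tPtr, const RuntimeEnvironment *RR, const SharedPtr<Peer> &peer, const int64_t now)
{
	InetAddress tryAddr;
	if ((RR->node->externalPathLookup(tPtr, peer->address(), AF_INET6, tryAddr)) && (tryAddr))
		peer->sendHELLO(tPtr, -1, tryAddr, now);
	else if ((RR->node->externalPathLookup(tPtr, peer->address(), AF_INET, tryAddr)) && (tryAddr))
		peer->sendHELLO(tPtr, -1, tryAddr, now);
}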
View File

@@ -263,6 +263,57 @@ public:
}
}
/**
* Get the best root, rescanning and re-ranking roots periodically
*
* @param now Current time
* @return Best/fastest currently connected root or NULL if none
*/
inline SharedPtr<Peer> root(const int64_t now)
{
Mutex::Lock l(_bestRoot_m);
if ((!_bestRoot)||((now - _lastRankedBestRoot) >= ZT_FIND_BEST_ROOT_PERIOD)) {
_bestRoot.zero();
Mutex::Lock l2(_roots_m);
SharedPtr<Peer> rp;
long bestQuality = 2147483647;
for(std::vector<Root>::const_iterator i(_roots.begin());i!=_roots.end();++i) {
{
Mutex::Lock l2(_peers_m);
const SharedPtr<Peer> *const ap = _peers.get(i->address());
if (ap) {
rp = *ap;
} else {
rp.set(new Peer(RR,_myIdentity,i->id()));
_peers.set(rp->address(),rp);
}
}
SharedPtr<Path> path(rp->getAppropriatePath(now,false));
if (path) {
const long pq = path->quality(now);
if (pq < bestQuality) {
bestQuality = pq;
_bestRoot = rp;
}
}
}
}
return _bestRoot;
}
/**
* Get the best relay to a given address, which may or may not be a root
*
* @param now Current time
* @param toAddr Destination address
* @return Best current relay or NULL if none
*/
inline SharedPtr<Peer> findRelayTo(const int64_t now,const Address &toAddr)
{
// TODO: in the future this will check 'mesh-like' relays and, if enabled, consult LF for other roots (e.g. when this node is itself a root)
return root(now);
}
/**
* @param allPeers vector to fill with all current peers
*/
@@ -387,9 +438,12 @@ private:
std::pair<InetAddress,ZT_PhysicalPathConfiguration> _physicalPathConfig[ZT_MAX_CONFIGURABLE_PATHS];
unsigned int _numConfiguredPhysicalPaths;
std::vector<Root> _roots;
SharedPtr<Peer> _bestRoot;
int64_t _lastRankedBestRoot;
Hashtable< Address,SharedPtr<Peer> > _peers;
Hashtable< Path::HashKey,SharedPtr<Path> > _paths;
Mutex _roots_m;
Mutex _bestRoot_m;
Mutex _peers_m;
Mutex _paths_m;
};
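Finally, a short usage sketch of the new root() accessor; it mirrors how the updated Switch::requestWhois() earlier in this diff picks an upstream.

// root(now) returns the cached best root, re-ranking at most once every
// ZT_FIND_BEST_ROOT_PERIOD milliseconds, or NULL if no root is currently reachable.
const SharedPtr<Peer> up(RR->topology->root(now));
if (up) {
	// e.g. address an outgoing WHOIS to up->address() and send it directly,
	// as the updated Switch::requestWhois() does.
}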