Wire through external path lookup. Static paths should now work.

Adam Ierymenko 2016-11-22 14:23:13 -08:00
parent c6c578ce1d
commit 84732fcb12
7 changed files with 45 additions and 16 deletions

View File

@@ -320,6 +320,11 @@
  */
 #define ZT_MIN_UNITE_INTERVAL 30000
 
+/**
+ * How often should peers try memorized or statically defined paths?
+ */
+#define ZT_TRY_MEMORIZED_PATH_INTERVAL 30000
+
 /**
  * Sanity limit on maximum bridge routes
  *

View File

@@ -683,7 +683,7 @@ void Node::postTrace(const char *module,unsigned int line,const char *fmt,...)
 
 uint64_t Node::prng()
 {
-	unsigned int p = (++_prngStreamPtr % (sizeof(_prngStream) / sizeof(uint64_t)));
+	unsigned int p = (++_prngStreamPtr % ZT_NODE_PRNG_BUF_SIZE);
 	if (!p)
 		_prng.encrypt12(_prngStream,_prngStream,sizeof(_prngStream));
 	return _prngStream[p];
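A note on the hunk above: _prngStreamPtr walks a buffer of Salsa20-encrypted words, and the entire buffer is re-encrypted whenever the index wraps to zero; the Node.hpp hunks below also grow that buffer from 16 to 64 words. Below is a minimal self-contained sketch of the same ring-refill technique; the class name and the toy xorshift mix (standing in for Salsa20's encrypt12) are illustrative, not ZeroTier code.

#include <cstdint>
#include <cstddef>

// Ring-refill PRNG sketch: hand out words from a buffer and re-mix the
// whole buffer in one pass each time the read index wraps around.
class RingPrng {
public:
	RingPrng() : _ptr(0)
	{
		for (std::size_t i=0;i<BUF;++i)
			_stream[i] = 0x9e3779b97f4a7c15ULL * (uint64_t)(i + 1); // arbitrary nonzero seed
	}
	uint64_t next()
	{
		const unsigned int p = (unsigned int)(++_ptr % BUF);
		if (!p)
			remix(); // index wrapped: refresh the entire buffer at once
		return _stream[p];
	}
private:
	static const std::size_t BUF = 64; // mirrors ZT_NODE_PRNG_BUF_SIZE
	void remix()
	{
		// Toy xorshift mix; the real code encrypts the buffer in place with Salsa20.
		for (std::size_t i=0;i<BUF;++i) {
			uint64_t x = _stream[i];
			x ^= x << 13; x ^= x >> 7; x ^= x << 17;
			_stream[i] = x;
		}
	}
	unsigned int _ptr;
	uint64_t _stream[BUF];
};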

View File

@@ -49,6 +49,9 @@
 #define ZT_EXPECTING_REPLIES_BUCKET_MASK1 255
 #define ZT_EXPECTING_REPLIES_BUCKET_MASK2 31
 
+// Size of PRNG stream buffer
+#define ZT_NODE_PRNG_BUF_SIZE 64
+
 namespace ZeroTier {
 
 /**
@@ -195,7 +198,7 @@ public:
 #endif
 
 	bool shouldUsePathForZeroTierTraffic(const Address &ztaddr,const InetAddress &localAddress,const InetAddress &remoteAddress);
-	inline bool getPathHint(const Address &ztaddr,int family,InetAddress &addr) { return ( (_cb.pathLookupFunction) ? (_cb.pathLookupFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,ztaddr.toInt(),family,reinterpret_cast<struct sockaddr_storage *>(&addr)) != 0) : false ); }
+	inline bool externalPathLookup(const Address &ztaddr,int family,InetAddress &addr) { return ( (_cb.pathLookupFunction) ? (_cb.pathLookupFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,ztaddr.toInt(),family,reinterpret_cast<struct sockaddr_storage *>(&addr)) != 0) : false ); }
 
 	uint64_t prng();
 	void postCircuitTestReport(const ZT_CircuitTestReport *report);
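The renamed externalPathLookup() above is a thin wrapper over the host-supplied pathLookupFunction in the node's callback table. Judging from that call site, a host-side callback that answers with a single static IPv4 path might look like the sketch below. The peer address, IP, and include path are made-up examples; the parameter order and the nonzero-means-filled return convention are read directly off the call above.

#include <cstring>
#include <sys/socket.h>
#include <arpa/inet.h>
#include "ZeroTierOne.h" // for ZT_Node (include path is an assumption)

// Hypothetical host callback matching the invocation in Node::externalPathLookup().
// Writes a static address for the queried ZeroTier address into *result and
// returns nonzero on success.
static int examplePathLookup(ZT_Node *node,void *uptr,uint64_t ztAddress,int desiredFamily,struct sockaddr_storage *result)
{
	(void)node; (void)uptr;
	if (ztAddress == 0x1234567890ULL) { // made-up peer with a known static IP
		// Peer::tryMemorizedPath() below passes -1 for the family, presumably meaning "any."
		if ((desiredFamily < 0)||(desiredFamily == AF_INET)) {
			struct sockaddr_in sa;
			memset(&sa,0,sizeof(sa));
			sa.sin_family = AF_INET;
			sa.sin_port = htons(9993); // ZeroTier's default UDP port
			sa.sin_addr.s_addr = inet_addr("198.51.100.10"); // documentation address, stand-in
			memcpy(result,&sa,sizeof(sa));
			return 1;
		}
	}
	return 0; // no static path known for this peer
}

Registration would presumably be a matter of assigning this function to the pathLookupFunction field of the callbacks struct (the _cb member seen above) when the node is constructed.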
@@ -284,7 +287,7 @@ private:
 
 	unsigned int _prngStreamPtr;
 	Salsa20 _prng;
-	uint64_t _prngStream[16]; // repeatedly encrypted with _prng to yield a high-quality non-crypto PRNG stream
+	uint64_t _prngStream[ZT_NODE_PRNG_BUF_SIZE]; // repeatedly encrypted with _prng to yield a high-quality non-crypto PRNG stream
 	uint64_t _now;
 	uint64_t _lastPingCheck;

View File

@@ -43,6 +43,7 @@ static uint32_t _natKeepaliveBuf = 0;
 Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity) :
 	_lastReceive(0),
 	_lastNontrivialReceive(0),
+	_lastTriedMemorizedPath(0),
 	_lastDirectPathPushSent(0),
 	_lastDirectPathPushReceive(0),
 	_lastCredentialRequestSent(0),
@@ -373,6 +374,16 @@ void Peer::attemptToContactAt(const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now)
 	}
 }
 
+void Peer::tryMemorizedPath(uint64_t now)
+{
+	if ((now - _lastTriedMemorizedPath) >= ZT_TRY_MEMORIZED_PATH_INTERVAL) {
+		_lastTriedMemorizedPath = now;
+		InetAddress mp;
+		if (RR->node->externalPathLookup(_id.address(),-1,mp))
+			attemptToContactAt(InetAddress(),mp,now);
+	}
+}
+
 bool Peer::doPingAndKeepalive(uint64_t now,int inetAddressFamily)
 {
 	Mutex::Lock _l(_paths_m);

View File

@@ -164,6 +164,13 @@ public:
 	 */
 	void attemptToContactAt(const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now);
 
+	/**
+	 * Try a memorized or statically defined path if any are known
+	 *
+	 * Under the hood this is done periodically based on ZT_TRY_MEMORIZED_PATH_INTERVAL.
+	 */
+	void tryMemorizedPath(uint64_t now);
+
 	/**
 	 * Send pings or keepalives depending on configured timeouts
 	 *
@@ -435,6 +442,7 @@ private:
 	uint8_t _remoteClusterOptimal6[16];
 	uint64_t _lastReceive; // direct or indirect
 	uint64_t _lastNontrivialReceive; // frames, things like netconf, etc.
+	uint64_t _lastTriedMemorizedPath;
 	uint64_t _lastDirectPathPushSent;
 	uint64_t _lastDirectPathPushReceive;
 	uint64_t _lastCredentialRequestSent;

View File

@@ -710,12 +710,12 @@ bool Switch::_trySend(const Packet &packet,bool encrypt)
 	if (peer) {
 		const uint64_t now = RR->node->now();
 
-		// First get the best path, and if it's dead (and this is not a root)
-		// we attempt to re-activate that path but this packet will flow
-		// upstream. If the path comes back alive, it will be used in the future.
-		// For roots we don't do the alive check since roots are not required
-		// to send heartbeats "down" and because we have to at least try to
-		// go somewhere.
+		/* First get the best path, and if it's dead (and this is not a root)
+		 * we attempt to re-activate that path but this packet will flow
+		 * upstream. If the path comes back alive, it will be used in the future.
+		 * For roots we don't do the alive check since roots are not required
+		 * to send heartbeats "down" and because we have to at least try to
+		 * go somewhere. */
 
 		SharedPtr<Path> viaPath(peer->getBestPath(now,false));
 		if ( (viaPath) && (!viaPath->alive(now)) && (!RR->topology->isRoot(peer->identity())) ) {
@@ -724,7 +724,8 @@ bool Switch::_trySend(const Packet &packet,bool encrypt)
 			viaPath.zero();
 		}
 		if (!viaPath) {
-			SharedPtr<Peer> relay(RR->topology->getUpstreamPeer());
+			peer->tryMemorizedPath(now); // periodically attempt memorized or statically defined paths, if any are known
+			const SharedPtr<Peer> relay(RR->topology->getUpstreamPeer());
 			if ( (!relay) || (!(viaPath = relay->getBestPath(now,false))) ) {
 				if (!(viaPath = peer->getBestPath(now,true)))
 					return false;
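Taken together with the previous hunk, the send path now falls through in this order: best direct path; if that path is dead and the peer is not a root, drop it (while still trying to revive it) and fall through; fire a rate-limited memorized/static path attempt and relay via an upstream; and as a last resort take the peer's best path with the relaxed getBestPath flag, which appears to admit non-alive paths. A condensed sketch of that ladder with stub types (the real code uses SharedPtr<Path>, Peer, and Topology):

#include <memory>

// Stub sketch of the selection order in Switch::_trySend() after this
// commit. "bestIncludingDead" models getBestPath(now,true), whose exact
// semantics are an assumption here.
struct Path { bool alive; };

std::shared_ptr<Path> pickVia(std::shared_ptr<Path> best,
                              bool peerIsRoot,
                              std::shared_ptr<Path> upstreamBest,
                              std::shared_ptr<Path> bestIncludingDead,
                              void (*tryMemorizedPath)())
{
	std::shared_ptr<Path> via(best);
	if ((via)&&(!via->alive)&&(!peerIsRoot))
		via.reset(); // dead non-root path: revive attempt happens, but this packet goes upstream
	if (!via) {
		tryMemorizedPath();          // new in this commit: rate-limited static/memorized attempt
		via = upstreamBest;          // prefer an upstream relay's best path
		if (!via)
			via = bestIncludingDead; // last resort
	}
	return via;
}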

View File

@@ -1163,6 +1163,7 @@ public:
 
 	// Internal implementation methods -----------------------------------------
 
+	// Must be called after _localConfig is read or modified
 	void applyLocalConfig()
 	{
 		Mutex::Lock _l(_localConfig_m);
@@ -1872,6 +1873,12 @@ public:
 			}
 		}
 
+		/* Note: I do not think we need to scan for overlap with managed routes
+		 * because of the "route forking" and interface binding that we do. This
+		 * ensures (we hope) that ZeroTier traffic will still take the physical
+		 * path even if its managed routes override this for other traffic. Will
+		 * revisit if we see recursion problems. */
+
 		// Check blacklists
 		const Hashtable< uint64_t,std::vector<InetAddress> > *blh = (const Hashtable< uint64_t,std::vector<InetAddress> > *)0;
 		const std::vector<InetAddress> *gbl = (const std::vector<InetAddress> *)0;
@@ -1897,12 +1904,6 @@ public:
 			}
 		}
 
-		/* Note: I do not think we need to scan for overlap with managed routes
-		 * because of the "route forking" and interface binding that we do. This
-		 * ensures (we hope) that ZeroTier traffic will still take the physical
-		 * path even if its managed routes override this for other traffic. Will
-		 * revisit if we see problems with this. */
-
 		return 1;
 	}
 