(1) Get rid of path sorting and just scan paths for the best one, since sorting may have been a premature optimization that introduced a regression and path instability in a few edge cases, and (2) do not attempt to contact remote paths received via PUSH_DIRECT_PATHS if we already have that path and it is already active (an obvious check that should have been done originally)

Adam Ierymenko 2015-12-17 10:53:07 -08:00
parent 3137f43da9
commit 2160164e8c
4 changed files with 60 additions and 72 deletions
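
Background for change (1): instead of keeping _paths sorted and taking the first element, the new code below does a single linear scan for the highest-scoring active path. The scan is O(n) per lookup, never reorders the array (so path slots stay stable between calls), and naturally returns nothing when every path has timed out. A minimal standalone sketch of that selection pattern, using a hypothetical PathEntry type in place of the real Path class:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a path record; the real scan is over Path objects.
struct PathEntry {
	uint64_t score;  // higher == better, as from Path::score()
	bool active;     // only active (non-timed-out) paths are eligible
};

// Scan-for-best: no sorting, so entries keep their positions between calls.
static PathEntry *bestPath(PathEntry *paths,unsigned int numPaths)
{
	PathEntry *best = (PathEntry *)0;
	uint64_t bestScore = 0;
	for(unsigned int i=0;i<numPaths;++i) {
		if ((paths[i].score >= bestScore)&&(paths[i].active)) {
			bestScore = paths[i].score;
			best = &(paths[i]);
		}
	}
	return best; // NULL when no path is active
}

int main()
{
	PathEntry paths[3] = { {100,true},{250,false},{180,true} };
	PathEntry *b = bestPath(paths,3);
	printf("best score: %llu\n",(b) ? (unsigned long long)b->score : 0ULL); // prints 180 (250 is inactive)
	return 0;
}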

node/IncomingPacket.cpp

@@ -952,7 +952,7 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
 			switch(addrType) {
 				case 4: {
 					InetAddress a(field(ptr,4),4,at<uint16_t>(ptr + 4));
-					if ( ((flags & 0x01) == 0) && (Path::isAddressValidForPath(a)) ) {
+					if ( ((flags & 0x01) == 0) && (Path::isAddressValidForPath(a)) && (!peer->hasActivePathTo(now,a)) ) {
 						if (++countPerScope[(int)a.ipScope()][0] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
 							TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
 							peer->sendHELLO(RR,_localAddress,a,now);
@@ -963,7 +963,7 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
 				} break;
 				case 6: {
 					InetAddress a(field(ptr,16),16,at<uint16_t>(ptr + 16));
-					if ( ((flags & 0x01) == 0) && (Path::isAddressValidForPath(a)) ) {
+					if ( ((flags & 0x01) == 0) && (Path::isAddressValidForPath(a)) && (!peer->hasActivePathTo(now,a)) ) {
 						if (++countPerScope[(int)a.ipScope()][1] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
 							TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
 							peer->sendHELLO(RR,_localAddress,a,now);
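
Change (2) in miniature: the added (!peer->hasActivePathTo(now,a)) term short-circuits the HELLO when the pushed address is already a live path. A toy illustration of the same guard, with a mock peer standing in for the real parser and Peer class:

#include <cstdio>
#include <set>
#include <string>

// Mock of Peer::hasActivePathTo(now,addr); the real check also tests liveness.
struct MockPeer {
	std::set<std::string> active;
	bool hasActivePathTo(const std::string &addr) const { return (active.count(addr) != 0); }
};

int main()
{
	MockPeer peer;
	peer.active.insert("192.0.2.10/9993");
	const char *pushed[2] = { "192.0.2.10/9993","198.51.100.7/9993" };
	for(int i=0;i<2;++i) {
		if (peer.hasActivePathTo(pushed[i])) {
			printf("skip %s (already have an active path)\n",pushed[i]); // old code re-sent HELLO here
			continue;
		}
		printf("send HELLO to %s\n",pushed[i]);
	}
	return 0;
}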

node/Path.hpp

@@ -47,6 +47,11 @@
  */
 #define ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL 0x0001
 
+/**
+ * Maximum return value of preferenceRank()
+ */
+#define ZT_PATH_MAX_PREFERENCE_RANK ((ZT_INETADDRESS_MAX_SCOPE << 1) | 1)
+
 namespace ZeroTier {
 
 class RuntimeEnvironment;
@@ -149,9 +154,9 @@ public:
 	inline InetAddress::IpScope ipScope() const throw() { return _ipScope; }
 
 	/**
-	 * @return Preference rank, higher == better
+	 * @return Preference rank, higher == better (will be less than 255)
 	 */
-	inline int preferenceRank() const throw()
+	inline unsigned int preferenceRank() const throw()
 	{
 		// First, since the scope enum values in InetAddress.hpp are in order of
 		// use preference rank, we take that. Then we multiply by two, yielding
@@ -159,7 +164,20 @@ public:
 		// makes IPv6 addresses of a given scope outrank IPv4 addresses of the
 		// same scope -- e.g. 1 outranks 0. This makes us prefer IPv6, but not
 		// if the address scope/class is of a fundamentally lower rank.
-		return ( ((int)_ipScope * 2) + ((_addr.ss_family == AF_INET6) ? 1 : 0) );
+		return ( ((unsigned int)_ipScope << 1) | (unsigned int)(_addr.ss_family == AF_INET6) );
 	}
 
+	/**
+	 * @return This path's overall score (higher == better)
+	 */
+	inline uint64_t score() const throw()
+	{
+		/* We compute the score based on the "freshness" of the path (when we last
+		 * received something) scaled/corrected by the preference rank within the
+		 * ping keepalive window. That way higher ranking paths are preferred but
+		 * not to the point of overriding timeouts and choosing potentially dead
+		 * paths. */
+		return (_lastReceived + (preferenceRank() * (ZT_PEER_DIRECT_PING_DELAY / ZT_PATH_MAX_PREFERENCE_RANK)));
+	}
+
 	/**
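
A worked example of the new ranking math. The scope numbering and the ZT_PEER_DIRECT_PING_DELAY value below are assumptions for illustration only (the real values live in InetAddress.hpp and Constants.hpp); with an assumed max scope of 7, ZT_PATH_MAX_PREFERENCE_RANK is (7 << 1) | 1 = 15, so each rank step is worth PING_DELAY / 15 milliseconds of freshness credit:

#include <cstdint>
#include <cstdio>

static const uint64_t PING_DELAY = 60000; // assumed ZT_PEER_DIRECT_PING_DELAY (ms)
static const unsigned int MAX_RANK = 15;  // (assumed max scope 7 << 1) | 1

// Mirrors preferenceRank(): scope shifted left once, IPv6 bit in the low bit.
static unsigned int rank(unsigned int ipScope,bool isV6)
{
	return ((ipScope << 1) | (unsigned int)isV6);
}

// Mirrors score(): freshness plus a rank bonus capped at one ping interval.
static uint64_t score(uint64_t lastReceived,unsigned int preferenceRank)
{
	return (lastReceived + (preferenceRank * (PING_DELAY / MAX_RANK)));
}

int main()
{
	const unsigned int top = rank(7,true);  // 15: best scope, IPv6
	const unsigned int low = rank(0,false); // 0: worst scope, IPv4
	// Top rank earns 15 * 4000 = 60000 ms of credit, so it beats a low-rank
	// path heard from up to one ping interval more recently...
	printf("%llu > %llu\n",(unsigned long long)score(1000000,top),(unsigned long long)score(1050000,low));
	// ...but a path fresher by more than the interval still wins, which keeps
	// rank from overriding timeouts and selecting dead paths.
	printf("%llu < %llu\n",(unsigned long long)score(1000000,top),(unsigned long long)score(1070000,low));
	return 0;
}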

node/Peer.cpp

@@ -182,7 +182,6 @@ void Peer::received(
 				slot->setClusterSuboptimal(suboptimalPath);
 #endif
 				_numPaths = np;
-				_sortPaths(now);
 			}
 
 #ifdef ZT_ENABLE_CLUSTER
@@ -362,7 +361,6 @@ bool Peer::resetWithinScope(const RuntimeEnvironment *RR,InetAddress::IpScope sc
 		++x;
 	}
 	_numPaths = y;
-	_sortPaths(now);
 	return (y < np);
 }
@@ -501,58 +499,34 @@ void Peer::clean(const RuntimeEnvironment *RR,uint64_t now)
 		}
 	}
 }
 
-struct _SortPathsByQuality
-{
-	uint64_t _now;
-	_SortPathsByQuality(const uint64_t now) : _now(now) {}
-	inline bool operator()(const Path &a,const Path &b) const
-	{
-		const uint64_t qa = (
-			((uint64_t)a.active(_now) << 63) |
-			(((uint64_t)(a.preferenceRank() & 0xfff)) << 51) |
-			((uint64_t)a.lastReceived() & 0x7ffffffffffffULL) );
-		const uint64_t qb = (
-			((uint64_t)b.active(_now) << 63) |
-			(((uint64_t)(b.preferenceRank() & 0xfff)) << 51) |
-			((uint64_t)b.lastReceived() & 0x7ffffffffffffULL) );
-		return (qb < qa); // invert sense to sort in descending order
-	}
-};
-
-void Peer::_sortPaths(const uint64_t now)
-{
-	// assumes _lock is locked
-	_lastPathSort = now;
-	std::sort(&(_paths[0]),&(_paths[_numPaths]),_SortPathsByQuality(now));
-}
-
 Path *Peer::_getBestPath(const uint64_t now)
 {
 	// assumes _lock is locked
-	if ((now - _lastPathSort) >= ZT_PEER_PATH_SORT_INTERVAL)
-		_sortPaths(now);
-	if (_paths[0].active(now)) {
-		return &(_paths[0]);
-	} else {
-		_sortPaths(now);
-		if (_paths[0].active(now))
-			return &(_paths[0]);
+	Path *bestPath = (Path *)0;
+	uint64_t bestPathScore = 0;
+	for(unsigned int i=0;i<_numPaths;++i) {
+		const uint64_t score = _paths[i].score();
+		if ((score >= bestPathScore)&&(_paths[i].active(now))) {
+			bestPathScore = score;
+			bestPath = &(_paths[i]);
+		}
 	}
-	return (Path *)0;
+	return bestPath;
 }
 
 Path *Peer::_getBestPath(const uint64_t now,int inetAddressFamily)
 {
 	// assumes _lock is locked
-	if ((now - _lastPathSort) >= ZT_PEER_PATH_SORT_INTERVAL)
-		_sortPaths(now);
-	for(int k=0;k<2;++k) { // try once, and if it fails sort and try one more time
-		for(unsigned int i=0;i<_numPaths;++i) {
-			if ((_paths[i].active(now))&&((int)_paths[i].address().ss_family == inetAddressFamily))
-				return &(_paths[i]);
-		}
-		_sortPaths(now);
+	Path *bestPath = (Path *)0;
+	uint64_t bestPathScore = 0;
+	for(unsigned int i=0;i<_numPaths;++i) {
+		const uint64_t score = _paths[i].score();
+		if (((int)_paths[i].address().ss_family == inetAddressFamily)&&(score >= bestPathScore)&&(_paths[i].active(now))) {
+			bestPathScore = score;
+			bestPath = &(_paths[i]);
+		}
 	}
-	return (Path *)0;
+	return bestPath;
 }
 
 } // namespace ZeroTier

node/Peer.hpp

@@ -152,7 +152,7 @@ public:
 	 */
 	inline Path *send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
 	{
-		Path *bestPath = getBestPath(now);
+		Path *const bestPath = getBestPath(now);
 		if (bestPath) {
 			if (bestPath->send(RR,data,len,now))
 				return bestPath;
@@ -185,7 +185,7 @@ public:
 	bool doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now,int inetAddressFamily);
 
 	/**
-	 * Push direct paths if we haven't done so in [rate limit] milliseconds
+	 * Push direct paths back to self if we haven't done so in the configured timeout
 	 *
 	 * @param RR Runtime environment
 	 * @param path Remote path to use to send the push
@@ -232,7 +232,7 @@ public:
 	inline uint64_t lastAnnouncedTo() const throw() { return _lastAnnouncedTo; }
 
 	/**
-	 * @return True if this peer is actively sending real network frames
+	 * @return True if this peer has sent us real network traffic recently
 	 */
 	inline uint64_t activelyTransferringFrames(uint64_t now) const throw() { return ((now - lastFrame()) < ZT_PEER_ACTIVITY_TIMEOUT); }
@@ -283,7 +283,7 @@ public:
 	inline bool hasActiveDirectPath(uint64_t now) const
 	{
 		Mutex::Lock _l(_lock);
-		for(unsigned int p=0,np=_numPaths;p<np;++p) {
+		for(unsigned int p=0;p<_numPaths;++p) {
 			if (_paths[p].active(now))
 				return true;
 		}
@@ -306,6 +306,21 @@ public:
 	}
 #endif
 
+	/**
+	 * @param now Current time
+	 * @param addr Remote address
+	 * @return True if peer currently has an active direct path to addr
+	 */
+	inline bool hasActivePathTo(uint64_t now,const InetAddress &addr) const
+	{
+		Mutex::Lock _l(_lock);
+		for(unsigned int p=0;p<_numPaths;++p) {
+			if ((_paths[p].active(now))&&(_paths[p].address() == addr))
+				return true;
+		}
+		return false;
+	}
+
 	/**
 	 * Reset paths within a given scope
 	 *
@@ -341,6 +356,7 @@ public:
 	inline unsigned int remoteVersionMajor() const throw() { return _vMajor; }
 	inline unsigned int remoteVersionMinor() const throw() { return _vMinor; }
 	inline unsigned int remoteVersionRevision() const throw() { return _vRevision; }
+	inline bool remoteVersionKnown() const throw() { return ((_vMajor > 0)||(_vMinor > 0)||(_vRevision > 0)); }
 
 	/**
@@ -386,25 +402,6 @@ public:
 	 */
 	void clean(const RuntimeEnvironment *RR,uint64_t now);
 
-	/**
-	 * Remove all paths with this remote address
-	 *
-	 * @param addr Remote address to remove
-	 */
-	inline void removePathByAddress(const InetAddress &addr)
-	{
-		Mutex::Lock _l(_lock);
-		unsigned int np = _numPaths;
-		unsigned int x = 0;
-		unsigned int y = 0;
-		while (x < np) {
-			if (_paths[x].address() != addr)
-				_paths[y++] = _paths[x];
-			++x;
-		}
-		_numPaths = y;
-	}
-
 	/**
 	 * Update direct path push stats and return true if we should respond
 	 *
@@ -572,7 +569,6 @@ public:
 	}
 
 private:
-	void _sortPaths(const uint64_t now);
 	Path *_getBestPath(const uint64_t now);
 	Path *_getBestPath(const uint64_t now,int inetAddressFamily);