Mirror of https://github.com/zerotier/ZeroTierOne.git

commit 4c455876f9 (parent cecfa99b7b)

    Revise peer path weighting to always prioritize cluster-optimal paths.
node/IncomingPacket.cpp

@@ -70,9 +70,8 @@ bool IncomingPacket::tryDecode(const RuntimeEnvironment *RR,bool deferred)
 			return true;
 		}
 
-		//TRACE("<< %s from %s(%s)",Packet::verbString(v),sourceAddress.toString().c_str(),_remoteAddress.toString().c_str());
-
 		const Packet::Verb v = verb();
+		//TRACE("<< %s from %s(%s)",Packet::verbString(v),sourceAddress.toString().c_str(),_remoteAddress.toString().c_str());
 		switch(v) {
 			//case Packet::VERB_NOP:
 			default: // ignore unknown verbs, but if they pass auth check they are "received"
@@ -933,7 +932,15 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
 			switch(addrType) {
 				case 4: {
 					InetAddress a(field(ptr,4),4,at<uint16_t>(ptr + 4));
-					if ( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_FORGET_PATH) == 0) && ( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) != 0) || (!peer->hasActivePathTo(now,a)) ) && (RR->node->shouldUsePathForZeroTierTraffic(_localAddress,a)) ) {
+
+					bool redundant = false;
+					if ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) != 0) {
+						peer->setClusterOptimalPathForAddressFamily(a);
+					} else {
+						redundant = peer->hasActivePathTo(now,a);
+					}
+
+					if ( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_FORGET_PATH) == 0) && (!redundant) && (RR->node->shouldUsePathForZeroTierTraffic(_localAddress,a)) ) {
 						if (++countPerScope[(int)a.ipScope()][0] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
 							TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
 							peer->sendHELLO(InetAddress(),a,now);
@@ -944,7 +951,15 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
 				} break;
 				case 6: {
 					InetAddress a(field(ptr,16),16,at<uint16_t>(ptr + 16));
-					if ( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_FORGET_PATH) == 0) && ( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) != 0) || (!peer->hasActivePathTo(now,a)) ) && (RR->node->shouldUsePathForZeroTierTraffic(_localAddress,a)) ) {
+
+					bool redundant = false;
+					if ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_CLUSTER_REDIRECT) != 0) {
+						peer->setClusterOptimalPathForAddressFamily(a);
+					} else {
+						redundant = peer->hasActivePathTo(now,a);
+					}
+
+					if ( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_FORGET_PATH) == 0) && (!redundant) && (RR->node->shouldUsePathForZeroTierTraffic(_localAddress,a)) ) {
 						if (++countPerScope[(int)a.ipScope()][1] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
 							TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
 							peer->sendHELLO(InetAddress(),a,now);
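Both address-family branches of _doPUSH_DIRECT_PATHS now split the old single condition into two steps: a push flagged as a cluster redirect first re-weights the peer's existing paths, and only non-redundant, non-forgotten addresses are then contacted. A condensed sketch of that decision flow follows; the flag bit values are assumed purely for illustration, and the per-scope limits and shouldUsePathForZeroTierTraffic() check are omitted.

#include <cstdio>

// Assumed bit values for illustration only; the real constants live in the ZeroTier headers.
static const unsigned int FLAG_FORGET_PATH = 0x01;
static const unsigned int FLAG_CLUSTER_REDIRECT = 0x02;

// Stubbed peer state for this sketch: pretend we already have a path to address 42.
static bool hasActivePathTo(int addr) { return (addr == 42); }
static void setClusterOptimalPathForAddressFamily(int addr) { printf("re-weight paths, %d is now cluster optimal\n",addr); }

// Mirrors the restructured per-address handling in _doPUSH_DIRECT_PATHS.
static void handlePushedAddress(unsigned int flags,int addr)
{
	bool redundant = false;
	if ((flags & FLAG_CLUSTER_REDIRECT) != 0) {
		// A cluster redirect always re-weights existing paths, even if we already know this address.
		setClusterOptimalPathForAddressFamily(addr);
	} else {
		// Otherwise only brand-new addresses are worth contacting.
		redundant = hasActivePathTo(addr);
	}

	if (((flags & FLAG_FORGET_PATH) == 0) && (!redundant))
		printf("send HELLO to %d\n",addr);
}

int main()
{
	handlePushedAddress(0,42);                     // already have this path: no HELLO
	handlePushedAddress(FLAG_CLUSTER_REDIRECT,42); // redirect: re-weight and HELLO anyway
	handlePushedAddress(0,7);                      // new address: HELLO
	return 0;
}

The behavioral change is the redirect branch: a CLUSTER_REDIRECT address is acted on even when a path to it already exists, because the point is to move traffic, not just to learn a new endpoint.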
node/Packet.cpp

@@ -22,7 +22,7 @@ namespace ZeroTier {
 
 const unsigned char Packet::ZERO_KEY[32] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
 
-#ifdef ZT_TRACE
+//#ifdef ZT_TRACE
 
 const char *Packet::verbString(Verb v)
 	throw()
@@ -68,7 +68,7 @@ const char *Packet::errorString(ErrorCode e)
 	return "(unknown)";
 }
 
-#endif // ZT_TRACE
+//#endif // ZT_TRACE
 
 void Packet::armor(const void *key,bool encryptPayload)
 {
node/Packet.hpp

@@ -1052,12 +1052,12 @@ public:
 		ERROR_UNWANTED_MULTICAST = 8
 	};
 
-#ifdef ZT_TRACE
+//#ifdef ZT_TRACE
 	static const char *verbString(Verb v)
 		throw();
 	static const char *errorString(ErrorCode e)
 		throw();
-#endif
+//#endif
 
 	template<unsigned int C2>
 	Packet(const Buffer<C2> &b) :
node/Path.hpp

@@ -31,13 +31,21 @@
 /**
  * Flag indicating that this path is suboptimal
  *
- * This is used in cluster mode to indicate that the peer has been directed
- * to a better path. This path can continue to be used but shouldn't be kept
- * or advertised to other cluster members. Not used if clustering is not
- * built and enabled.
+ * Clusters set this flag on remote paths if GeoIP or other routing decisions
+ * indicate that a peer should be handed off to another cluster member.
  */
 #define ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL 0x0001
 
+/**
+ * Flag indicating that this path is optimal
+ *
+ * Peers set this flag on paths that are pushed by a cluster and indicated as
+ * optimal. A second flag is needed since we want to prioritize cluster optimal
+ * paths and de-prioritize sub-optimal paths and for new paths we don't know
+ * which one they are. So we want a trinary state: optimal, suboptimal, unknown.
+ */
+#define ZT_PATH_FLAG_CLUSTER_OPTIMAL 0x0002
+
 /**
  * Maximum return value of preferenceRank()
  */
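The pair of flag bits gives each path a three-way cluster state rather than a boolean, which is what the new comment calls a trinary state. A minimal sketch of how the two bits decode, assuming only the flag values from this header; the describe() helper is illustrative and not part of the ZeroTier API.

#include <cstdint>
#include <cstdio>

// Flag values as defined in this header.
#define ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL 0x0001
#define ZT_PATH_FLAG_CLUSTER_OPTIMAL    0x0002

// Hypothetical helper: map the two flag bits onto the trinary state the
// comment describes (optimal, suboptimal, or unknown for fresh paths).
static const char *describe(uint16_t flags)
{
	if ((flags & ZT_PATH_FLAG_CLUSTER_OPTIMAL) != 0)
		return "cluster optimal";
	if ((flags & ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) != 0)
		return "cluster suboptimal";
	return "unknown (new path, no cluster hint yet)";
}

int main()
{
	printf("%s\n",describe(0));                               // unknown
	printf("%s\n",describe(ZT_PATH_FLAG_CLUSTER_OPTIMAL));    // optimal
	printf("%s\n",describe(ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL)); // suboptimal
	return 0;
}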
@@ -176,17 +184,39 @@ public:
 	 */
 	inline InetAddress::IpScope ipScope() const throw() { return _ipScope; }
 
+	/**
+	 * @param f Value of ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL and inverse of ZT_PATH_FLAG_CLUSTER_OPTIMAL (both are changed)
+	 */
+	inline void setClusterSuboptimal(bool f)
+	{
+		if (f) {
+			_flags = (_flags | ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) & ~ZT_PATH_FLAG_CLUSTER_OPTIMAL;
+		} else {
+			_flags = (_flags | ZT_PATH_FLAG_CLUSTER_OPTIMAL) & ~ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL;
+		}
+	}
+
+	/**
+	 * @return True if ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL is set
+	 */
+	inline bool isClusterSuboptimal() const { return ((_flags & ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) != 0); }
+
+	/**
+	 * @return True if ZT_PATH_FLAG_CLUSTER_OPTIMAL is set
+	 */
+	inline bool isClusterOptimal() const { return ((_flags & ZT_PATH_FLAG_CLUSTER_OPTIMAL) != 0); }
+
 	/**
 	 * @return Preference rank, higher == better (will be less than 255)
 	 */
 	inline unsigned int preferenceRank() const throw()
 	{
-		// First, since the scope enum values in InetAddress.hpp are in order of
-		// use preference rank, we take that. Then we multiple by two, yielding
-		// a sequence like 0, 2, 4, 6, etc. Then if it's IPv6 we add one. This
-		// makes IPv6 addresses of a given scope outrank IPv4 addresses of the
-		// same scope -- e.g. 1 outranks 0. This makes us prefer IPv6, but not
-		// if the address scope/class is of a fundamentally lower rank.
+		/* First, since the scope enum values in InetAddress.hpp are in order of
+		 * use preference rank, we take that. Then we multiple by two, yielding
+		 * a sequence like 0, 2, 4, 6, etc. Then if it's IPv6 we add one. This
+		 * makes IPv6 addresses of a given scope outrank IPv4 addresses of the
+		 * same scope -- e.g. 1 outranks 0. This makes us prefer IPv6, but not
+		 * if the address scope/class is of a fundamentally lower rank. */
 		return ( ((unsigned int)_ipScope << 1) | (unsigned int)(_addr.ss_family == AF_INET6) );
 	}
 
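Note that the new setClusterSuboptimal() does more than toggle one bit: marking a path suboptimal also clears the optimal bit, and clearing suboptimal explicitly promotes the path to optimal, so the two flags are never set at the same time. A small sketch of those transitions, using a standalone flags variable as a stand-in for the real Path::_flags member:

#include <cassert>
#include <cstdint>

#define ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL 0x0001
#define ZT_PATH_FLAG_CLUSTER_OPTIMAL    0x0002

int main()
{
	uint16_t flags = 0; // a freshly learned path: neither bit set ("unknown")

	// setClusterSuboptimal(true): set SUBOPTIMAL, clear OPTIMAL
	flags = (flags | ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) & ~ZT_PATH_FLAG_CLUSTER_OPTIMAL;
	assert((flags & ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) != 0);
	assert((flags & ZT_PATH_FLAG_CLUSTER_OPTIMAL) == 0);

	// setClusterSuboptimal(false): set OPTIMAL, clear SUBOPTIMAL
	flags = (flags | ZT_PATH_FLAG_CLUSTER_OPTIMAL) & ~ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL;
	assert((flags & ZT_PATH_FLAG_CLUSTER_OPTIMAL) != 0);
	assert((flags & ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) == 0);

	return 0;
}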
@@ -199,8 +229,13 @@ public:
 	 * received something) scaled/corrected by the preference rank within the
 	 * ping keepalive window. That way higher ranking paths are preferred but
 	 * not to the point of overriding timeouts and choosing potentially dead
-	 * paths. */
-		return (_lastReceived + (preferenceRank() * (ZT_PEER_DIRECT_PING_DELAY / ZT_PATH_MAX_PREFERENCE_RANK)));
+	 * paths. Finally we increase the score for known to be cluster optimal
+	 * paths and decrease it for paths known to be suboptimal. */
+		uint64_t score = _lastReceived + ZT_PEER_DIRECT_PING_DELAY; // make sure it's never less than ZT_PEER_DIRECT_PING_DELAY to prevent integer underflow
+		score += preferenceRank() * (ZT_PEER_DIRECT_PING_DELAY / ZT_PATH_MAX_PREFERENCE_RANK);
+		score += (uint64_t)(_flags & ZT_PATH_FLAG_CLUSTER_OPTIMAL) * (ZT_PEER_DIRECT_PING_DELAY / 2); // /2 because CLUSTER_OPTIMAL is flag 0x0002
+		score -= (uint64_t)(_flags & ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) * ZT_PEER_DIRECT_PING_DELAY;
+		return score;
 	}
 
 	/**
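The rewritten score() keeps the old freshness-plus-preference weighting and layers a cluster bonus or penalty on top. A self-contained sketch of the arithmetic follows; the constant values here are assumed purely for illustration, since the real ZT_PEER_DIRECT_PING_DELAY and ZT_PATH_MAX_PREFERENCE_RANK are defined elsewhere in the source.

#include <cstdint>
#include <cstdio>

// Assumed values for illustration only.
static const uint64_t ZT_PEER_DIRECT_PING_DELAY = 60000;   // ms (assumed)
static const unsigned int ZT_PATH_MAX_PREFERENCE_RANK = 7; // assumed

static const uint16_t FLAG_CLUSTER_SUBOPTIMAL = 0x0001;
static const uint16_t FLAG_CLUSTER_OPTIMAL = 0x0002;

// Mirrors the arithmetic of the new Path::score() in this commit.
static uint64_t score(uint64_t lastReceived,unsigned int prefRank,uint16_t flags)
{
	uint64_t s = lastReceived + ZT_PEER_DIRECT_PING_DELAY; // never dips below the ping delay, preventing underflow below
	s += prefRank * (ZT_PEER_DIRECT_PING_DELAY / ZT_PATH_MAX_PREFERENCE_RANK);
	s += (uint64_t)(flags & FLAG_CLUSTER_OPTIMAL) * (ZT_PEER_DIRECT_PING_DELAY / 2); // flag is 0x0002, so this adds one full delay
	s -= (uint64_t)(flags & FLAG_CLUSTER_SUBOPTIMAL) * ZT_PEER_DIRECT_PING_DELAY;
	return s;
}

int main()
{
	const uint64_t lastReceived = 1000000000ULL; // arbitrary timestamp for the comparison
	printf("unknown:    %llu\n",(unsigned long long)score(lastReceived,4,0));
	printf("optimal:    %llu\n",(unsigned long long)score(lastReceived,4,FLAG_CLUSTER_OPTIMAL));
	printf("suboptimal: %llu\n",(unsigned long long)score(lastReceived,4,FLAG_CLUSTER_SUBOPTIMAL));
	return 0;
}

With these assumed numbers a cluster-optimal path scores one full ping delay above an otherwise identical unknown path and two above a suboptimal one, enough to dominate tie-breaks without overriding the dead-path timeout logic.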
@@ -259,18 +294,6 @@ public:
 		return false;
 	}
 
-#ifdef ZT_ENABLE_CLUSTER
-	/**
-	 * @param f New value of ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL
-	 */
-	inline void setClusterSuboptimal(bool f) { _flags = ((f) ? (_flags | ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) : (_flags & (~ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL))); }
-
-	/**
-	 * @return True if ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL is set
-	 */
-	inline bool isClusterSuboptimal() const { return ((_flags & ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) != 0); }
-#endif
-
 	/**
 	 * @return Current path probation count (for dead path detect)
 	 */
node/Peer.cpp

@@ -144,17 +144,20 @@ void Peer::received(
 		if (np < ZT_MAX_PEER_NETWORK_PATHS) {
 			slot = &(_paths[np++]);
 		} else {
-			uint64_t slotLRmin = 0xffffffffffffffffULL;
+			uint64_t slotWorstScore = 0xffffffffffffffffULL;
 			for(unsigned int p=0;p<ZT_MAX_PEER_NETWORK_PATHS;++p) {
 				if (!_paths[p].active(now)) {
 					slot = &(_paths[p]);
 					break;
-				} else if (_paths[p].lastReceived() <= slotLRmin) {
-					slotLRmin = _paths[p].lastReceived();
+				} else {
+					const uint64_t score = _paths[p].score();
+					if (score <= slotWorstScore) {
+						slotWorstScore = score;
 						slot = &(_paths[p]);
 					}
 				}
 			}
+		}
 		if (slot) {
 			*slot = Path(localAddr,remoteAddr);
 			slot->received(now);
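With score() now folding in the cluster flags, Peer::received() evicts the lowest-scoring path when the table is full rather than simply the least-recently-heard-from one, so a cluster-optimal path is no longer the first casualty. A minimal sketch of that selection loop over a plain array, with a simplified stand-in for the real Path class:

#include <cstdint>
#include <cstdio>

// Simplified stand-in for ZeroTier's Path; only what the eviction loop needs.
struct PathLike {
	bool active;
	uint64_t score; // higher == better, as returned by Path::score()
};

// Pick the slot to overwrite: the first inactive path, otherwise the worst-scoring one.
static PathLike *pickSlot(PathLike *paths,unsigned int count)
{
	PathLike *slot = nullptr;
	uint64_t slotWorstScore = 0xffffffffffffffffULL;
	for(unsigned int p=0;p<count;++p) {
		if (!paths[p].active) {
			slot = &(paths[p]);
			break;
		} else if (paths[p].score <= slotWorstScore) {
			slotWorstScore = paths[p].score;
			slot = &(paths[p]);
		}
	}
	return slot;
}

int main()
{
	PathLike paths[3] = { {true,500}, {true,200}, {true,900} };
	printf("evict index: %ld\n",(long)(pickSlot(paths,3) - paths)); // index 1, the lowest score
	return 0;
}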
node/Peer.hpp

@@ -127,6 +127,36 @@ public:
 	 */
 	inline Path *getBestPath(uint64_t now) { return _getBestPath(now); }
 
+	/**
+	 * @param now Current time
+	 * @param addr Remote address
+	 * @return True if we have an active path to this destination
+	 */
+	inline bool hasActivePathTo(uint64_t now,const InetAddress &addr) const
+	{
+		for(unsigned int p=0;p<_numPaths;++p) {
+			if ((_paths[p].active(now))&&(_paths[p].address() == addr))
+				return true;
+		}
+		return false;
+	}
+
+	/**
+	 * Set all paths in the same ss_family that are not this one to cluster suboptimal
+	 *
+	 * Addresses in other families are not affected.
+	 *
+	 * @param addr Address to make exclusive
+	 */
+	inline void setClusterOptimalPathForAddressFamily(const InetAddress &addr)
+	{
+		for(unsigned int p=0;p<_numPaths;++p) {
+			if (_paths[p].address().ss_family == addr.ss_family) {
+				_paths[p].setClusterSuboptimal(_paths[p].address() != addr);
+			}
+		}
+	}
+
 	/**
 	 * Send via best path
 	 *
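setClusterOptimalPathForAddressFamily() is what a cluster-redirect push ends up calling: every known path in the pushed address's family is marked suboptimal except the pushed one, which setClusterSuboptimal(false) promotes to optimal; other families are left alone. A rough sketch of that sweep, using simplified stand-ins for the real Path and InetAddress types:

#include <cstdio>

// Simplified stand-ins for illustration; not the real ZeroTier types.
struct FakeAddr { int family; int host; };
struct FakePath {
	FakeAddr addr;
	bool clusterSuboptimal;
	void setClusterSuboptimal(bool f) { clusterSuboptimal = f; } // the real setter also flips the OPTIMAL bit
};
static bool sameAddr(const FakeAddr &a,const FakeAddr &b) { return ((a.family == b.family)&&(a.host == b.host)); }

// Mirror of the new per-address-family sweep in Peer::setClusterOptimalPathForAddressFamily().
static void setClusterOptimalPathForAddressFamily(FakePath *paths,unsigned int n,const FakeAddr &pushed)
{
	for(unsigned int p=0;p<n;++p) {
		if (paths[p].addr.family == pushed.family)
			paths[p].setClusterSuboptimal(!sameAddr(paths[p].addr,pushed));
	}
}

int main()
{
	FakePath paths[3] = { {{4,1},false}, {{4,2},false}, {{6,9},false} }; // two IPv4 paths, one IPv6
	FakeAddr pushed = {4,2}; // cluster redirect points at the second IPv4 path
	setClusterOptimalPathForAddressFamily(paths,3,pushed);
	for(unsigned int p=0;p<3;++p)
		printf("path %u suboptimal=%d\n",p,(int)paths[p].clusterSuboptimal); // 1, 0, 0 -- the IPv6 path is untouched
	return 0;
}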
@@ -282,20 +312,6 @@ public:
 	}
 #endif
 
-	/**
-	 * @param now Current time
-	 * @param addr Remote address
-	 * @return True if peer currently has an active direct path to addr
-	 */
-	inline bool hasActivePathTo(uint64_t now,const InetAddress &addr) const
-	{
-		for(unsigned int p=0;p<_numPaths;++p) {
-			if ((_paths[p].active(now))&&(_paths[p].address() == addr))
-				return true;
-		}
-		return false;
-	}
-
 	/**
 	 * Reset paths within a given scope
 	 *