commit 68a966fa55
Merge branch 'dev' of http://git.int.zerotier.com/ZeroTier/ZeroTierOne into dev
@@ -1053,11 +1053,6 @@ typedef struct
     */
    uint64_t trustedPathId;
 
-   /**
-    * Is path active?
-    */
-   int active;
-
    /**
     * Is path preferred?
     */
@@ -264,7 +264,7 @@
 /**
  * Peers forget paths that have not spoken in this long
  */
-#define ZT_PEER_PATH_EXPIRATION ((ZT_PEER_PING_PERIOD * 3) + 3000)
+#define ZT_PEER_PATH_EXPIRATION ((ZT_PEER_PING_PERIOD * 4) + 3000)
 
 /**
  * Timeout for overall peer activity (measured from last receive)
@@ -36,7 +36,7 @@
 #include "Peer.hpp"
 
 // Uncomment to make the rules engine dump trace info to stdout
-#define ZT_RULES_ENGINE_DEBUGGING 1
+//#define ZT_RULES_ENGINE_DEBUGGING 1
 
 namespace ZeroTier {
 
@@ -243,7 +243,7 @@ public:
             lastReceiveFromUpstream = std::max(p->lastReceive(),lastReceiveFromUpstream);
         } else if (p->activelyTransferringFrames(_now)) {
             // Normal nodes get their preferred link kept alive if the node has generated frame traffic recently
-            p->doPingAndKeepalive(_now,0);
+            p->doPingAndKeepalive(_now,-1);
         }
     }
 
@@ -422,7 +422,6 @@ ZT_PeerList *Node::peers() const
             memcpy(&(p->paths[p->pathCount].address),&((*path)->address()),sizeof(struct sockaddr_storage));
             p->paths[p->pathCount].lastSend = (*path)->lastOut();
             p->paths[p->pathCount].lastReceive = (*path)->lastIn();
-            p->paths[p->pathCount].active = (*path)->alive(_now) ? 1 : 0;
             p->paths[p->pathCount].preferred = (*path == bestp) ? 1 : 0;
             p->paths[p->pathCount].trustedPathId = RR->topology->getOutboundPathTrust((*path)->address());
             ++p->pathCount;
@@ -152,16 +152,10 @@ public:
     inline InetAddress::IpScope ipScope() const { return _ipScope; }
 
     /**
-     * @return Preference rank, higher == better (will be less than 255)
+     * @return Preference rank, higher == better
      */
     inline unsigned int preferenceRank() const
     {
-        /* First, since the scope enum values in InetAddress.hpp are in order of
-         * use preference rank, we take that. Then we multiple by two, yielding
-         * a sequence like 0, 2, 4, 6, etc. Then if it's IPv6 we add one. This
-         * makes IPv6 addresses of a given scope outrank IPv4 addresses of the
-         * same scope -- e.g. 1 outranks 0. This makes us prefer IPv6, but not
-         * if the address scope/class is of a fundamentally lower rank. */
         return ( ((unsigned int)_ipScope << 1) | (unsigned int)(_addr.ss_family == AF_INET6) );
     }
 
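The one-line rank expression kept above packs the scope ordering and an IPv6 tiebreaker into a single integer: doubling the scope leaves the low bit free, so IPv6 outranks IPv4 only within the same scope. A minimal standalone sketch of the same arithmetic, using a stand-in enum rather than the real InetAddress::IpScope (the names and values below are illustrative only):

#include <cstdio>

// Stand-in for InetAddress::IpScope; only the ordering matters (higher = more preferred).
enum ExampleIpScope { SCOPE_LOOPBACK = 1, SCOPE_PRIVATE = 5, SCOPE_GLOBAL = 7 };

// Same shape as the diffed preferenceRank(): (scope << 1) | isIPv6.
static unsigned int exampleRank(ExampleIpScope scope,bool ipv6)
{
    return ((unsigned int)scope << 1) | (unsigned int)ipv6;
}

int main()
{
    std::printf("global v4  = %u\n",exampleRank(SCOPE_GLOBAL,false));  // 14
    std::printf("global v6  = %u\n",exampleRank(SCOPE_GLOBAL,true));   // 15: wins within the same scope
    std::printf("private v6 = %u\n",exampleRank(SCOPE_PRIVATE,true));  // 11: still loses to any global address
    return 0;
}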
@@ -213,7 +207,7 @@ public:
     /**
      * @return True if this path needs a heartbeat
      */
-    inline bool needsHeartbeat(const uint64_t now) const { return ((now - _lastOut) > ZT_PATH_HEARTBEAT_PERIOD); }
+    inline bool needsHeartbeat(const uint64_t now) const { return ((now - _lastOut) >= ZT_PATH_HEARTBEAT_PERIOD); }
 
     /**
      * @return Last time we sent something
@@ -122,10 +122,11 @@ void Peer::received(
 	{
 		Mutex::Lock _l(_paths_m);
 		for(unsigned int p=0;p<_numPaths;++p) {
-			if (_paths[p].path == path) { // paths are canonicalized so pointer compare is good here
+			if (_paths[p].path->address() == path->address()) {
 				_paths[p].lastReceive = now;
+				_paths[p].path = path; // local address may have changed!
 #ifdef ZT_ENABLE_CLUSTER
-				_paths[p].clusterSuboptimal = suboptimalPath;
+				_paths[p].clusterWeights = (unsigned int)(!suboptimalPath);
 #endif
 				pathIsConfirmed = true;
 				break;
@@ -133,49 +134,65 @@ void Peer::received(
 		}
 	}
 
-	if ((!pathIsConfirmed)&&(RR->node->shouldUsePathForZeroTierTraffic(path->localAddress(),path->address()))) {
+	if ( (!pathIsConfirmed) && (RR->node->shouldUsePathForZeroTierTraffic(path->localAddress(),path->address())) ) {
 		if (verb == Packet::VERB_OK) {
 			Mutex::Lock _l(_paths_m);
 
-			unsigned int slot = 0;
+			unsigned int slot;
 			if (_numPaths < ZT_MAX_PEER_NETWORK_PATHS) {
 				slot = _numPaths++;
 			} else {
-				uint64_t oldest = 0ULL;
-				unsigned int oldestPath = 0;
+				// First try to replace the worst within the same address family, if possible
+				int worstSlot = -1;
+				uint64_t worstScore = 0xffffffffffffffffULL;
 				for(unsigned int p=0;p<_numPaths;++p) {
-					if (_paths[p].lastReceive < oldest) {
-						oldest = _paths[p].lastReceive;
-						oldestPath = p;
+					if (_paths[p].path->address().ss_family == path->address().ss_family) {
+						const uint64_t s = _pathScore(p);
+						if (s < worstScore) {
+							worstScore = s;
+							worstSlot = (int)p;
+						}
 					}
 				}
-				slot = oldestPath;
+				if (worstSlot >= 0) {
+					slot = (unsigned int)worstSlot;
+				} else {
+					slot = ZT_MAX_PEER_NETWORK_PATHS - 1;
+					for(unsigned int p=0;p<_numPaths;++p) {
+						const uint64_t s = _pathScore(p);
+						if (s < worstScore) {
+							worstScore = s;
+							slot = p;
+						}
+					}
+				}
 			}
 
-			_paths[slot].path = path;
 			_paths[slot].lastReceive = now;
+			_paths[slot].path = path;
 #ifdef ZT_ENABLE_CLUSTER
-			_paths[slot].clusterSuboptimal = suboptimalPath;
+			_paths[slot].clusterWeights = (unsigned int)(!suboptimalPath);
 			if (RR->cluster)
 				RR->cluster->broadcastHavePeer(_id);
 #else
-			_paths[slot].clusterSuboptimal = false;
+			_paths[slot].clusterWeights = 1;
 #endif
 		} else {
 
-			TRACE("got %s via unknown path %s(%s), confirming...",Packet::verbString(verb),_id.address().toString().c_str(),remoteAddr.toString().c_str());
+			TRACE("got %s via unknown path %s(%s), confirming...",Packet::verbString(verb),_id.address().toString().c_str(),path->address().toString().c_str());
+
 			if ( (_vProto >= 5) && ( !((_vMajor == 1)&&(_vMinor == 1)&&(_vRevision == 0)) ) ) {
 				// Newer than 1.1.0 can use ECHO, which is smaller
 				Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ECHO);
 				outp.armor(_key,true);
 				path->send(RR,outp.data(),outp.size(),now);
 			} else {
 				// For backward compatibility we send HELLO to ancient nodes
 				sendHELLO(path->localAddress(),path->address(),now);
 			}
 
 		}
 	} else if (trustEstablished) {
 		// Send PUSH_DIRECT_PATHS if hops>0 (relayed) and we have a trust relationship (common network membership)
 		_pushDirectPaths(path,now);
 	}
 
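The replacement-slot logic in this hunk switches from "evict the path with the oldest lastReceive" to "evict the lowest-scoring path, preferring a victim in the same address family as the new path." A simplified sketch of that two-pass selection, written against hypothetical plain-array types rather than the real Peer internals:

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the per-peer path table entries.
struct ExamplePath { int family; uint64_t score; };

// Pick the slot to overwrite: worst score within the new path's family if any
// exist, otherwise the worst score overall (mirrors the diffed logic above).
static size_t pickReplacementSlot(const ExamplePath *paths,size_t count,int newFamily)
{
    size_t slot = count - 1;                 // fallback slot, as in the diff
    uint64_t worst = 0xffffffffffffffffULL;
    bool sameFamilyFound = false;

    for(size_t p=0;p<count;++p) {            // pass 1: same address family only
        if ((paths[p].family == newFamily)&&(paths[p].score < worst)) {
            worst = paths[p].score;
            slot = p;
            sameFamilyFound = true;
        }
    }
    if (!sameFamilyFound) {                  // pass 2: any family
        for(size_t p=0;p<count;++p) {
            if (paths[p].score < worst) {
                worst = paths[p].score;
                slot = p;
            }
        }
    }
    return slot;
}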
@@ -201,17 +218,19 @@ void Peer::setClusterOptimal(const InetAddress &addr)
 {
 	Mutex::Lock _l(_paths_m);
 
-	int have = -1;
+	int opt = -1;
 	for(unsigned int p=0;p<_numPaths;++p) {
 		if (_paths[p].path->address() == addr) {
-			have = (int)p;
+			opt = (int)p;
 			break;
 		}
 	}
 
-	if (have >= 0) {
-		for(unsigned int p=0;p<_numPaths;++p)
-			_paths[p].clusterSuboptimal = (p != have);
+	if (opt >= 0) { // only change anything if we have the optimal path
+		for(unsigned int p=0;p<_numPaths;++p) {
+			if (_paths[p].path->address().ss_family == addr.ss_family)
+				_paths[p].clusterWeights = ((int)p == opt) ? 2 : 0;
+		}
 	}
 }
 
@@ -282,10 +301,12 @@ bool Peer::doPingAndKeepalive(uint64_t now,int inetAddressFamily)
 	int bestp = -1;
 	uint64_t best = 0ULL;
 	for(unsigned int p=0;p<_numPaths;++p) {
-		const uint64_t s = _pathScore(p);
-		if (s >= best) {
-			best = s;
-			bestp = (int)p;
+		if ((inetAddressFamily < 0)||((int)_paths[p].path->address().ss_family == inetAddressFamily)) {
+			const uint64_t s = _pathScore(p);
+			if (s >= best) {
+				best = s;
+				bestp = (int)p;
+			}
 		}
 	}
 
@@ -325,11 +346,9 @@ bool Peer::resetWithinScope(InetAddress::IpScope scope,uint64_t now)
 			sendHELLO(_paths[x].path->localAddress(),_paths[x].path->address(),now);
 		} else {
 			if (x != y) {
-				_paths[y].path = _paths[x].path;
 				_paths[y].lastReceive = _paths[x].lastReceive;
-#ifdef ZT_ENABLE_CLUSTER
-				_paths[y].clusterSuboptimal = _paths[x].clusterSuboptimal;
-#endif
+				_paths[y].path = _paths[x].path;
+				_paths[y].clusterWeights = _paths[x].clusterWeights;
 			}
 			++y;
 		}
@@ -376,11 +395,9 @@ void Peer::clean(uint64_t now)
 	while (x < np) {
 		if ((now - _paths[x].lastReceive) <= ZT_PEER_PATH_EXPIRATION) {
 			if (y != x) {
-				_paths[y].path = _paths[x].path;
 				_paths[y].lastReceive = _paths[x].lastReceive;
-#ifdef ZT_ENABLE_CLUSTER
-				_paths[y].clusterSuboptimal = _paths[x].clusterSuboptimal;
-#endif
+				_paths[y].path = _paths[x].path;
+				_paths[y].clusterWeights = _paths[x].clusterWeights;
 			}
 			++y;
 		}
@@ -121,7 +121,9 @@ public:
 	bool hasActivePathTo(uint64_t now,const InetAddress &addr) const;
 
 	/**
-	 * If we have a confirmed path to this address, mark others as cluster suboptimal
+	 * Set which known path for an address family is optimal
 	 *
+	 * This only modifies paths within the same address family
+	 *
 	 * @param addr Address to make exclusive
 	 */
@@ -161,8 +163,8 @@ public:
 	 * Send pings or keepalives depending on configured timeouts
 	 *
 	 * @param now Current time
-	 * @param inetAddressFamily Keep this address family alive, or 0 to simply pick current best ignoring family
-	 * @return True if we have at least one direct path
+	 * @param inetAddressFamily Keep this address family alive, or -1 for any
+	 * @return True if we have at least one direct path of the given family (or any if family is -1)
 	 */
 	bool doPingAndKeepalive(uint64_t now,int inetAddressFamily);
 
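Per the updated doc comment, the second argument is now an address family to keep alive, or -1 for "any," which is what the Node.cpp call site earlier in this commit switched to. A hedged usage sketch with a stand-in type (PeerLike is not the real Peer, and the per-family pattern is only one plausible caller behavior):

#include <cstdint>
#include <sys/socket.h> // AF_INET, AF_INET6 (POSIX; use winsock2.h on Windows)

// Stand-in with the same signature as Peer::doPingAndKeepalive; always "succeeds."
struct PeerLike {
    bool doPingAndKeepalive(uint64_t now,int inetAddressFamily) { (void)now; (void)inetAddressFamily; return true; }
};

void exampleKeepalive(PeerLike &peer,uint64_t now,bool keepBothFamiliesAlive)
{
    if (keepBothFamiliesAlive) {
        // e.g. a peer we want reachable over both IPv4 and IPv6
        peer.doPingAndKeepalive(now,AF_INET);
        peer.doPingAndKeepalive(now,AF_INET6);
    } else {
        // -1 means "pick the best path regardless of family"
        peer.doPingAndKeepalive(now,-1);
    }
}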
@@ -285,7 +287,7 @@ public:
 	inline bool hasClusterOptimalPath(uint64_t now) const
 	{
 		for(unsigned int p=0,np=_numPaths;p<np;++p) {
-			if ( (_paths[p].path->alive(now)) && (!_paths[p].clusterSuboptimal) )
+			if ( (_paths[p].path->alive(now)) && ((_paths[p].clusterWeights & 1) != 0) )
 				return true;
 		}
 		return false;
@@ -365,7 +367,9 @@ private:
 
 	inline uint64_t _pathScore(const unsigned int p) const
 	{
-		return ( (_paths[p].path->lastIn() + (_paths[p].path->preferenceRank() * (ZT_PEER_PING_PERIOD / ZT_PATH_MAX_PREFERENCE_RANK))) - ((ZT_PEER_PING_PERIOD * 10) * (uint64_t)_paths[p].clusterSuboptimal) );
+		return ( _paths[p].lastReceive +
+		         (uint64_t)(_paths[p].path->preferenceRank() * (ZT_PEER_PING_PERIOD / ZT_PATH_MAX_PREFERENCE_RANK)) +
+		         (uint64_t)(_paths[p].clusterWeights * ZT_PEER_PING_PERIOD) );
 	}
 
 	unsigned char _key[ZT_PEER_SECRET_KEY_LENGTH];
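The new score is expressed in milliseconds of "effective recency": the path's last receive time, plus up to roughly one ping period of preference-rank bonus, plus one ping period per cluster weight. A worked example with assumed constants (the real values live in Constants.hpp and may differ):

#include <cstdint>
#include <cstdio>

// Assumed, illustrative constants -- not the real Constants.hpp values.
static const uint64_t EXAMPLE_PING_PERIOD = 60000;  // ms (assumption)
static const uint64_t EXAMPLE_MAX_PREF_RANK = 15;   // (assumption)

// Same shape as the new _pathScore(): recency + rank bonus + cluster-weight bonus.
static uint64_t exampleScore(uint64_t lastReceive,unsigned int prefRank,unsigned int clusterWeights)
{
    return lastReceive
        + (uint64_t)(prefRank * (EXAMPLE_PING_PERIOD / EXAMPLE_MAX_PREF_RANK))
        + (uint64_t)(clusterWeights * EXAMPLE_PING_PERIOD);
}

int main()
{
    const uint64_t now = 1000000;
    // A slightly staler path that the cluster marked optimal (weight >= 1) can
    // outscore a fresher but cluster-suboptimal (weight 0) path:
    std::printf("fresh, weight 0: %llu\n",(unsigned long long)exampleScore(now - 1000,14,0));  // 1055000
    std::printf("stale, weight 1: %llu\n",(unsigned long long)exampleScore(now - 30000,14,1)); // 1086000
    return 0;
}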
@@ -384,9 +388,9 @@ private:
 	uint16_t _vRevision;
 	Identity _id;
 	struct {
-		SharedPtr<Path> path;
 		uint64_t lastReceive;
-		bool clusterSuboptimal;
+		SharedPtr<Path> path;
+		unsigned int clusterWeights;
 	} _paths[ZT_MAX_PEER_NETWORK_PATHS];
 	Mutex _paths_m;
 	unsigned int _numPaths;
@@ -292,6 +292,7 @@ unsigned int Utils::snprintf(char *buf,unsigned int len,const char *fmt,...)
 	if ((n >= (int)len)||(n < 0)) {
 		if (len)
 			buf[len - 1] = (char)0;
+		abort();
 		throw std::length_error("buf[] overflow in Utils::snprintf");
 	}
 
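For context on the check in this hunk: vsnprintf() returns a negative value on an output error and, on success, the number of characters the full string would have needed, so n >= len indicates truncation. A minimal sketch of the same detection (formatFits is a hypothetical helper, not part of Utils):

#include <cstdarg>
#include <cstdio>

// Returns true only if fmt/... fit in buf[len] without truncation.
static bool formatFits(char *buf,unsigned int len,const char *fmt,...)
{
    va_list ap;
    va_start(ap,fmt);
    int n = std::vsnprintf(buf,len,fmt,ap);
    va_end(ap);
    if ((n < 0)||(n >= (int)len)) {
        if (len)
            buf[len - 1] = (char)0; // ensure termination, as the diffed code does
        return false;               // the real code aborts/throws here instead
    }
    return true;
}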
one.cpp (10 lines changed)
@@ -330,12 +330,12 @@ static int cli(int argc,char **argv)
 			out << "200 listpeers <ztaddr> <path> <latency> <version> <role>" << ZT_EOL_S;
 			if (j.is_array()) {
 				for(unsigned long k=0;k<j.size();++k) {
-					auto p = j[k];
+					auto &p = j[k];
 					std::string bestPath;
 					auto paths = p["paths"];
 					if (paths.is_array()) {
 						for(unsigned long i=0;i<paths.size();++i) {
-							auto path = paths[i];
+							auto &path = paths[i];
 							if (path["preferred"]) {
 								char tmp[256];
 								std::string addr = path["address"];
@@ -389,13 +389,13 @@ static int cli(int argc,char **argv)
 			out << "200 listnetworks <nwid> <name> <mac> <status> <type> <dev> <ZT assigned ips>" << ZT_EOL_S;
 			if (j.is_array()) {
 				for(unsigned long i=0;i<j.size();++i) {
-					auto n = j[i];
+					auto &n = j[i];
 					if (n.is_object()) {
 						std::string aa;
-						auto assignedAddresses = n["assignedAddresses"];
+						auto &assignedAddresses = n["assignedAddresses"];
 						if (assignedAddresses.is_array()) {
 							for(unsigned long j=0;j<assignedAddresses.size();++j) {
-								auto addr = assignedAddresses[j];
+								auto &addr = assignedAddresses[j];
 								if (addr.is_string()) {
 									if (aa.length() > 0) aa.push_back(',');
 									aa.append(addr);
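Both one.cpp hunks replace auto with auto & when walking JSON arrays, so each element is bound by reference instead of being deep-copied on every loop iteration. The same distinction in plain C++, independent of any JSON library:

#include <cstdio>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> items(3,std::string(1024,'x'));

    for(unsigned long i=0;i<items.size();++i) {
        auto s = items[i];   // copies the element (what the old CLI loops did per JSON value)
        (void)s;
    }
    for(unsigned long i=0;i<items.size();++i) {
        auto &s = items[i];  // binds a reference; no copy (the new form)
        (void)s;
    }
    std::printf("done\n");
    return 0;
}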
@@ -165,7 +165,7 @@ static void _jsonAppend(unsigned int depth,std::string &buf,const ZT_VirtualNetw
 
 static std::string _jsonEnumerate(unsigned int depth,const ZT_PeerPhysicalPath *pp,unsigned int count)
 {
-	char json[1024];
+	char json[2048];
 	char prefix[32];
 
 	if (depth >= sizeof(prefix)) // sanity check -- shouldn't be possible
@@ -183,14 +183,14 @@ static std::string _jsonEnumerate(unsigned int depth,const ZT_PeerPhysicalPath *
 			"%s\t\"address\": \"%s\",\n"
 			"%s\t\"lastSend\": %llu,\n"
 			"%s\t\"lastReceive\": %llu,\n"
-			"%s\t\"active\": %s,\n"
+			"%s\t\"active\": true,\n"
 			"%s\t\"preferred\": %s,\n"
 			"%s\t\"trustedPathId\": %llu\n"
 			"%s}",
 			prefix,_jsonEscape(reinterpret_cast<const InetAddress *>(&(pp[i].address))->toString()).c_str(),
 			prefix,pp[i].lastSend,
 			prefix,pp[i].lastReceive,
-			prefix,(pp[i].active == 0) ? "false" : "true",
+			prefix,
 			prefix,(pp[i].preferred == 0) ? "false" : "true",
 			prefix,pp[i].trustedPathId,
 			prefix);
@@ -201,7 +201,7 @@ static std::string _jsonEnumerate(unsigned int depth,const ZT_PeerPhysicalPath *
 
 static void _jsonAppend(unsigned int depth,std::string &buf,const ZT_Peer *peer)
 {
-	char json[1024];
+	char json[2048];
 	char prefix[32];
 
 	if (depth >= sizeof(prefix)) // sanity check -- shouldn't be possible