Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2024-12-24 15:16:40 +00:00)

Commit 5ed5b22525: Merge branch 'dev' of http://git.int.zerotier.com/ZeroTier/ZeroTierOne into dev
@@ -654,16 +654,16 @@ NetworkController::ResultCode EmbeddedNetworkController::doNetworkConfigRequest(
// for both.) This is computed by reference to the last time we deauthorized
// a member, since within the time period since this event any temporal
// differences are not particularly relevant.
uint64_t credentialTtl = ZT_NETWORKCONFIG_DEFAULT_MIN_CREDENTIAL_TTL;
uint64_t credentialtmd = ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MIN_MAX_DELTA;
if (now > nmi.mostRecentDeauthTime)
credentialTtl += (now - nmi.mostRecentDeauthTime);
if (credentialTtl > ZT_NETWORKCONFIG_DEFAULT_MAX_CREDENTIAL_TTL)
credentialTtl = ZT_NETWORKCONFIG_DEFAULT_MAX_CREDENTIAL_TTL;
credentialtmd += (now - nmi.mostRecentDeauthTime);
if (credentialtmd > ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MAX_MAX_DELTA)
credentialtmd = ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MAX_MAX_DELTA;

nc.networkId = nwid;
nc.type = _jB(network["private"],true) ? ZT_NETWORK_TYPE_PRIVATE : ZT_NETWORK_TYPE_PUBLIC;
nc.timestamp = now;
nc.credentialTimeToLive = credentialTtl;
nc.credentialTimeMaxDelta = credentialtmd;
nc.revision = _jI(network["revision"],0ULL);
nc.issuedTo = identity.address();
if (_jB(network["enableBroadcast"],true)) nc.flags |= ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST;
@@ -925,7 +925,7 @@ NetworkController::ResultCode EmbeddedNetworkController::doNetworkConfigRequest(
}

if (_jB(network["private"],true)) {
CertificateOfMembership com(now,credentialTtl,nwid,identity.address());
CertificateOfMembership com(now,credentialtmd,nwid,identity.address());
if (com.sign(signingId)) {
nc.com = com;
} else {
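To make the interleaved old/new lines above easier to follow, here is a consolidated sketch of the new controller-side logic only. This is a readability aid reconstructed from the hunk (same locals, constants, and ordering as shown), not the verbatim file contents:

uint64_t credentialtmd = ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MIN_MAX_DELTA;
if (now > nmi.mostRecentDeauthTime)
	credentialtmd += (now - nmi.mostRecentDeauthTime); // widen the window by time since last deauthorization
if (credentialtmd > ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MAX_MAX_DELTA)
	credentialtmd = ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MAX_MAX_DELTA; // clamp to the two hour maximum
nc.credentialTimeMaxDelta = credentialtmd; // replaces the old nc.credentialTimeToLive assignment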
@@ -1053,6 +1053,11 @@ typedef struct
*/
uint64_t trustedPathId;

/**
* Is path expired?
*/
int expired;

/**
* Is path preferred?
*/
@@ -88,7 +88,7 @@ ifeq ($(ZT_DEBUG),1)
DEFS+=-DZT_TRACE
override CFLAGS+=-Wall -g -O -pthread $(INCLUDES) $(DEFS)
override CXXFLAGS+=-Wall -g -O -std=c++11 -pthread $(INCLUDES) $(DEFS)
LDFLAGS=
override LDFLAGS+=
STRIP?=echo
# The following line enables optimization for the crypto code, since
# C25519 in particular is almost UNUSABLE in -O0 even on a 3ghz box!
@@ -98,7 +98,7 @@ else
override CFLAGS+=-Wall -fPIE -pthread $(INCLUDES) -DNDEBUG $(DEFS)
CXXFLAGS?=-O3 -fstack-protector
override CXXFLAGS+=-Wall -Wno-unused-result -Wreorder -fPIE -std=c++11 -pthread $(INCLUDES) -DNDEBUG $(DEFS)
LDFLAGS=-pie -Wl,-z,relro,-z,now
override LDFLAGS+=-pie -Wl,-z,relro,-z,now
STRIP?=strip
STRIP+=--strip-all
endif
@@ -179,16 +179,16 @@
*/
#define ZT_PEER_SECRET_KEY_LENGTH 32

/**
* Minimum delay between timer task checks to prevent thrashing
*/
#define ZT_CORE_TIMER_TASK_GRANULARITY 500

/**
* How often Topology::clean() and Network::clean() and similar are called, in ms
*/
#define ZT_HOUSEKEEPING_PERIOD 120000

/**
* Overriding granularity for timer tasks to prevent CPU-intensive thrashing on every packet
*/
#define ZT_CORE_TIMER_TASK_GRANULARITY 500

/**
* How long to remember peer records in RAM if they haven't been used
*/
@@ -226,6 +226,11 @@
*/
#define ZT_MULTICAST_LIKE_EXPIRE 600000

/**
* Period for multicast LIKE announcements
*/
#define ZT_MULTICAST_ANNOUNCE_PERIOD 120000

/**
* Delay between explicit MULTICAST_GATHER requests for a given multicast channel
*/
@@ -239,22 +244,24 @@
#define ZT_MULTICAST_TRANSMIT_TIMEOUT 5000

/**
* Delay between scans of the topology active peer DB for peers that need ping
*
* This is also how often pings will be retried to upstream peers (relays, roots)
* constantly until something is heard.
* Delay between checks of peer pings, etc., and also related housekeeping tasks
*/
#define ZT_PING_CHECK_INVERVAL 9000
#define ZT_PING_CHECK_INVERVAL 5000

/**
* How frequently to send heartbeats over in-use paths
*/
#define ZT_PATH_HEARTBEAT_PERIOD 15000
#define ZT_PATH_HEARTBEAT_PERIOD 10000

/**
* Paths are considered inactive if they have not received traffic in this long
*/
#define ZT_PATH_ALIVE_TIMEOUT 35000
#define ZT_PATH_ALIVE_TIMEOUT 25000

/**
* Minimum time between attempts to check dead paths to see if they can be re-awakened
*/
#define ZT_PATH_MIN_REACTIVATE_INTERVAL 2500

/**
* Delay between full-fledge pings of directly connected peers
@@ -262,10 +269,15 @@
#define ZT_PEER_PING_PERIOD 60000

/**
* Peers forget paths that have not spoken in this long
* Paths are considered expired if they have not produced a real packet in this long
*/
#define ZT_PEER_PATH_EXPIRATION ((ZT_PEER_PING_PERIOD * 4) + 3000)

/**
* How often to retry expired paths that we're still remembering
*/
#define ZT_PEER_EXPIRED_PATH_TRIAL_PERIOD (ZT_PEER_PING_PERIOD * 10)

/**
* Timeout for overall peer activity (measured from last receive)
*/
@@ -288,14 +300,6 @@
*/
#define ZT_MIN_UNITE_INTERVAL 30000

/**
* Delay between initial direct NAT-t packet and more aggressive techniques
*
* This may also be a delay before sending the first packet if we determine
* that we should wait for the remote to initiate rendezvous first.
*/
#define ZT_NAT_T_TACTICAL_ESCALATION_DELAY 1000

/**
* Sanity limit on maximum bridge routes
*
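For reference, the new path-expiration constants above work out as follows. This is a small illustrative sketch derived only from the defines shown (the helper name is hypothetical, not from the tree):

// ZT_PEER_PATH_EXPIRATION = (ZT_PEER_PING_PERIOD * 4) + 3000 = (60000 * 4) + 3000 = 243000 ms (~4 minutes)
// ZT_PEER_EXPIRED_PATH_TRIAL_PERIOD = ZT_PEER_PING_PERIOD * 10 = 600000 ms (10 minutes)
static inline bool pathIsExpired(uint64_t now,uint64_t lastReceive)
{
	// A path is "expired" if it has not produced a real packet within the expiration window.
	return ((now - lastReceive) > ZT_PEER_PATH_EXPIRATION);
}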
@@ -526,7 +526,7 @@ bool IncomingPacket::_doRENDEZVOUS(const RuntimeEnvironment *RR,const SharedPtr<
TRACE("RENDEZVOUS from %s says %s might be at %s, ignoring since peer is not upstream",peer->address().toString().c_str(),with.toString().c_str(),atAddr.toString().c_str());
} else if (RR->node->shouldUsePathForZeroTierTraffic(_path->localAddress(),atAddr)) {
RR->node->putPacket(_path->localAddress(),atAddr,"ABRE",4,2); // send low-TTL junk packet to 'open' local NAT(s) and stateful firewalls
rendezvousWith->sendHELLO(_path->localAddress(),atAddr,RR->node->now());
rendezvousWith->attemptToContactAt(_path->localAddress(),atAddr,RR->node->now());
TRACE("RENDEZVOUS from %s says %s might be at %s, sent verification attempt",peer->address().toString().c_str(),with.toString().c_str(),atAddr.toString().c_str());
} else {
TRACE("RENDEZVOUS from %s says %s might be at %s, ignoring since path is not suitable",peer->address().toString().c_str(),with.toString().c_str(),atAddr.toString().c_str());
@@ -547,25 +547,27 @@ bool IncomingPacket::_doRENDEZVOUS(const RuntimeEnvironment *RR,const SharedPtr<
bool IncomingPacket::_doFRAME(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer)
{
try {
const SharedPtr<Network> network(RR->node->network(at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID)));
const uint64_t nwid = at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID);
const SharedPtr<Network> network(RR->node->network(nwid));
bool approved = false;
if (network) {
if (size() > ZT_PROTO_VERB_FRAME_IDX_PAYLOAD) {
if (!network->isAllowed(peer)) {
TRACE("dropped FRAME from %s(%s): not a member of private network %.16llx",peer->address().toString().c_str(),_path->address().toString().c_str(),(unsigned long long)network->id());
peer->received(_path,hops(),packetId(),Packet::VERB_FRAME,0,Packet::VERB_NOP,false);
} else {
const unsigned int etherType = at<uint16_t>(ZT_PROTO_VERB_FRAME_IDX_ETHERTYPE);
const MAC sourceMac(peer->address(),network->id());
const MAC sourceMac(peer->address(),nwid);
const unsigned int frameLen = size() - ZT_PROTO_VERB_FRAME_IDX_PAYLOAD;
const uint8_t *const frameData = reinterpret_cast<const uint8_t *>(data()) + ZT_PROTO_VERB_FRAME_IDX_PAYLOAD;
if (network->filterIncomingPacket(peer,RR->identity.address(),sourceMac,network->mac(),frameData,frameLen,etherType,0) > 0)
RR->node->putFrame(network->id(),network->userPtr(),sourceMac,network->mac(),etherType,0,(const void *)frameData,frameLen);
peer->received(_path,hops(),packetId(),Packet::VERB_FRAME,0,Packet::VERB_NOP,true);
RR->node->putFrame(nwid,network->userPtr(),sourceMac,network->mac(),etherType,0,(const void *)frameData,frameLen);
approved = true; // this means approved on the network in general, not this packet per se
}
}
} else {
TRACE("dropped FRAME from %s(%s): we are not connected to network %.16llx",source().toString().c_str(),_path->address().toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
TRACE("dropped FRAME from %s(%s): we are not a member of network %.16llx",source().toString().c_str(),_path->address().toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
}
peer->received(_path,hops(),packetId(),Packet::VERB_FRAME,0,Packet::VERB_NOP,approved);
} catch ( ... ) {
TRACE("dropped FRAME from %s(%s): unexpected exception",source().toString().c_str(),_path->address().toString().c_str());
}
@@ -575,7 +577,8 @@ bool IncomingPacket::_doFRAME(const RuntimeEnvironment *RR,const SharedPtr<Peer>
bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer)
{
try {
SharedPtr<Network> network(RR->node->network(at<uint64_t>(ZT_PROTO_VERB_EXT_FRAME_IDX_NETWORK_ID)));
const uint64_t nwid = at<uint64_t>(ZT_PROTO_VERB_EXT_FRAME_IDX_NETWORK_ID);
const SharedPtr<Network> network(RR->node->network(nwid));
if (network) {
if (size() > ZT_PROTO_VERB_EXT_FRAME_IDX_PAYLOAD) {
const unsigned int flags = (*this)[ZT_PROTO_VERB_EXT_FRAME_IDX_FLAGS];
@@ -608,7 +611,7 @@ bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<P

switch (network->filterIncomingPacket(peer,RR->identity.address(),from,to,frameData,frameLen,etherType,0)) {
case 1:
if (from != MAC(peer->address(),network->id())) {
if (from != MAC(peer->address(),nwid)) {
if (network->config().permitsBridging(peer->address())) {
network->learnBridgeRoute(from,peer->address());
} else {
@@ -625,7 +628,7 @@ bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<P
}
// fall through -- 2 means accept regardless of bridging checks or other restrictions
case 2:
RR->node->putFrame(network->id(),network->userPtr(),from,to,etherType,0,(const void *)frameData,frameLen);
RR->node->putFrame(nwid,network->userPtr(),from,to,etherType,0,(const void *)frameData,frameLen);
break;
}

@@ -633,6 +636,7 @@ bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<P
}
} else {
TRACE("dropped EXT_FRAME from %s(%s): we are not connected to network %.16llx",source().toString().c_str(),_path->address().toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
peer->received(_path,hops(),packetId(),Packet::VERB_EXT_FRAME,0,Packet::VERB_NOP,false);
}
} catch ( ... ) {
TRACE("dropped EXT_FRAME from %s(%s): unexpected exception",source().toString().c_str(),_path->address().toString().c_str());
@@ -968,7 +972,7 @@ bool IncomingPacket::_doMULTICAST_FRAME(const RuntimeEnvironment *RR,const Share
return true;
}

if (from != MAC(peer->address(),network->id())) {
if (from != MAC(peer->address(),nwid)) {
if (network->config().permitsBridging(peer->address())) {
network->learnBridgeRoute(from,peer->address());
} else {
@@ -980,7 +984,7 @@ bool IncomingPacket::_doMULTICAST_FRAME(const RuntimeEnvironment *RR,const Share

const uint8_t *const frameData = (const uint8_t *)field(offset + ZT_PROTO_VERB_MULTICAST_FRAME_IDX_FRAME,frameLen);
if (network->filterIncomingPacket(peer,RR->identity.address(),from,to.mac(),frameData,frameLen,etherType,0) > 0) {
RR->node->putFrame(network->id(),network->userPtr(),from,to.mac(),etherType,0,(const void *)frameData,frameLen);
RR->node->putFrame(nwid,network->userPtr(),from,to.mac(),etherType,0,(const void *)frameData,frameLen);
}
}

@@ -1050,7 +1054,7 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
if ( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_FORGET_PATH) == 0) && (!redundant) && (RR->node->shouldUsePathForZeroTierTraffic(_path->localAddress(),a)) ) {
if (++countPerScope[(int)a.ipScope()][0] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
peer->sendHELLO(InetAddress(),a,now);
peer->attemptToContactAt(InetAddress(),a,now);
} else {
TRACE("ignoring contact for %s at %s -- too many per scope",peer->address().toString().c_str(),a.toString().c_str());
}
@@ -1069,7 +1073,7 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
if ( ((flags & ZT_PUSH_DIRECT_PATHS_FLAG_FORGET_PATH) == 0) && (!redundant) && (RR->node->shouldUsePathForZeroTierTraffic(_path->localAddress(),a)) ) {
if (++countPerScope[(int)a.ipScope()][1] <= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY) {
TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
peer->sendHELLO(InetAddress(),a,now);
peer->attemptToContactAt(InetAddress(),a,now);
} else {
TRACE("ignoring contact for %s at %s -- too many per scope",peer->address().toString().c_str(),a.toString().c_str());
}
@@ -1163,7 +1167,7 @@ bool IncomingPacket::_doCIRCUIT_TEST(const RuntimeEnvironment *RR,const SharedPt
remainingHopsPtr += ZT_ADDRESS_LENGTH;
SharedPtr<Peer> nhp(RR->topology->getPeer(nextHop[h]));
if (nhp) {
SharedPtr<Path> nhbp(nhp->getBestPath(now));
SharedPtr<Path> nhbp(nhp->getBestPath(now,false));
if ((nhbp)&&(nhbp->alive(now)))
nextHopBestPathAddress[h] = nhbp->address();
}
@@ -1261,6 +1265,7 @@ bool IncomingPacket::_doCIRCUIT_TEST_REPORT(const RuntimeEnvironment *RR,const S
}

RR->node->postCircuitTestReport(&report);

peer->received(_path,hops(),packetId(),Packet::VERB_CIRCUIT_TEST_REPORT,0,Packet::VERB_NOP,false);
} catch ( ... ) {
TRACE("dropped CIRCUIT_TEST_REPORT from %s(%s): unexpected exception",source().toString().c_str(),_path->address().toString().c_str());
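A condensed restatement of the new FRAME path above may help when reading the interleaved lines; this is a readability aid only (same locals and calls as the handler, error and TRACE branches omitted), not standalone code:

const uint64_t nwid = at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID); // read the network ID once and reuse it
const SharedPtr<Network> network(RR->node->network(nwid));
bool approved = false;
if ((network)&&(network->isAllowed(peer))) {
	const MAC sourceMac(peer->address(),nwid); // sender MAC derived from its address + network ID
	if (network->filterIncomingPacket(peer,RR->identity.address(),sourceMac,network->mac(),frameData,frameLen,etherType,0) > 0)
		RR->node->putFrame(nwid,network->userPtr(),sourceMac,network->mac(),etherType,0,(const void *)frameData,frameLen);
	approved = true; // membership-level approval, not per-packet
}
peer->received(_path,hops(),packetId(),Packet::VERB_FRAME,0,Packet::VERB_NOP,approved); // single exit point reports trust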
@@ -95,7 +95,9 @@ int Membership::addCredential(const RuntimeEnvironment *RR,const CertificateOfMe
TRACE("addCredential(CertificateOfMembership) for %s on %.16llx ACCEPTED (redundant)",com.issuedTo().toString().c_str(),com.networkId());
return 0;
}

const int vr = com.verify(RR);

if (vr == 0) {
TRACE("addCredential(CertificateOfMembership) for %s on %.16llx ACCEPTED (new)",com.issuedTo().toString().c_str(),com.networkId());
if (com.timestamp().first > _com.timestamp().first) {
@@ -104,6 +106,7 @@ int Membership::addCredential(const RuntimeEnvironment *RR,const CertificateOfMe
} else {
TRACE("addCredential(CertificateOfMembership) for %s on %.16llx REJECTED (%d)",com.issuedTo().toString().c_str(),com.networkId(),vr);
}

return vr;
}
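A short usage sketch for callers of addCredential(), with return-code meanings taken from the Membership.hpp doc comment later in this diff (the surrounding names are illustrative):

const int r = membership.addCredential(RR,com);
if (r == 0) {
	// accepted (new or redundant)
} else if (r == 1) {
	// waiting on WHOIS before the signature can be verified
} else {
	// negative: bad signature or otherwise invalid credential
}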
@@ -31,15 +31,10 @@
#include "Hashtable.hpp"
#include "NetworkConfig.hpp"

// Expiration time for capability and tag cache
#define ZT_MEMBERSHIP_STATE_EXPIRATION_TIME 600000

// Expiration time for Memberships (used in Peer::clean())
#define ZT_MEMBERSHIP_EXPIRATION_TIME (ZT_MEMBERSHIP_STATE_EXPIRATION_TIME * 2)

namespace ZeroTier {

class RuntimeEnvironment;
class Network;

/**
* A container for certificates of membership and other network credentials
@@ -107,6 +102,7 @@ public:
friend class CapabilityIterator;

Membership() :
_lastUpdatedMulticast(0),
_lastPushAttempt(0),
_lastPushedCom(0),
_blacklistBefore(0),
@@ -130,6 +126,21 @@ public:
*/
void sendCredentialsIfNeeded(const RuntimeEnvironment *RR,const uint64_t now,const Address &peerAddress,const NetworkConfig &nconf,const Capability *cap);

/**
* Check whether we should push MULTICAST_LIKEs to this peer
*
* @param now Current time
* @return True if we should update multicasts
*/
inline bool shouldLikeMulticasts(const uint64_t now) const { return ((now - _lastUpdatedMulticast) >= ZT_MULTICAST_ANNOUNCE_PERIOD); }

/**
* Set time we last updated multicasts for this peer
*
* @param now Current time
*/
inline void likingMulticasts(const uint64_t now) { _lastUpdatedMulticast = now; }

/**
* @param nconf Our network config
* @return True if this peer is allowed on this network at all
@@ -144,7 +155,7 @@ public:
}

/**
* Check whether a capability or tag is expired
* Check whether a capability or tag is within its max delta from the timestamp of our network config and newer than any blacklist cutoff time
*
* @param cred Credential to check -- must have timestamp() accessor method
* @return True if credential is NOT expired
@@ -153,7 +164,8 @@ public:
inline bool isCredentialTimestampValid(const NetworkConfig &nconf,const C &cred) const
{
const uint64_t ts = cred.timestamp();
return ( ( (ts >= nconf.timestamp) || ((nconf.timestamp - ts) <= nconf.credentialTimeToLive) ) && (ts > _blacklistBefore) );
const uint64_t delta = (ts >= nconf.timestamp) ? (ts - nconf.timestamp) : (nconf.timestamp - ts);
return ((delta <= nconf.credentialTimeMaxDelta)&&(ts > _blacklistBefore));
}
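The validity test above switches from a one-sided TTL to a symmetric delta around the network config timestamp. A worked sketch of the new check with illustrative numbers (the values are examples only, not from the tree):

// nconf.timestamp = 1000000, nconf.credentialTimeMaxDelta = 185000 (the default minimum max-delta)
// cred.timestamp() =  900000 -> delta = 100000 <= 185000 -> valid  (the old TTL check also accepted this)
// cred.timestamp() = 1300000 -> delta = 300000 >  185000 -> invalid (the old check accepted any ts >= nconf.timestamp)
const uint64_t ts = cred.timestamp();
const uint64_t delta = (ts >= nconf.timestamp) ? (ts - nconf.timestamp) : (nconf.timestamp - ts);
const bool valid = ((delta <= nconf.credentialTimeMaxDelta) && (ts > _blacklistBefore));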
/**
@@ -205,6 +217,8 @@ public:
/**
* Validate and add a credential if signature is okay and it's otherwise good
*
* @param RR Runtime environment
* @param com Certificate of membership
* @return 0 == OK, 1 == waiting for WHOIS, -1 == BAD signature or credential
*/
int addCredential(const RuntimeEnvironment *RR,const CertificateOfMembership &com);
@@ -236,20 +250,15 @@ public:
/**
* Clean up old or stale entries
*
* @return Time of most recent activity in this Membership
* @param nconf Network config
*/
inline uint64_t clean(const uint64_t now)
inline void clean(const NetworkConfig &nconf)
{
uint64_t lastAct = _lastPushedCom;

for(std::map<uint32_t,CState>::iterator i(_caps.begin());i!=_caps.end();) {
const uint64_t la = std::max(i->second.lastPushed,i->second.lastReceived);
if ((now - la) > ZT_MEMBERSHIP_STATE_EXPIRATION_TIME) {
if (!isCredentialTimestampValid(nconf,i->second.cap)) {
_caps.erase(i++);
} else {
++i;
if (la > lastAct)
lastAct = la;
}
}

@@ -257,17 +266,15 @@ public:
TState *ts = (TState *)0;
Hashtable<uint32_t,TState>::Iterator tsi(_tags);
while (tsi.next(i,ts)) {
const uint64_t la = std::max(ts->lastPushed,ts->lastReceived);
if ((now - la) > ZT_MEMBERSHIP_STATE_EXPIRATION_TIME)
if (!isCredentialTimestampValid(nconf,ts->tag))
_tags.erase(*i);
else if (la > lastAct)
lastAct = la;
}

return lastAct;
}

private:
// Last time we pushed MULTICAST_LIKE(s)
uint64_t _lastUpdatedMulticast;

// Last time we checked if credential push was needed
uint64_t _lastPushAttempt;
node/Network.cpp: 151 lines changed
@@ -577,6 +577,7 @@ Network::Network(const RuntimeEnvironment *renv,uint64_t nwid,void *uptr) :
RR(renv),
_uPtr(uptr),
_id(nwid),
_lastAnnouncedMulticastGroupsUpstream(0),
_mac(renv->identity.address(),nwid),
_portInitialized(false),
_inboundConfigPacketId(0),
@@ -677,7 +678,7 @@ bool Network::filterOutgoingPacket(
accept = true;

if ((!noTee)&&(cc2)) {
_memberships[cc2].sendCredentialsIfNeeded(RR,RR->node->now(),cc2,_config,relevantCap);
_membership(cc2).sendCredentialsIfNeeded(RR,RR->node->now(),cc2,_config,relevantCap);

Packet outp(cc2,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(_id);
@@ -709,7 +710,7 @@ bool Network::filterOutgoingPacket(

if (accept) {
if ((!noTee)&&(cc)) {
_memberships[cc].sendCredentialsIfNeeded(RR,RR->node->now(),cc,_config,relevantCap);
_membership(cc).sendCredentialsIfNeeded(RR,RR->node->now(),cc,_config,relevantCap);

Packet outp(cc,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(_id);
@@ -723,7 +724,7 @@ bool Network::filterOutgoingPacket(
}

if ((ztDest != ztDest2)&&(ztDest2)) {
_memberships[ztDest2].sendCredentialsIfNeeded(RR,RR->node->now(),ztDest2,_config,relevantCap);
_membership(ztDest2).sendCredentialsIfNeeded(RR,RR->node->now(),ztDest2,_config,relevantCap);

Packet outp(ztDest2,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(_id);
@@ -763,7 +764,7 @@ int Network::filterIncomingPacket(

Mutex::Lock _l(_lock);

Membership &m = _memberships[ztDest];
Membership &m = _membership(ztDest);
const unsigned int remoteTagCount = m.getAllTags(_config,remoteTagIds,remoteTagValues,ZT_MAX_NETWORK_TAGS);

switch (_doZtFilter(RR,_config,true,sourcePeer->address(),ztDest2,macSource,macDest,frameData,frameLen,etherType,vlanId,_config.rules,_config.ruleCount,_config.tags,_config.tagCount,remoteTagIds,remoteTagValues,remoteTagCount,cc,ccLength)) {
@@ -790,7 +791,7 @@ int Network::filterIncomingPacket(

if (accept) {
if (cc2) {
_memberships[cc2].sendCredentialsIfNeeded(RR,RR->node->now(),cc2,_config,(const Capability *)0);
_membership(cc2).sendCredentialsIfNeeded(RR,RR->node->now(),cc2,_config,(const Capability *)0);

Packet outp(cc2,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(_id);
@@ -821,7 +822,7 @@ int Network::filterIncomingPacket(

if (accept) {
if (cc) {
_memberships[cc].sendCredentialsIfNeeded(RR,RR->node->now(),cc,_config,(const Capability *)0);
_membership(cc).sendCredentialsIfNeeded(RR,RR->node->now(),cc,_config,(const Capability *)0);

Packet outp(cc,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(_id);
@@ -835,7 +836,7 @@ int Network::filterIncomingPacket(
}

if ((ztDest != ztDest2)&&(ztDest2)) {
_memberships[ztDest2].sendCredentialsIfNeeded(RR,RR->node->now(),ztDest2,_config,(const Capability *)0);
_membership(ztDest2).sendCredentialsIfNeeded(RR,RR->node->now(),ztDest2,_config,(const Capability *)0);

Packet outp(ztDest2,RR->identity.address(),Packet::VERB_EXT_FRAME);
outp.append(_id);
@@ -872,8 +873,8 @@ void Network::multicastSubscribe(const MulticastGroup &mg)
return;
_myMulticastGroups.push_back(mg);
std::sort(_myMulticastGroups.begin(),_myMulticastGroups.end());
_announceMulticastGroups(&mg);
}
_announceMulticastGroups();
}

void Network::multicastUnsubscribe(const MulticastGroup &mg)
@@ -888,20 +889,6 @@ void Network::multicastUnsubscribe(const MulticastGroup &mg)
_myMulticastGroups.swap(nmg);
}

bool Network::tryAnnounceMulticastGroupsTo(const SharedPtr<Peer> &peer)
{
Mutex::Lock _l(_lock);
if (
(_isAllowed(peer)) ||
(peer->address() == this->controller()) ||
(RR->topology->isUpstream(peer->identity()))
) {
_announceMulticastGroupsTo(peer,_allMulticastGroups());
return true;
}
return false;
}

bool Network::applyConfiguration(const NetworkConfig &conf)
{
if (_destroyed) // sanity check
@@ -1094,8 +1081,9 @@ void Network::clean()
Membership *m = (Membership *)0;
Hashtable<Address,Membership>::Iterator i(_memberships);
while (i.next(a,m)) {
if ((now - m->clean(now)) > ZT_MEMBERSHIP_EXPIRATION_TIME)
_memberships.erase(*a);
if (RR->topology->getPeerNoCache(*a))
m->clean(_config);
else _memberships.erase(*a);
}
}
}
@@ -1143,7 +1131,7 @@ void Network::learnBridgedMulticastGroup(const MulticastGroup &mg,uint64_t now)
const unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size();
_multicastGroupsBehindMe.set(mg,now);
if (tmp != _multicastGroupsBehindMe.size())
_announceMulticastGroups();
_announceMulticastGroups(&mg);
}

void Network::destroy()
@@ -1223,61 +1211,74 @@ bool Network::_isAllowed(const SharedPtr<Peer> &peer) const
return false;
}

class _MulticastAnnounceAll
void Network::_announceMulticastGroups(const MulticastGroup *const onlyThis)
{
public:
_MulticastAnnounceAll(const RuntimeEnvironment *renv,Network *nw) :
_now(renv->node->now()),
_controller(nw->controller()),
_network(nw),
_anchors(nw->config().anchors()),
_upstreamAddresses(renv->topology->upstreamAddresses())
{}
inline void operator()(Topology &t,const SharedPtr<Peer> &p)
// Assumes _lock is locked
const uint64_t now = RR->node->now();

std::vector<MulticastGroup> groups;
if (onlyThis)
groups.push_back(*onlyThis);
else groups = _allMulticastGroups();

if ((onlyThis)||((now - _lastAnnouncedMulticastGroupsUpstream) >= ZT_MULTICAST_ANNOUNCE_PERIOD)) {
if (!onlyThis)
_lastAnnouncedMulticastGroupsUpstream = now;

// Announce multicast groups to upstream peers (roots, etc.) and also send
// them our COM so that MULTICAST_GATHER can be authenticated properly.
const std::vector<Address> upstreams(RR->topology->upstreamAddresses());
for(std::vector<Address>::const_iterator a(upstreams.begin());a!=upstreams.end();++a) {
if ((_config.isPrivate())&&(_config.com)) {
Packet outp(*a,RR->identity.address(),Packet::VERB_NETWORK_CREDENTIALS);
_config.com.serialize(outp);
outp.append((uint8_t)0x00);
RR->sw->send(outp,true);
}
_announceMulticastGroupsTo(*a,groups);
}

// Announce to controller, which does not need our COM since it obviously
// knows if we are a member. Of course if we already did or are going to
// below then we can skip it here.
const Address c(controller());
if ( (std::find(upstreams.begin(),upstreams.end(),c) == upstreams.end()) && (!_memberships.contains(c)) )
_announceMulticastGroupsTo(c,groups);
}

// Make sure that all "network anchors" have Membership records so we will
// push multicasts to them. Note that _membership() also does this but in a
// piecemeal on-demand fashion.
const std::vector<Address> anchors(_config.anchors());
for(std::vector<Address>::const_iterator a(anchors.begin());a!=anchors.end();++a)
_memberships[*a];

// Send MULTICAST_LIKE(s) to all members of this network
{
if ( (_network->_isAllowed(p)) || // FIXME: this causes multicast LIKEs for public networks to get spammed, which isn't terrible but is a bit stupid
(p->address() == _controller) ||
(std::find(_upstreamAddresses.begin(),_upstreamAddresses.end(),p->address()) != _upstreamAddresses.end()) ||
(std::find(_anchors.begin(),_anchors.end(),p->address()) != _anchors.end()) ) {
peers.push_back(p);
Address *a = (Address *)0;
Membership *m = (Membership *)0;
Hashtable<Address,Membership>::Iterator i(_memberships);
while (i.next(a,m)) {
if ((onlyThis)||(m->shouldLikeMulticasts(now))) {
if (!onlyThis)
m->likingMulticasts(now);
m->sendCredentialsIfNeeded(RR,RR->node->now(),*a,_config,(const Capability *)0);
_announceMulticastGroupsTo(*a,groups);
}
}
}
std::vector< SharedPtr<Peer> > peers;
private:
const uint64_t _now;
const Address _controller;
Network *const _network;
const std::vector<Address> _anchors;
const std::vector<Address> _upstreamAddresses;
};
void Network::_announceMulticastGroups()
{
// Assumes _lock is locked
std::vector<MulticastGroup> allMulticastGroups(_allMulticastGroups());
_MulticastAnnounceAll gpfunc(RR,this);
RR->topology->eachPeer<_MulticastAnnounceAll &>(gpfunc);
for(std::vector< SharedPtr<Peer> >::const_iterator i(gpfunc.peers.begin());i!=gpfunc.peers.end();++i)
_announceMulticastGroupsTo(*i,allMulticastGroups);
}

void Network::_announceMulticastGroupsTo(const SharedPtr<Peer> &peer,const std::vector<MulticastGroup> &allMulticastGroups)
void Network::_announceMulticastGroupsTo(const Address &peer,const std::vector<MulticastGroup> &allMulticastGroups)
{
// Assumes _lock is locked

// Anyone we announce multicast groups to will need our COM to authenticate GATHER requests.
{
Membership *m = _memberships.get(peer->address());
if (m)
m->sendCredentialsIfNeeded(RR,RR->node->now(),peer->address(),_config,(const Capability *)0);
}

Packet outp(peer->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
Packet outp(peer,RR->identity.address(),Packet::VERB_MULTICAST_LIKE);

for(std::vector<MulticastGroup>::const_iterator mg(allMulticastGroups.begin());mg!=allMulticastGroups.end();++mg) {
if ((outp.size() + 24) >= ZT_PROTO_MAX_PACKET_LENGTH) {
outp.compress();
RR->sw->send(outp,true);
outp.reset(peer->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
outp.reset(peer,RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
}

// network ID, MAC, ADI
@@ -1295,7 +1296,6 @@ void Network::_announceMulticastGroupsTo(const SharedPtr<Peer> &peer,const std::
std::vector<MulticastGroup> Network::_allMulticastGroups() const
{
// Assumes _lock is locked

std::vector<MulticastGroup> mgs;
mgs.reserve(_myMulticastGroups.size() + _multicastGroupsBehindMe.size() + 1);
mgs.insert(mgs.end(),_myMulticastGroups.begin(),_myMulticastGroups.end());
@@ -1304,8 +1304,21 @@ std::vector<MulticastGroup> Network::_allMulticastGroups() const
mgs.push_back(Network::BROADCAST);
std::sort(mgs.begin(),mgs.end());
mgs.erase(std::unique(mgs.begin(),mgs.end()),mgs.end());

return mgs;
}

Membership &Network::_membership(const Address &a)
{
// assumes _lock is locked
const unsigned long ms = _memberships.size();
Membership &m = _memberships[a];
if (ms != _memberships.size()) {
const uint64_t now = RR->node->now();
m.sendCredentialsIfNeeded(RR,now,a,_config,(const Capability *)0);
_announceMulticastGroupsTo(a,_allMulticastGroups());
m.likingMulticasts(now);
}
return m;
}

} // namespace ZeroTier
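The net effect of this refactor is that Network code no longer indexes _memberships[] directly; it goes through the lazy _membership() accessor shown above, which primes newly seen members with our COM and MULTICAST_LIKEs. A condensed usage sketch mirroring the filterIncomingPacket change (same names as the hunks above, for readability only):

// Before: _memberships[ztDest] silently created a bare, unprimed record.
// After:  the accessor creates the record and, if it is new, pushes credentials and multicast LIKEs once.
Membership &m = _membership(ztDest);
const unsigned int remoteTagCount = m.getAllTags(_config,remoteTagIds,remoteTagValues,ZT_MAX_NETWORK_TAGS);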
@@ -190,14 +190,6 @@ public:
*/
void multicastUnsubscribe(const MulticastGroup &mg);

/**
* Announce multicast groups to a peer if that peer is authorized on this network
*
* @param peer Peer to try to announce multicast groups to
* @return True if peer was authorized and groups were announced
*/
bool tryAnnounceMulticastGroupsTo(const SharedPtr<Peer> &peer);

/**
* Apply a NetworkConfig to this network
*
@@ -272,6 +264,15 @@ public:
*/
void clean();

/**
* Announce multicast groups to all members, anchors, etc.
*/
inline void announceMulticastGroups()
{
Mutex::Lock _l(_lock);
_announceMulticastGroups((const MulticastGroup *)0);
}

/**
* @return Time of last updated configuration or 0 if none
*/
@@ -298,23 +299,10 @@ public:
/**
* Get current network config
*
* This returns a const reference to the network config in place, which is safe
* to concurrently access but *may* change during access. Normally this isn't a
* problem, but if it is use configCopy().
*
* @return Network configuration (may be a null config if we don't have one yet)
*/
inline const NetworkConfig &config() const { return _config; }

/**
* @return A thread-safe copy of our NetworkConfig instead of a const reference
*/
inline NetworkConfig configCopy() const
{
Mutex::Lock _l(_lock);
return _config;
}

/**
* @return True if this network has a valid config
*/
@@ -323,7 +311,7 @@ public:
/**
* @return Ethernet MAC address for this network's local interface
*/
inline const MAC &mac() const throw() { return _mac; }
inline const MAC &mac() const { return _mac; }

/**
* Find the node on this network that has this MAC behind it (if any)
@@ -365,7 +353,7 @@ public:
if (com.networkId() != _id)
return -1;
Mutex::Lock _l(_lock);
return _memberships[com.issuedTo()].addCredential(RR,com);
return _membership(com.issuedTo()).addCredential(RR,com);
}

/**
@@ -377,7 +365,7 @@ public:
if (cap.networkId() != _id)
return -1;
Mutex::Lock _l(_lock);
return _memberships[cap.issuedTo()].addCredential(RR,cap);
return _membership(cap.issuedTo()).addCredential(RR,cap);
}

/**
@@ -389,7 +377,7 @@ public:
if (tag.networkId() != _id)
return -1;
Mutex::Lock _l(_lock);
return _memberships[tag.issuedTo()].addCredential(RR,tag);
return _membership(tag.issuedTo()).addCredential(RR,tag);
}

/**
@@ -400,7 +388,7 @@ public:
inline void blacklistBefore(const Address &peerAddress,const uint64_t ts)
{
Mutex::Lock _l(_lock);
_memberships[peerAddress].blacklistBefore(ts);
_membership(peerAddress).blacklistBefore(ts);
}

/**
@@ -417,24 +405,19 @@ public:
*/
inline void **userPtr() throw() { return &_uPtr; }

inline bool operator==(const Network &n) const throw() { return (_id == n._id); }
inline bool operator!=(const Network &n) const throw() { return (_id != n._id); }
inline bool operator<(const Network &n) const throw() { return (_id < n._id); }
inline bool operator>(const Network &n) const throw() { return (_id > n._id); }
inline bool operator<=(const Network &n) const throw() { return (_id <= n._id); }
inline bool operator>=(const Network &n) const throw() { return (_id >= n._id); }

private:
ZT_VirtualNetworkStatus _status() const;
void _externalConfig(ZT_VirtualNetworkConfig *ec) const; // assumes _lock is locked
bool _isAllowed(const SharedPtr<Peer> &peer) const;
void _announceMulticastGroups();
void _announceMulticastGroupsTo(const SharedPtr<Peer> &peer,const std::vector<MulticastGroup> &allMulticastGroups);
void _announceMulticastGroups(const MulticastGroup *const onlyThis);
void _announceMulticastGroupsTo(const Address &peer,const std::vector<MulticastGroup> &allMulticastGroups);
std::vector<MulticastGroup> _allMulticastGroups() const;
Membership &_membership(const Address &a); // also lazily sends COM and MULTICAST_LIKE(s) if this is a new member

const RuntimeEnvironment *RR;
void *_uPtr;
uint64_t _id;
uint64_t _lastAnnouncedMulticastGroupsUpstream;
MAC _mac; // local MAC address
volatile bool _portInitialized;
@@ -37,7 +37,7 @@ bool NetworkConfig::toDictionary(Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY> &d,b
if (!d.add(ZT_NETWORKCONFIG_DICT_KEY_VERSION,(uint64_t)ZT_NETWORKCONFIG_VERSION)) return false;
if (!d.add(ZT_NETWORKCONFIG_DICT_KEY_NETWORK_ID,this->networkId)) return false;
if (!d.add(ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP,this->timestamp)) return false;
if (!d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TTL,this->credentialTimeToLive)) return false;
if (!d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,this->credentialTimeMaxDelta)) return false;
if (!d.add(ZT_NETWORKCONFIG_DICT_KEY_REVISION,this->revision)) return false;
if (!d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,this->issuedTo)) return false;
if (!d.add(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,this->flags)) return false;
@@ -193,7 +193,7 @@ bool NetworkConfig::fromDictionary(const Dictionary<ZT_NETWORKCONFIG_DICT_CAPACI
return false;
}
this->timestamp = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP,0);
this->credentialTimeToLive = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TTL,0);
this->credentialTimeMaxDelta = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,0);
this->revision = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_REVISION,0);
this->issuedTo = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,0);
if (!this->issuedTo) {
@@ -41,12 +41,12 @@
#include "Identity.hpp"

/**
* Default maximum credential TTL and maxDelta for COM timestamps
* Default maximum time delta for COMs, tags, and capabilities
*
* The current value is two hours, providing ample time for a controller to
* experience fail-over, etc.
*/
#define ZT_NETWORKCONFIG_DEFAULT_MAX_CREDENTIAL_TTL 7200000ULL
#define ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MAX_MAX_DELTA 7200000ULL

/**
* Default minimum credential TTL and maxDelta for COM timestamps
@@ -54,7 +54,7 @@
* This is just slightly over three minutes and provides three retries for
* all currently online members to refresh.
*/
#define ZT_NETWORKCONFIG_DEFAULT_MIN_CREDENTIAL_TTL 185000ULL
#define ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MIN_MAX_DELTA 185000ULL

/**
* Flag: allow passive bridging (experimental)
@@ -148,8 +148,8 @@ namespace ZeroTier {
#define ZT_NETWORKCONFIG_DICT_KEY_TYPE "t"
// text
#define ZT_NETWORKCONFIG_DICT_KEY_NAME "n"
// credential time to live in ms
#define ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TTL "cttl"
// credential time max delta in ms
#define ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA "ctmd"
// binary serialized certificate of membership
#define ZT_NETWORKCONFIG_DICT_KEY_COM "C"
// specialists (binary array of uint64_t)
@@ -372,7 +372,7 @@ public:
{
printf("networkId==%.16llx\n",networkId);
printf("timestamp==%llu\n",timestamp);
printf("credentialTimeToLive==%llu\n",credentialTimeToLive);
printf("credentialTimeMaxDelta==%llu\n",credentialTimeMaxDelta);
printf("revision==%llu\n",revision);
printf("issuedTo==%.10llx\n",issuedTo.toInt());
printf("multicastLimit==%u\n",multicastLimit);
@@ -407,9 +407,9 @@ public:
uint64_t timestamp;

/**
* TTL for capabilities and tags
* Max difference between timestamp and tag/capability timestamp
*/
uint64_t credentialTimeToLive;
uint64_t credentialTimeMaxDelta;

/**
* Controller-side revision counter for this configuration
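A minimal sketch of the dictionary round-trip for the renamed field, using only the key and accessors shown in the hunks above (assumes an existing NetworkConfig nc and the in-tree Dictionary template; illustrative, not code from the tree):

Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY> d;
d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,nc.credentialTimeMaxDelta); // serialize under "ctmd"
nc.credentialTimeMaxDelta = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,0); // parse back, defaulting to 0 if absent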
@@ -202,14 +202,6 @@ public:
}
}

if (!upstream) {
// If I am a root server, only ping other root servers -- roots don't ping "down"
// since that would just be a waste of bandwidth and could potentially cause route
// flapping in Cluster mode.
if (RR->topology->amRoot())
return;
}

if (upstream) {
// "Upstream" devices are roots and relays and get special treatment -- they stay alive
// forever and we try to keep (if available) both IPv4 and IPv6 channels open to them.
@@ -269,13 +261,11 @@ ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextB
{
Mutex::Lock _l(_networks_m);
for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
if (((now - n->second->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!n->second->hasConfig())) {
if (((now - n->second->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)||(!n->second->hasConfig()))
needConfig.push_back(n->second);
}
n->second->announceMulticastGroups();
}
}

// Request updated configuration for networks that need it
for(std::vector< SharedPtr<Network> >::const_iterator n(needConfig.begin());n!=needConfig.end();++n)
(*n)->requestConfiguration();

@@ -415,15 +405,16 @@ ZT_PeerList *Node::peers() const
p->latency = pi->second->latency();
p->role = RR->topology->isRoot(pi->second->identity()) ? ZT_PEER_ROLE_ROOT : ZT_PEER_ROLE_LEAF;

std::vector< SharedPtr<Path> > paths(pi->second->paths());
SharedPtr<Path> bestp(pi->second->getBestPath(_now));
std::vector< std::pair< SharedPtr<Path>,bool > > paths(pi->second->paths(_now));
SharedPtr<Path> bestp(pi->second->getBestPath(_now,false));
p->pathCount = 0;
for(std::vector< SharedPtr<Path> >::iterator path(paths.begin());path!=paths.end();++path) {
memcpy(&(p->paths[p->pathCount].address),&((*path)->address()),sizeof(struct sockaddr_storage));
p->paths[p->pathCount].lastSend = (*path)->lastOut();
p->paths[p->pathCount].lastReceive = (*path)->lastIn();
p->paths[p->pathCount].preferred = (*path == bestp) ? 1 : 0;
p->paths[p->pathCount].trustedPathId = RR->topology->getOutboundPathTrust((*path)->address());
for(std::vector< std::pair< SharedPtr<Path>,bool > >::iterator path(paths.begin());path!=paths.end();++path) {
memcpy(&(p->paths[p->pathCount].address),&(path->first->address()),sizeof(struct sockaddr_storage));
p->paths[p->pathCount].lastSend = path->first->lastOut();
p->paths[p->pathCount].lastReceive = path->first->lastIn();
p->paths[p->pathCount].expired = path->second;
p->paths[p->pathCount].preferred = (path->first == bestp) ? 1 : 0;
p->paths[p->pathCount].trustedPathId = RR->topology->getOutboundPathTrust(path->first->address());
++p->pathCount;
}
}
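Consumers of the public API can now distinguish expired paths from active ones. A hedged sketch of reading the new per-path flags (field names per the ZT_PeerPhysicalPath hunk near the top of this diff; the ZT_PeerList/ZT_Peer layout and header name are assumed from include/ZeroTierOne.h, and the function name here is hypothetical):

#include <ZeroTierOne.h>

static void inspectPaths(const ZT_PeerList *pl)
{
	for(unsigned long i=0;i<pl->peerCount;++i) {
		const ZT_Peer *p = &(pl->peers[i]);
		for(unsigned int j=0;j<p->pathCount;++j) {
			if (p->paths[j].expired) {
				// no real packet received within ZT_PEER_PATH_EXPIRATION; still remembered for retry
			} else if (p->paths[j].preferred) {
				// this peer's current best (non-expired) path
			}
		}
	}
}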
@@ -137,6 +137,13 @@ public:
*/
bool send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now);

/**
* Manually update last sent time
*
* @param t Time of send
*/
inline void sent(const uint64_t t) { _lastOut = t; }

/**
* @return Address of local side of this path or NULL if unspecified
*/

node/Peer.cpp: 160 lines changed
@@ -27,6 +27,14 @@
#include "Cluster.hpp"
#include "Packet.hpp"

#ifndef AF_MAX
#if AF_INET > AF_INET6
#define AF_MAX AF_INET
#else
#define AF_MAX AF_INET6
#endif
#endif

namespace ZeroTier {

// Used to send varying values for NAT keepalive
@@ -37,7 +45,6 @@ Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Ident
_lastReceive(0),
_lastUnicastFrame(0),
_lastMulticastFrame(0),
_lastAnnouncedTo(0),
_lastDirectPathPushSent(0),
_lastDirectPathPushReceive(0),
RR(renv),
@@ -58,11 +65,11 @@ Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Ident

void Peer::received(
const SharedPtr<Path> &path,
unsigned int hops,
uint64_t packetId,
Packet::Verb verb,
uint64_t inRePacketId,
Packet::Verb inReVerb,
const unsigned int hops,
const uint64_t packetId,
const Packet::Verb verb,
const uint64_t inRePacketId,
const Packet::Verb inReVerb,
const bool trustEstablished)
{
const uint64_t now = RR->node->now();
@@ -150,7 +157,7 @@ void Peer::received(
uint64_t worstScore = 0xffffffffffffffffULL;
for(unsigned int p=0;p<_numPaths;++p) {
if (_paths[p].path->address().ss_family == path->address().ss_family) {
const uint64_t s = _pathScore(p);
const uint64_t s = _pathScore(p,now);
if (s < worstScore) {
worstScore = s;
worstSlot = (int)p;
@@ -163,7 +170,7 @@ void Peer::received(
// If we can't find one with the same family, replace the worst of any family
slot = ZT_MAX_PEER_NETWORK_PATHS - 1;
for(unsigned int p=0;p<_numPaths;++p) {
const uint64_t s = _pathScore(p);
const uint64_t s = _pathScore(p,now);
if (s < worstScore) {
worstScore = s;
slot = p;
@@ -181,36 +188,21 @@ void Peer::received(
#endif
} else {
TRACE("got %s via unknown path %s(%s), confirming...",Packet::verbString(verb),_id.address().toString().c_str(),path->address().toString().c_str());

if ( (_vProto >= 5) && ( !((_vMajor == 1)&&(_vMinor == 1)&&(_vRevision == 0)) ) ) {
// Newer than 1.1.0 can use ECHO, which is smaller
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ECHO);
outp.armor(_key,true);
path->send(RR,outp.data(),outp.size(),now);
} else {
// For backward compatibility we send HELLO to ancient nodes
sendHELLO(path->localAddress(),path->address(),now);
}
attemptToContactAt(path->localAddress(),path->address(),now);
path->sent(now);
}
}
} else if (trustEstablished) {
// Send PUSH_DIRECT_PATHS if hops>0 (relayed) and we have a trust relationship (common network membership)
_pushDirectPaths(path,now);
}

if ((now - _lastAnnouncedTo) >= ((ZT_MULTICAST_LIKE_EXPIRE / 2) - 1000)) {
_lastAnnouncedTo = now;
const std::vector< SharedPtr<Network> > networks(RR->node->allNetworks());
for(std::vector< SharedPtr<Network> >::const_iterator n(networks.begin());n!=networks.end();++n)
(*n)->tryAnnounceMulticastGroupsTo(SharedPtr<Peer>(this));
}
}

bool Peer::hasActivePathTo(uint64_t now,const InetAddress &addr) const
{
Mutex::Lock _l(_paths_m);
for(unsigned int p=0;p<_numPaths;++p) {
if ( (_paths[p].path->address() == addr) && (_paths[p].path->alive(now)) )
if ( (_paths[p].path->address() == addr) && ((now - _paths[p].lastReceive) <= ZT_PEER_PATH_EXPIRATION) && (_paths[p].path->alive(now)) )
return true;
}
return false;
@@ -223,8 +215,8 @@ bool Peer::sendDirect(const void *data,unsigned int len,uint64_t now,bool forceE
int bestp = -1;
uint64_t best = 0ULL;
for(unsigned int p=0;p<_numPaths;++p) {
if (_paths[p].path->alive(now)||(forceEvenIfDead)) {
const uint64_t s = _pathScore(p);
if ( ((now - _paths[p].lastReceive) <= ZT_PEER_PATH_EXPIRATION) && (_paths[p].path->alive(now)||(forceEvenIfDead)) ) {
const uint64_t s = _pathScore(p,now);
if (s >= best) {
best = s;
bestp = (int)p;
@@ -239,17 +231,19 @@
}
}

SharedPtr<Path> Peer::getBestPath(uint64_t now)
SharedPtr<Path> Peer::getBestPath(uint64_t now,bool includeExpired)
{
Mutex::Lock _l(_paths_m);

int bestp = -1;
uint64_t best = 0ULL;
for(unsigned int p=0;p<_numPaths;++p) {
const uint64_t s = _pathScore(p);
if (s >= best) {
best = s;
bestp = (int)p;
if ( ((now - _paths[p].lastReceive) <= ZT_PEER_PATH_EXPIRATION) || (includeExpired) ) {
const uint64_t s = _pathScore(p,now);
if (s >= best) {
best = s;
bestp = (int)p;
}
}
}
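Callers now choose whether expired paths may be returned. A short usage sketch implied by the new signature above (the second form's use case is an assumption; this diff only shows getBestPath(now,false) call sites):

SharedPtr<Path> viable(peer->getBestPath(now,false)); // only paths that received something within ZT_PEER_PATH_EXPIRATION
SharedPtr<Path> any(peer->getBestPath(now,true));     // also consider expired paths that are still remembered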
@@ -276,6 +270,17 @@ void Peer::sendHELLO(const InetAddress &localAddr,const InetAddress &atAddress,u
RR->node->putPacket(localAddr,atAddress,outp.data(),outp.size());
}

void Peer::attemptToContactAt(const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now)
{
if ( (_vProto >= 5) && ( !((_vMajor == 1)&&(_vMinor == 1)&&(_vRevision == 0)) ) ) {
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ECHO);
outp.armor(_key,true);
RR->node->putPacket(localAddr,atAddress,outp.data(),outp.size());
} else {
sendHELLO(localAddr,atAddress,now);
}
}

bool Peer::doPingAndKeepalive(uint64_t now,int inetAddressFamily)
{
Mutex::Lock _l(_paths_m);
@@ -283,8 +288,8 @@ bool Peer::doPingAndKeepalive(uint64_t now,int inetAddressFamily)
int bestp = -1;
uint64_t best = 0ULL;
for(unsigned int p=0;p<_numPaths;++p) {
if ((inetAddressFamily < 0)||((int)_paths[p].path->address().ss_family == inetAddressFamily)) {
const uint64_t s = _pathScore(p);
if ( ((now - _paths[p].lastReceive) <= ZT_PEER_PATH_EXPIRATION) && ((inetAddressFamily < 0)||((int)_paths[p].path->address().ss_family == inetAddressFamily)) ) {
const uint64_t s = _pathScore(p,now);
if (s >= best) {
best = s;
bestp = (int)p;
@@ -294,7 +299,8 @@ bool Peer::doPingAndKeepalive(uint64_t now,int inetAddressFamily)

if (bestp >= 0) {
if ((now - _paths[bestp].lastReceive) >= ZT_PEER_PING_PERIOD) {
sendHELLO(_paths[bestp].path->localAddress(),_paths[bestp].path->address(),now);
attemptToContactAt(_paths[bestp].path->localAddress(),_paths[bestp].path->address(),now);
_paths[bestp].path->sent(now);
} else if (_paths[bestp].path->needsHeartbeat(now)) {
_natKeepaliveBuf += (uint32_t)((now * 0x9e3779b1) >> 1); // tumble this around to send constantly varying (meaningless) payloads
_paths[bestp].path->send(RR,&_natKeepaliveBuf,sizeof(_natKeepaliveBuf),now);
@@ -309,39 +315,25 @@ bool Peer::hasActiveDirectPath(uint64_t now) const
{
Mutex::Lock _l(_paths_m);
for(unsigned int p=0;p<_numPaths;++p) {
if (_paths[p].path->alive(now))
if (((now - _paths[p].lastReceive) <= ZT_PEER_PATH_EXPIRATION)&&(_paths[p].path->alive(now)))
return true;
}
return false;
}

bool Peer::resetWithinScope(InetAddress::IpScope scope,uint64_t now)
bool Peer::resetWithinScope(InetAddress::IpScope scope,int inetAddressFamily,uint64_t now)
{
Mutex::Lock _l(_paths_m);
unsigned int np = _numPaths;
unsigned int x = 0;
unsigned int y = 0;
while (x < np) {
if (_paths[x].path->address().ipScope() == scope) {
// Resetting a path means sending a HELLO and then forgetting it. If we
// get OK(HELLO) then it will be re-learned.
sendHELLO(_paths[x].path->localAddress(),_paths[x].path->address(),now);
} else {
if (x != y) {
_paths[y].lastReceive = _paths[x].lastReceive;
_paths[y].path = _paths[x].path;
#ifdef ZT_ENABLE_CLUSTER
_paths[y].localClusterSuboptimal = _paths[x].localClusterSuboptimal;
#endif
}
++y;
bool resetSomething = false;
for(unsigned int p=0;p<_numPaths;++p) {
if ( (_paths[p].path->address().ss_family == inetAddressFamily) && (_paths[p].path->address().ipScope() == scope) ) {
attemptToContactAt(_paths[p].path->localAddress(),_paths[p].path->address(),now);
_paths[p].path->sent(now);
_paths[p].lastReceive >>= 2; // de-prioritize heavily vs. other paths, will get reset if we get OK(HELLO) or other traffic
resetSomething = true;
}
++x;
}
_numPaths = y;
while (y < ZT_MAX_PEER_NETWORK_PATHS)
_paths[y++].path.zero(); // let go of unused SmartPtr<>'s
return (_numPaths < np);
return resetSomething;
}

void Peer::getBestActiveAddresses(uint64_t now,InetAddress &v4,InetAddress &v6) const
@@ -351,17 +343,19 @@ void Peer::getBestActiveAddresses(uint64_t now,InetAddress &v4,InetAddress &v6)
int bestp4 = -1,bestp6 = -1;
uint64_t best4 = 0ULL,best6 = 0ULL;
for(unsigned int p=0;p<_numPaths;++p) {
if (_paths[p].path->address().ss_family == AF_INET) {
const uint64_t s = _pathScore(p);
if (s >= best4) {
best4 = s;
bestp4 = (int)p;
}
} else if (_paths[p].path->address().ss_family == AF_INET6) {
const uint64_t s = _pathScore(p);
if (s >= best6) {
best6 = s;
bestp6 = (int)p;
if ( ((now - _paths[p].lastReceive) <= ZT_PEER_PATH_EXPIRATION) && (_paths[p].path->alive(now)) ) {
if (_paths[p].path->address().ss_family == AF_INET) {
const uint64_t s = _pathScore(p,now);
if (s >= best4) {
best4 = s;
bestp4 = (int)p;
}
} else if (_paths[p].path->address().ss_family == AF_INET6) {
const uint64_t s = _pathScore(p,now);
if (s >= best6) {
best6 = s;
bestp6 = (int)p;
}
}
}
}
@@ -372,30 +366,6 @@ void Peer::getBestActiveAddresses(uint64_t now,InetAddress &v4,InetAddress &v6)
v6 = _paths[bestp6].path->address();
}

void Peer::clean(uint64_t now)
{
Mutex::Lock _l(_paths_m);
unsigned int np = _numPaths;
unsigned int x = 0;
unsigned int y = 0;
while (x < np) {
if ((now - _paths[x].lastReceive) <= ZT_PEER_PATH_EXPIRATION) {
if (y != x) {
_paths[y].lastReceive = _paths[x].lastReceive;
_paths[y].path = _paths[x].path;
#ifdef ZT_ENABLE_CLUSTER
_paths[y].localClusterSuboptimal = _paths[x].localClusterSuboptimal;
#endif
}
++y;
}
++x;
}
_numPaths = y;
while (y < ZT_MAX_PEER_NETWORK_PATHS)
_paths[y++].path.zero(); // let go of unused SmartPtr<>'s
}

bool Peer::_pushDirectPaths(const SharedPtr<Path> &path,uint64_t now)
{
#ifdef ZT_ENABLE_CLUSTER
@ -106,11 +106,11 @@ public:
|
||||
*/
|
||||
void received(
|
||||
const SharedPtr<Path> &path,
|
||||
unsigned int hops,
|
||||
uint64_t packetId,
|
||||
Packet::Verb verb,
|
||||
uint64_t inRePacketId,
|
||||
Packet::Verb inReVerb,
|
||||
const unsigned int hops,
|
||||
const uint64_t packetId,
|
||||
const Packet::Verb verb,
|
||||
const uint64_t inRePacketId,
|
||||
const Packet::Verb inReVerb,
|
||||
const bool trustEstablished);
|
||||
|
||||
/**
|
||||
@ -149,9 +149,10 @@ public:
* Get the best current direct path
*
* @param now Current time
* @param includeExpired If true, include even expired paths
* @return Best current path or NULL if none
*/
SharedPtr<Path> getBestPath(uint64_t now);
SharedPtr<Path> getBestPath(uint64_t now,bool includeExpired);
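As a minimal usage sketch (assuming peer, now, RR, data and len are in scope), a caller can prefer a non-expired path and only fall back to an expired one as a last resort, which is essentially what Switch::_trySend() does later in this diff:

SharedPtr<Path> via(peer->getBestPath(now,false)); // best non-expired path, if any
if (!via)
	via = peer->getBestPath(now,true);             // last resort: accept an expired path
if (via)
	via->send(RR,data,len,now);                    // send over whatever we found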

/**
* Send a HELLO to this peer at a specified physical address
@ -164,6 +165,17 @@ public:
*/
void sendHELLO(const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now);

/**
* Send ECHO (or HELLO for older peers) to this peer at the given address
*
* No statistics or sent times are updated here.
*
* @param localAddr Local address
* @param atAddress Destination address
* @param now Current time
*/
void attemptToContactAt(const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now);
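Because attemptToContactAt() deliberately leaves statistics and sent times alone, callers record the send themselves. A minimal sketch of the pattern used by resetWithinScope() in Peer.cpp earlier in this diff (path and now assumed in scope):

attemptToContactAt(path->localAddress(),path->address(),now); // fire the ECHO/HELLO
path->sent(now);                                              // the caller updates the sent time itself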

/**
* Send pings or keepalives depending on configured timeouts
*
@ -175,18 +187,22 @@ public:

/**
* @param now Current time
* @return True if this peer has at least one active direct path
* @return True if this peer has at least one active and alive direct path
*/
bool hasActiveDirectPath(uint64_t now) const;

/**
* Reset paths within a given scope
* Reset paths within a given IP scope and address family
*
* @param scope IP scope of paths to reset
* Resetting a path involves sending a HELLO to it and then de-prioritizing
* it vs. other paths.
*
* @param scope IP scope
* @param inetAddressFamily Family e.g. AF_INET
* @param now Current time
* @return True if at least one path was forgotten
* @return True if we forgot at least one path
*/
bool resetWithinScope(InetAddress::IpScope scope,uint64_t now);
bool resetWithinScope(InetAddress::IpScope scope,int inetAddressFamily,uint64_t now);

/**
* Get most recently active path addresses for IPv4 and/or IPv6
@ -201,21 +217,15 @@ public:
void getBestActiveAddresses(uint64_t now,InetAddress &v4,InetAddress &v6) const;

/**
* Perform periodic cleaning operations
*
* @param now Current time
* @return All known direct paths to this peer and whether they are expired (true == expired)
*/
void clean(uint64_t now);

/**
* @return All known direct paths to this peer (active or inactive)
*/
inline std::vector< SharedPtr<Path> > paths() const
inline std::vector< std::pair< SharedPtr<Path>,bool > > paths(const uint64_t now) const
{
std::vector< SharedPtr<Path> > pp;
std::vector< std::pair< SharedPtr<Path>,bool > > pp;
Mutex::Lock _l(_paths_m);
for(unsigned int p=0,np=_numPaths;p<np;++p)
pp.push_back(_paths[p].path);
pp.push_back(std::pair< SharedPtr<Path>,bool >(_paths[p].path,(now - _paths[p].lastReceive) > ZT_PEER_PATH_EXPIRATION));
return pp;
}
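A minimal sketch of how a caller might consume the new return type (peer and now assumed in scope); the bool in each pair is the expiration state that the JSON status output at the end of this diff reports as "expired":

std::vector< std::pair< SharedPtr<Path>,bool > > pp(peer->paths(now));
for(std::vector< std::pair< SharedPtr<Path>,bool > >::const_iterator i(pp.begin());i!=pp.end();++i) {
	// i->first is the path, i->second is true if it has gone ZT_PEER_PATH_EXPIRATION without receiving
	printf("%s expired=%s\n",i->first->address().toString().c_str(),(i->second) ? "true" : "false");
}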
@ -370,11 +380,12 @@ public:
private:
bool _pushDirectPaths(const SharedPtr<Path> &path,uint64_t now);

inline uint64_t _pathScore(const unsigned int p) const
inline uint64_t _pathScore(const unsigned int p,const uint64_t now) const
{
uint64_t s = ZT_PEER_PING_PERIOD;
uint64_t s = ZT_PEER_PING_PERIOD + _paths[p].lastReceive + (uint64_t)(_paths[p].path->preferenceRank() * (ZT_PEER_PING_PERIOD / ZT_PATH_MAX_PREFERENCE_RANK));

if (_paths[p].path->address().ss_family == AF_INET) {
s += _paths[p].lastReceive + (uint64_t)(_paths[p].path->preferenceRank() * (ZT_PEER_PING_PERIOD / ZT_PATH_MAX_PREFERENCE_RANK)) + (uint64_t)(ZT_PEER_PING_PERIOD * (unsigned long)(reinterpret_cast<const struct sockaddr_in *>(&(_paths[p].path->address()))->sin_addr.s_addr == _remoteClusterOptimal4));
s += (uint64_t)(ZT_PEER_PING_PERIOD * (unsigned long)(reinterpret_cast<const struct sockaddr_in *>(&(_paths[p].path->address()))->sin_addr.s_addr == _remoteClusterOptimal4));
} else if (_paths[p].path->address().ss_family == AF_INET6) {
uint64_t clusterWeight = ZT_PEER_PING_PERIOD;
const uint8_t *a = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&(_paths[p].path->address()))->sin6_addr.s6_addr);
@ -384,23 +395,24 @@ private:
break;
}
}
s += _paths[p].lastReceive + (uint64_t)(_paths[p].path->preferenceRank() * (ZT_PEER_PING_PERIOD / ZT_PATH_MAX_PREFERENCE_RANK)) + clusterWeight;
} else {
s += _paths[p].lastReceive + (uint64_t)(_paths[p].path->preferenceRank() * (ZT_PEER_PING_PERIOD / ZT_PATH_MAX_PREFERENCE_RANK));
s += clusterWeight;
}

s += (ZT_PEER_PING_PERIOD / 2) * (uint64_t)_paths[p].path->alive(now);

#ifdef ZT_ENABLE_CLUSTER
s -= ZT_PEER_PING_PERIOD * (uint64_t)_paths[p].localClusterSuboptimal;
#endif

return s;
}
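Reading the new score as a whole: every path starts from its lastReceive time plus up to one ping period of preference-rank bonus, a path matching the remote's cluster-optimal address gains another ping period, an alive path gains half a ping period, and a locally cluster-suboptimal path loses one. A rough self-contained sketch of the same ranking idea, with hypothetical stand-in constants and made-up inputs (it omits the cluster-optimal address term):

#include <cstdint>
#include <cstdio>

static const uint64_t PING_PERIOD = 60000; // stand-in for ZT_PEER_PING_PERIOD (value assumed)
static const unsigned int MAX_RANK = 3;    // stand-in for ZT_PATH_MAX_PREFERENCE_RANK (value assumed)

static uint64_t score(uint64_t lastReceive,unsigned int prefRank,bool alive,bool clusterSuboptimal)
{
	uint64_t s = PING_PERIOD + lastReceive + (uint64_t)(prefRank * (PING_PERIOD / MAX_RANK));
	s += (PING_PERIOD / 2) * (uint64_t)alive; // alive paths get a half-period bonus
	if (clusterSuboptimal)
		s -= PING_PERIOD;                     // cluster-suboptimal paths are penalized
	return s;
}

int main()
{
	const uint64_t now = 1467000000000ULL; // made-up current time in ms
	printf("recent, alive path: %llu\n",(unsigned long long)score(now - 1000,2,true,false));
	printf("stale, silent path: %llu\n",(unsigned long long)score(now - 400000,2,false,false));
	return 0;
}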

unsigned char _key[ZT_PEER_SECRET_KEY_LENGTH];
uint8_t _key[ZT_PEER_SECRET_KEY_LENGTH];
uint8_t _remoteClusterOptimal6[16];
uint64_t _lastUsed;
uint64_t _lastReceive; // direct or indirect
uint64_t _lastUnicastFrame;
uint64_t _lastMulticastFrame;
uint64_t _lastAnnouncedTo;
uint64_t _lastDirectPathPushSent;
uint64_t _lastDirectPathPushReceive;
const RuntimeEnvironment *RR;
@ -33,37 +33,31 @@
#include "Switch.hpp"

// Entry timeout -- make it fairly long since this is just to prevent stale buildup
#define ZT_SELFAWARENESS_ENTRY_TIMEOUT 3600000
#define ZT_SELFAWARENESS_ENTRY_TIMEOUT 600000

namespace ZeroTier {

class _ResetWithinScope
{
public:
_ResetWithinScope(uint64_t now,InetAddress::IpScope scope) :
_ResetWithinScope(uint64_t now,int inetAddressFamily,InetAddress::IpScope scope) :
_now(now),
_family(inetAddressFamily),
_scope(scope) {}

inline void operator()(Topology &t,const SharedPtr<Peer> &p)
{
if (p->resetWithinScope(_scope,_now))
peersReset.push_back(p);
}
inline void operator()(Topology &t,const SharedPtr<Peer> &p) { if (p->resetWithinScope(_scope,_family,_now)) peersReset.push_back(p); }

std::vector< SharedPtr<Peer> > peersReset;

private:
uint64_t _now;
int _family;
InetAddress::IpScope _scope;
};

SelfAwareness::SelfAwareness(const RuntimeEnvironment *renv) :
RR(renv),
_phy(32)
{
}

SelfAwareness::~SelfAwareness()
_phy(128)
{
}
@ -98,8 +92,8 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &receivedOnLoc
}
}

// Reset all paths within this scope
_ResetWithinScope rset(now,(InetAddress::IpScope)scope);
// Reset all paths within this scope and address family
_ResetWithinScope rset(now,myPhysicalAddress.ss_family,(InetAddress::IpScope)scope);
RR->topology->eachPeer<_ResetWithinScope &>(rset);

// Send a NOP to all peers for whom we forgot a path. This will cause direct
@ -36,7 +36,6 @@ class SelfAwareness
{
public:
SelfAwareness(const RuntimeEnvironment *renv);
~SelfAwareness();

/**
* Called when a trusted remote peer informs us of our external network address
@ -747,14 +747,20 @@ Address Switch::_sendWhoisRequest(const Address &addr,const Address *peersAlread

bool Switch::_trySend(const Packet &packet,bool encrypt)
{
SharedPtr<Peer> peer(RR->topology->getPeer(packet.destination()));

const SharedPtr<Peer> peer(RR->topology->getPeer(packet.destination()));
if (peer) {
const uint64_t now = RR->node->now();

SharedPtr<Path> viaPath(peer->getBestPath(now));
// First get the best path, and if it's dead (and this is not a root)
// we attempt to re-activate that path but this packet will flow
// upstream. If the path comes back alive, it will be used in the future.
// For roots we don't do the alive check since roots are not required
// to send heartbeats "down" and because we have to at least try to
// go somewhere.

SharedPtr<Path> viaPath(peer->getBestPath(now,false));
if ( (viaPath) && (!viaPath->alive(now)) && (!RR->topology->isRoot(peer->identity())) ) {
if ((now - viaPath->lastOut()) > 5000) {
if ((now - viaPath->lastOut()) > std::max((now - viaPath->lastIn()) >> 2,(uint64_t)ZT_PATH_MIN_REACTIVATE_INTERVAL)) {
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_ECHO);
outp.armor(peer->key(),true);
viaPath->send(RR,outp.data(),outp.size(),now);
@ -763,8 +769,10 @@ bool Switch::_trySend(const Packet &packet,bool encrypt)
}
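The re-activation throttle above replaces the old fixed 5000 ms retry with one scaled to how long the path has been silent: wait at least a quarter of the time since the last inbound packet, but never less than ZT_PATH_MIN_REACTIVATE_INTERVAL. A small worked example with made-up numbers (the real constant's value is not shown in this diff):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main()
{
	const uint64_t MIN_REACTIVATE_INTERVAL = 2500; // hypothetical stand-in for ZT_PATH_MIN_REACTIVATE_INTERVAL (ms)
	const uint64_t now = 1467000000000ULL;         // made-up current time in ms
	const uint64_t lastIn = now - 60000;           // nothing received for 60 seconds
	const uint64_t lastOut = now - 10000;          // last re-activation attempt 10 seconds ago

	const uint64_t wait = std::max((now - lastIn) >> 2,MIN_REACTIVATE_INTERVAL); // 15000 ms here
	printf("minimum interval between attempts: %llu ms\n",(unsigned long long)wait);
	printf("retry now? %s\n",((now - lastOut) > wait) ? "yes" : "no, only 10 s since the last attempt");
	return 0;
}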
if (!viaPath) {
SharedPtr<Peer> relay(RR->topology->getBestRoot());
if ( (!relay) || (!(viaPath = relay->getBestPath(now))) )
return false;
if ( (!relay) || (!(viaPath = relay->getBestPath(now,false))) ) {
if (!(viaPath = peer->getBestPath(now,true)))
return false;
}
}

Packet tmp(packet);
@ -787,7 +795,7 @@ bool Switch::_trySend(const Packet &packet,bool encrypt)
unsigned int fragsRemaining = (remaining / (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
if ((fragsRemaining * (ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH)) < remaining)
++fragsRemaining;
unsigned int totalFragments = fragsRemaining + 1;
const unsigned int totalFragments = fragsRemaining + 1;
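For reference, the fragment count works out as a round-up division plus one for the initial packet. A tiny worked sketch with assumed sizes (the real values come from ZT_UDP_DEFAULT_PAYLOAD_MTU and ZT_PROTO_MIN_FRAGMENT_LENGTH, which are not shown here):

#include <cstdio>

int main()
{
	const unsigned int perFragment = 1444 - 16; // assumed payload capacity of one fragment
	unsigned int remaining = 3000;              // assumed bytes left after the initial packet

	unsigned int fragsRemaining = remaining / perFragment;
	if ((fragsRemaining * perFragment) < remaining)
		++fragsRemaining;                       // round up for the partial last fragment
	const unsigned int totalFragments = fragsRemaining + 1; // +1 for the packet already counted

	printf("fragments after the first packet: %u, total: %u\n",fragsRemaining,totalFragments); // 3, 4
	return 0;
}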

for(unsigned int fno=1;fno<totalFragments;++fno) {
chunkSize = std::min(remaining,(unsigned int)(ZT_UDP_DEFAULT_PAYLOAD_MTU - ZT_PROTO_MIN_FRAGMENT_LENGTH));
@ -256,11 +256,8 @@ void Topology::clean(uint64_t now)
Address *a = (Address *)0;
SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
while (i.next(a,p)) {
if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end()))
_peers.erase(*a);
} else {
(*p)->clean(now);
}
}
}
{
@ -183,14 +183,16 @@ static std::string _jsonEnumerate(unsigned int depth,const ZT_PeerPhysicalPath *
"%s\t\"address\": \"%s\",\n"
"%s\t\"lastSend\": %llu,\n"
"%s\t\"lastReceive\": %llu,\n"
"%s\t\"active\": true,\n"
"%s\t\"active\": %s,\n"
"%s\t\"expired\": %s,\n"
"%s\t\"preferred\": %s,\n"
"%s\t\"trustedPathId\": %llu\n"
"%s}",
prefix,_jsonEscape(reinterpret_cast<const InetAddress *>(&(pp[i].address))->toString()).c_str(),
prefix,pp[i].lastSend,
prefix,pp[i].lastReceive,
prefix,
prefix,(pp[i].expired != 0) ? "false" : "true",
prefix,(pp[i].expired == 0) ? "false" : "true",
prefix,(pp[i].preferred == 0) ? "false" : "true",
prefix,pp[i].trustedPathId,
prefix);
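With the new fields, a single physical path in the JSON peer listing would render roughly as follows; the values below are illustrative only, and "active" is simply the negation of "expired":

{
	"address": "203.0.113.10/9993",
	"lastSend": 1467000000000,
	"lastReceive": 1467000000500,
	"active": true,
	"expired": false,
	"preferred": true,
	"trustedPathId": 0
}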