Clean up in numerous places, reduce network chattiness around MULTICAST_LIKE, and fix a "how was that working" latent bug that caused some control traffic to take the scenic route.

Adam Ierymenko 2016-04-19 12:09:35 -07:00
parent affbca74b4
commit 2f18a92e20
12 changed files with 112 additions and 85 deletions

View File

@@ -710,7 +710,7 @@ bool Cluster::findBetterEndpoint(InetAddress &redirectTo,const Address &peerAddr
// Pick based on location if it can be determined
int px = 0,py = 0,pz = 0;
if (_addressToLocationFunction(_addressToLocationFunctionArg,reinterpret_cast<const struct sockaddr_storage *>(&peerPhysicalAddress),&px,&py,&pz) == 0) {
TRACE("no geolocation data for %s (geo-lookup is lazy/async so it may work next time)",peerPhysicalAddress.toIpString().c_str());
TRACE("no geolocation data for %s",peerPhysicalAddress.toIpString().c_str());
return false;
}
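The check above treats a zero return from _addressToLocationFunction as "no geolocation data for this address yet", and the reworded TRACE notes that the lookup is lazy/async, so the same call can succeed later. A hypothetical cache-backed callback honoring that contract might look like the sketch below; the cache and the queued background lookup are illustrative assumptions, not ZeroTier's actual implementation.

#include <array>
#include <map>
#include <string>

static std::map< std::string,std::array<int,3> > geoCache; // hypothetical cache, filled asynchronously elsewhere

static int addressToLocation(const std::string &ip,int *x,int *y,int *z)
{
	std::map< std::string,std::array<int,3> >::const_iterator it(geoCache.find(ip));
	if (it == geoCache.end()) {
		// enqueueGeoLookup(ip); // hypothetical: kick off a background lookup so a later call can succeed
		return 0;                // no data yet -> caller logs "no geolocation data" and skips the redirect
	}
	*x = it->second[0]; *y = it->second[1]; *z = it->second[2];
	return 1;
}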

View File

@@ -277,7 +277,7 @@
/**
* No answer timeout to trigger dead path detection
*/
#define ZT_PEER_DEAD_PATH_DETECTION_NO_ANSWER_TIMEOUT 2500
#define ZT_PEER_DEAD_PATH_DETECTION_NO_ANSWER_TIMEOUT 2000
/**
* Probation threshold after which a path becomes dead

View File

@@ -71,6 +71,7 @@ bool IncomingPacket::tryDecode(const RuntimeEnvironment *RR,bool deferred)
}
const Packet::Verb v = verb();
//if (RR->topology->isRoot(peer->identity())) printf("<< %s from %s(%s)\n",Packet::verbString(v),sourceAddress.toString().c_str(),_remoteAddress.toString().c_str());
//TRACE("<< %s from %s(%s)",Packet::verbString(v),sourceAddress.toString().c_str(),_remoteAddress.toString().c_str());
switch(v) {
//case Packet::VERB_NOP:
@@ -349,6 +350,7 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
const uint64_t inRePacketId = at<uint64_t>(ZT_PROTO_VERB_OK_IDX_IN_RE_PACKET_ID);
//TRACE("%s(%s): OK(%s)",source().toString().c_str(),_remoteAddress.toString().c_str(),Packet::verbString(inReVerb));
//if (RR->topology->isRoot(peer->identity())) printf("%s(%s): OK(%s)\n",source().toString().c_str(),_remoteAddress.toString().c_str(),Packet::verbString(inReVerb));
switch(inReVerb) {

View File

@@ -142,7 +142,7 @@ bool Network::tryAnnounceMulticastGroupsTo(const SharedPtr<Peer> &peer)
(peer->address() == this->controller()) ||
(RR->topology->isRoot(peer->identity()))
) {
_announceMulticastGroupsTo(peer->address(),_allMulticastGroups());
_announceMulticastGroupsTo(peer,_allMulticastGroups());
return true;
}
return false;
@@ -400,10 +400,10 @@ bool Network::_isAllowed(const SharedPtr<Peer> &peer) const
return false; // default position on any failure
}
class _GetPeersThatNeedMulticastAnnouncement
class _MulticastAnnounceAll
{
public:
_GetPeersThatNeedMulticastAnnouncement(const RuntimeEnvironment *renv,Network *nw) :
_MulticastAnnounceAll(const RuntimeEnvironment *renv,Network *nw) :
_now(renv->node->now()),
_controller(nw->controller()),
_network(nw),
@@ -416,47 +416,45 @@ public:
(p->address() == _controller) ||
(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->address()) != _rootAddresses.end())
) {
peers.push_back(p->address());
peers.push_back(p);
}
}
std::vector<Address> peers;
std::vector< SharedPtr<Peer> > peers;
private:
uint64_t _now;
Address _controller;
Network *_network;
std::vector<Address> _rootAddresses;
const uint64_t _now;
const Address _controller;
Network *const _network;
const std::vector<Address> _rootAddresses;
};
void Network::_announceMulticastGroups()
{
// Assumes _lock is locked
_GetPeersThatNeedMulticastAnnouncement gpfunc(RR,this);
RR->topology->eachPeer<_GetPeersThatNeedMulticastAnnouncement &>(gpfunc);
_MulticastAnnounceAll gpfunc(RR,this);
RR->topology->eachPeer<_MulticastAnnounceAll &>(gpfunc);
std::vector<MulticastGroup> allMulticastGroups(_allMulticastGroups());
for(std::vector<Address>::const_iterator pa(gpfunc.peers.begin());pa!=gpfunc.peers.end();++pa)
_announceMulticastGroupsTo(*pa,allMulticastGroups);
for(std::vector< SharedPtr<Peer> >::const_iterator i(gpfunc.peers.begin());i!=gpfunc.peers.end();++i)
_announceMulticastGroupsTo(*i,allMulticastGroups);
}
void Network::_announceMulticastGroupsTo(const Address &peerAddress,const std::vector<MulticastGroup> &allMulticastGroups) const
void Network::_announceMulticastGroupsTo(const SharedPtr<Peer> &peer,const std::vector<MulticastGroup> &allMulticastGroups) const
{
// Assumes _lock is locked
// We push COMs ahead of MULTICAST_LIKE since they're used for access control -- a COM is a public
// credential so "over-sharing" isn't really an issue (and we only do so with roots).
if ((_config)&&(_config.com())&&(!_config.isPublic())) {
Packet outp(peerAddress,RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
if ((_config)&&(_config.com())&&(!_config.isPublic())&&(peer->needsOurNetworkMembershipCertificate(_id,RR->node->now(),true))) {
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
_config.com().serialize(outp);
RR->sw->send(outp,true,0);
}
{
Packet outp(peerAddress,RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
for(std::vector<MulticastGroup>::const_iterator mg(allMulticastGroups.begin());mg!=allMulticastGroups.end();++mg) {
if ((outp.size() + 18) >= ZT_UDP_DEFAULT_PAYLOAD_MTU) {
RR->sw->send(outp,true,0);
outp.reset(peerAddress,RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
outp.reset(peer->address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
}
// network ID, MAC, ADI
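The loop above packs one 18-byte record (8-byte network ID, 6-byte MAC, 4-byte ADI) per multicast group into a MULTICAST_LIKE packet and flushes whenever the next record would exceed the UDP payload MTU. Below is a minimal standalone sketch of that batching pattern; the flush callback stands in for RR->sw->send(), and the 1400-byte MTU is an illustrative value rather than the real ZT_UDP_DEFAULT_PAYLOAD_MTU.

#include <cstdint>
#include <cstring>
#include <vector>

struct LikeRecord { uint64_t nwid; uint8_t mac[6]; uint32_t adi; }; // 8 + 6 + 4 = 18 wire bytes

static void announceLikes(const std::vector<LikeRecord> &groups,void (*flush)(const uint8_t *,unsigned int))
{
	const unsigned int MTU = 1400; // stand-in for ZT_UDP_DEFAULT_PAYLOAD_MTU
	uint8_t buf[2048];
	unsigned int len = 0; // the real packet starts with a protocol header; omitted here for brevity
	for(std::vector<LikeRecord>::const_iterator r(groups.begin());r!=groups.end();++r) {
		if ((len + 18) >= MTU) { // next record would exceed the payload MTU: send and start a new packet
			flush(buf,len);
			len = 0;
		}
		// append network ID, MAC, ADI (host byte order here; the actual wire format is big-endian)
		std::memcpy(buf + len,&(r->nwid),8);
		std::memcpy(buf + len + 8,r->mac,6);
		std::memcpy(buf + len + 14,&(r->adi),4);
		len += 18;
	}
	if (len > 0)
		flush(buf,len); // final, possibly partial packet
}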

View File

@@ -47,7 +47,7 @@ namespace ZeroTier {
class RuntimeEnvironment;
class Peer;
class _GetPeersThatNeedMulticastAnnouncement;
class _MulticastAnnounceAll;
/**
* A virtual LAN
@@ -55,7 +55,7 @@ class _GetPeersThatNeedMulticastAnnouncement;
class Network : NonCopyable
{
friend class SharedPtr<Network>;
friend class _GetPeersThatNeedMulticastAnnouncement; // internal function object
friend class _MulticastAnnounceAll; // internal function object
public:
/**
@@ -317,9 +317,8 @@ private:
ZT_VirtualNetworkStatus _status() const;
void _externalConfig(ZT_VirtualNetworkConfig *ec) const; // assumes _lock is locked
bool _isAllowed(const SharedPtr<Peer> &peer) const;
bool _tryAnnounceMulticastGroupsTo(const std::vector<Address> &rootAddresses,const std::vector<MulticastGroup> &allMulticastGroups,const SharedPtr<Peer> &peer,uint64_t now) const;
void _announceMulticastGroups();
void _announceMulticastGroupsTo(const Address &peerAddress,const std::vector<MulticastGroup> &allMulticastGroups) const;
void _announceMulticastGroupsTo(const SharedPtr<Peer> &peer,const std::vector<MulticastGroup> &allMulticastGroups) const;
std::vector<MulticastGroup> _allMulticastGroups() const;
const RuntimeEnvironment *RR;

View File

@@ -191,7 +191,7 @@ public:
// If this is a world root, pick (if possible) both an IPv4 and an IPv6 stable endpoint to use if link isn't currently alive.
for(std::vector<World::Root>::const_iterator r(_world.roots().begin());r!=_world.roots().end();++r) {
if (r->identity.address() == p->address()) {
if (r->identity == p->identity()) {
upstream = true;
for(unsigned long k=0,ptr=(unsigned long)RR->node->prng();k<(unsigned long)r->stableEndpoints.size();++k) {
const InetAddress &addr = r->stableEndpoints[ptr++ % r->stableEndpoints.size()];

View File

@@ -19,11 +19,25 @@
#include "Path.hpp"
#include "RuntimeEnvironment.hpp"
#include "Node.hpp"
//#include "Topology.hpp"
namespace ZeroTier {
bool Path::send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
{
/*
if (len > 13) {
Address zta(reinterpret_cast<const uint8_t *>(data)+8,5);
if ((zta.toInt() == 0x9d219039f3ULL)||(zta.toInt() == 0x8841408a2eULL)) {
printf(">> %s@%s %u ",zta.toString().c_str(),address().toString().c_str(),len);
Packet pcopy(data,len);
SharedPtr<Peer> rp(RR->topology->getPeer(zta));
if (pcopy.dearmor(rp->key())) {
printf("%s\n",Packet::verbString(pcopy.verb()));
} else printf("!!!!\n");
}
}
*/
if (RR->node->putPacket(_localAddress,address(),data,len)) {
sent(now);
return true;

View File

@@ -28,6 +28,9 @@
#include "Constants.hpp"
#include "InetAddress.hpp"
// Note: if you change these flags check the logic below. Some of it depends
// on these bits being what they are.
/**
* Flag indicating that this path is suboptimal
*
@@ -133,9 +136,8 @@ public:
* @return True if this path appears active
*/
inline bool active(uint64_t now) const
throw()
{
return (((now - _lastReceived) < ZT_PATH_ACTIVITY_TIMEOUT)&&(_probation < ZT_PEER_DEAD_PATH_DETECTION_MAX_PROBATION));
return ( ((now - _lastReceived) < ZT_PATH_ACTIVITY_TIMEOUT) && (_probation < ZT_PEER_DEAD_PATH_DETECTION_MAX_PROBATION) );
}
/**
@@ -221,20 +223,27 @@ public:
}
/**
* @return This path's overall score (higher == better)
* @return This path's overall quality score (higher is better)
*/
inline uint64_t score() const throw()
{
/* We compute the score based on the "freshness" of the path (when we last
* received something) scaled/corrected by the preference rank within the
* ping keepalive window. That way higher ranking paths are preferred but
* not to the point of overriding timeouts and choosing potentially dead
* paths. Finally we increase the score for known to be cluster optimal
* paths and decrease it for paths known to be suboptimal. */
uint64_t score = _lastReceived + ZT_PEER_DIRECT_PING_DELAY; // make sure it's never less than ZT_PEER_DIRECT_PING_DELAY to prevent integer underflow
// This is a little bit convoluted because we try to be branch-free, using multiplication instead of branches for boolean flags
// Start with the last time this path was active, and add a fudge factor to prevent integer underflow if _lastReceived is 0
uint64_t score = _lastReceived + (ZT_PEER_DIRECT_PING_DELAY * (ZT_PEER_DEAD_PATH_DETECTION_MAX_PROBATION + 1));
// Increase score based on path preference rank, which is based on IP scope and address family
score += preferenceRank() * (ZT_PEER_DIRECT_PING_DELAY / ZT_PATH_MAX_PREFERENCE_RANK);
// Increase score if this is known to be an optimal path to a cluster
score += (uint64_t)(_flags & ZT_PATH_FLAG_CLUSTER_OPTIMAL) * (ZT_PEER_DIRECT_PING_DELAY / 2); // /2 because CLUSTER_OPTIMAL is flag 0x0002
// Decrease score if this is known to be a sub-optimal path to a cluster
score -= (uint64_t)(_flags & ZT_PATH_FLAG_CLUSTER_SUBOPTIMAL) * ZT_PEER_DIRECT_PING_DELAY;
// Penalize for missed ECHO tests in dead path detection
score -= (uint64_t)((ZT_PEER_DIRECT_PING_DELAY / 2) * _probation);
return score;
}
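The rewritten score() stays branch-free by leaning on the flag bit values themselves: CLUSTER_OPTIMAL is the 0x0002 bit (hence the /2 in the multiplier) and the straight multiplication for the penalty implies CLUSTER_SUBOPTIMAL is the 0x0001 bit. Below is a minimal standalone sketch of that trick; PING_DELAY and the fudge multiplier are stand-ins, not the real ZT_PEER_DIRECT_PING_DELAY or ZT_PEER_DEAD_PATH_DETECTION_MAX_PROBATION values.

#include <cstdint>
#include <cstdio>

static const uint64_t PING_DELAY = 60000;             // stand-in for ZT_PEER_DIRECT_PING_DELAY (ms)
static const uint64_t FLAG_CLUSTER_OPTIMAL = 0x0002;  // the bit value the /2 comment above implies
static const uint64_t FLAG_CLUSTER_SUBOPTIMAL = 0x0001;

// (flags & 0x0002) is 0 or 2, so multiplying by (PING_DELAY / 2) adds 0 or PING_DELAY with no branch;
// (flags & 0x0001) is 0 or 1, so multiplying by PING_DELAY subtracts 0 or PING_DELAY the same way.
static uint64_t demoScore(uint64_t lastReceived,uint64_t flags,unsigned int probation)
{
	uint64_t score = lastReceived + (PING_DELAY * 4); // fudge factor so the subtractions below cannot underflow
	score += (flags & FLAG_CLUSTER_OPTIMAL) * (PING_DELAY / 2);
	score -= (flags & FLAG_CLUSTER_SUBOPTIMAL) * PING_DELAY;
	score -= (PING_DELAY / 2) * (uint64_t)probation;  // each missed ECHO (probation step) knocks the score down
	return score;
}

int main()
{
	printf("optimal: %llu  suboptimal+probation: %llu\n",
		(unsigned long long)demoScore(1000000ULL,FLAG_CLUSTER_OPTIMAL,0),
		(unsigned long long)demoScore(1000000ULL,FLAG_CLUSTER_SUBOPTIMAL,2));
	return 0;
}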

View File

@@ -76,7 +76,7 @@ void Peer::received(
// Note: findBetterEndpoint() is first since we still want to check
// for a better endpoint even if we don't actually send a redirect.
InetAddress redirectTo;
if ( (RR->cluster->findBetterEndpoint(redirectTo,_id.address(),remoteAddr,false)) && (verb != Packet::VERB_OK)&&(verb != Packet::VERB_ERROR)&&(verb != Packet::VERB_RENDEZVOUS)&&(verb != Packet::VERB_PUSH_DIRECT_PATHS) ) {
if ( (verb != Packet::VERB_OK) && (verb != Packet::VERB_ERROR) && (verb != Packet::VERB_RENDEZVOUS) && (verb != Packet::VERB_PUSH_DIRECT_PATHS) && (RR->cluster->findBetterEndpoint(redirectTo,_id.address(),remoteAddr,false)) ) {
if (_vProto >= 5) {
// For newer peers we can send a more idiomatic verb: PUSH_DIRECT_PATHS.
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS);
@@ -243,7 +243,7 @@ bool Peer::doPingAndKeepalive(uint64_t now,int inetAddressFamily)
return false;
}
bool Peer::pushDirectPaths(const InetAddress &localAddr,const InetAddress &toAddress,uint64_t now,bool force)
bool Peer::pushDirectPaths(const InetAddress &localAddr,const InetAddress &toAddress,uint64_t now,bool force,bool includePrivatePaths)
{
#ifdef ZT_ENABLE_CLUSTER
// Cluster mode disables normal PUSH_DIRECT_PATHS in favor of cluster-based peer redirection
@@ -257,38 +257,45 @@ bool Peer::pushDirectPaths(const InetAddress &localAddr,const InetAddress &toAdd
else _lastDirectPathPushSent = now;
}
std::vector<InetAddress> pathsToPush;
std::vector<InetAddress> dps(RR->node->directPaths());
for(std::vector<InetAddress>::const_iterator i(dps.begin());i!=dps.end();++i) {
if ((includePrivatePaths)||(i->ipScope() == InetAddress::IP_SCOPE_GLOBAL))
pathsToPush.push_back(*i);
}
std::vector<InetAddress> sym(RR->sa->getSymmetricNatPredictions());
for(unsigned long i=0,added=0;i<sym.size();++i) {
InetAddress tmp(sym[(unsigned long)RR->node->prng() % sym.size()]);
if (std::find(dps.begin(),dps.end(),tmp) == dps.end()) {
dps.push_back(tmp);
if (std::find(pathsToPush.begin(),pathsToPush.end(),tmp) == pathsToPush.end()) {
pathsToPush.push_back(tmp);
if (++added >= ZT_PUSH_DIRECT_PATHS_MAX_PER_SCOPE_AND_FAMILY)
break;
}
}
if (dps.empty())
if (pathsToPush.empty())
return false;
#ifdef ZT_TRACE
{
std::string ps;
for(std::vector<InetAddress>::const_iterator p(dps.begin());p!=dps.end();++p) {
for(std::vector<InetAddress>::const_iterator p(pathsToPush.begin());p!=pathsToPush.end();++p) {
if (ps.length() > 0)
ps.push_back(',');
ps.append(p->toString());
}
TRACE("pushing %u direct paths to %s: %s",(unsigned int)dps.size(),_id.address().toString().c_str(),ps.c_str());
TRACE("pushing %u direct paths to %s: %s",(unsigned int)pathsToPush.size(),_id.address().toString().c_str(),ps.c_str());
}
#endif
std::vector<InetAddress>::const_iterator p(dps.begin());
while (p != dps.end()) {
std::vector<InetAddress>::const_iterator p(pathsToPush.begin());
while (p != pathsToPush.end()) {
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS);
outp.addSize(2); // leave room for count
unsigned int count = 0;
while ((p != dps.end())&&((outp.size() + 24) < 1200)) {
while ((p != pathsToPush.end())&&((outp.size() + 24) < 1200)) {
uint8_t addressType = 4;
switch(p->ss_family) {
case AF_INET:
@@ -475,11 +482,8 @@ void Peer::clean(uint64_t now)
}
}
bool Peer::_checkPath(Path &p,const uint64_t now)
void Peer::_doDeadPathDetection(Path &p,const uint64_t now)
{
if (!p.active(now))
return false;
/* Dead path detection: if we have sent something to this peer and have not
* yet received a reply, double check this path. The majority of outbound
* packets including Ethernet frames do generate some kind of reply either
@@ -499,6 +503,7 @@ bool Peer::_checkPath(Path &p,const uint64_t now)
(p.lastSend() > p.lastReceived()) &&
((p.lastSend() - p.lastReceived()) >= ZT_PEER_DEAD_PATH_DETECTION_NO_ANSWER_TIMEOUT) &&
((now - p.lastPing()) >= ZT_PEER_DEAD_PATH_DETECTION_NO_ANSWER_TIMEOUT) &&
(!p.isClusterSuboptimal()) &&
(!RR->topology->amRoot())
) {
TRACE("%s(%s) does not seem to be answering in a timely manner, checking if dead (probation == %u)",_id.address().toString().c_str(),p.address().toString().c_str(),p.probation());
@@ -516,8 +521,6 @@ bool Peer::_checkPath(Path &p,const uint64_t now)
p.increaseProbation();
}
return true;
}
Path *Peer::_getBestPath(const uint64_t now)
@@ -526,11 +529,13 @@ Path *Peer::_getBestPath(const uint64_t now)
uint64_t bestPathScore = 0;
for(unsigned int i=0;i<_numPaths;++i) {
const uint64_t score = _paths[i].score();
if ((score >= bestPathScore)&&(_checkPath(_paths[i],now))) {
if ((score >= bestPathScore)&&(_paths[i].active(now))) {
bestPathScore = score;
bestPath = &(_paths[i]);
}
}
if (bestPath)
_doDeadPathDetection(*bestPath,now);
return bestPath;
}
@@ -540,11 +545,13 @@ Path *Peer::_getBestPath(const uint64_t now,int inetAddressFamily)
uint64_t bestPathScore = 0;
for(unsigned int i=0;i<_numPaths;++i) {
const uint64_t score = _paths[i].score();
if (((int)_paths[i].address().ss_family == inetAddressFamily)&&(score >= bestPathScore)&&(_checkPath(_paths[i],now))) {
if (((int)_paths[i].address().ss_family == inetAddressFamily)&&(score >= bestPathScore)&&(_paths[i].active(now))) {
bestPathScore = score;
bestPath = &(_paths[i]);
}
}
if (bestPath)
_doDeadPathDetection(*bestPath,now);
return bestPath;
}
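The new includePrivatePaths argument added to pushDirectPaths() above controls which local interface addresses get advertised: non-global (private or link-local) addresses are only pushed when the caller has established some trust, such as shared network membership. Below is a minimal sketch of that filter, with a simplified address type standing in for InetAddress and its ipScope().

#include <string>
#include <vector>

struct LocalAddr {
	std::string ip;
	bool globalScope; // true when InetAddress::ipScope() == IP_SCOPE_GLOBAL in the real code
};

// Mirrors the pathsToPush selection above: private/link-local addresses are only
// advertised when the caller decided the peer is trusted (includePrivatePaths == true).
static std::vector<LocalAddr> selectPathsToPush(const std::vector<LocalAddr> &directPaths,bool includePrivatePaths)
{
	std::vector<LocalAddr> pathsToPush;
	for(std::vector<LocalAddr>::const_iterator a(directPaths.begin());a!=directPaths.end();++a) {
		if ((includePrivatePaths)||(a->globalScope))
			pathsToPush.push_back(*a);
	}
	return pathsToPush;
}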

View File

@@ -204,9 +204,10 @@ public:
* @param toAddress Remote address to send push to (usually from path)
* @param now Current time
* @param force If true, push regardless of rate limit
* @param includePrivatePaths If true, include local interface address paths (should only be done to peers with a trust relationship)
* @return True if something was actually sent
*/
bool pushDirectPaths(const InetAddress &localAddr,const InetAddress &toAddress,uint64_t now,bool force);
bool pushDirectPaths(const InetAddress &localAddr,const InetAddress &toAddress,uint64_t now,bool force,bool includePrivatePaths);
/**
* @return All known direct paths to this peer (active or inactive)
@@ -560,7 +561,7 @@ public:
}
private:
bool _checkPath(Path &p,const uint64_t now);
void _doDeadPathDetection(Path &p,const uint64_t now);
Path *_getBestPath(const uint64_t now);
Path *_getBestPath(const uint64_t now,int inetAddressFamily);

View File

@@ -229,7 +229,6 @@ void Switch::onRemotePacket(const InetAddress &localAddr,const InetAddress &from
return;
}
#endif
relayTo = RR->topology->getBestRoot(&source,1,true);
if (relayTo)
relayTo->send(packet.data(),packet.size(),now);
@@ -681,7 +680,7 @@ unsigned long Switch::doTimerTasks(uint64_t now)
Mutex::Lock _l(_contactQueue_m);
for(std::list<ContactQueueEntry>::iterator qi(_contactQueue.begin());qi!=_contactQueue.end();) {
if (now >= qi->fireAtTime) {
if (!qi->peer->pushDirectPaths(qi->localAddr,qi->inaddr,now,true))
if (!qi->peer->pushDirectPaths(qi->localAddr,qi->inaddr,now,true,false))
qi->peer->sendHELLO(qi->localAddr,qi->inaddr,now);
_contactQueue.erase(qi++);
continue;
@@ -790,38 +789,38 @@ bool Switch::_trySend(const Packet &packet,bool encrypt,uint64_t nwid)
return false; // we probably just left this network, let its packets die
}
Path *viaPath = peer->getBestPath(now);
SharedPtr<Peer> relay;
// Check for a network preferred relay
Path *viaPath = peer->getBestPath(now);
if ((!viaPath)&&(network)) {
unsigned int bestq = ~((unsigned int)0); // max unsigned int since quality is lower==better
for(unsigned int ri=0;ri<network->config().staticDeviceCount();++ri) {
const ZT_VirtualNetworkStaticDevice &r = network->config().staticDevice(ri);
if ((r.address != peer->address().toInt())&&((r.flags & ZT_NETWORK_STATIC_DEVICE_IS_RELAY) != 0)) {
SharedPtr<Peer> rp(RR->topology->getPeer(Address(r.address)));
if (rp) {
const unsigned int q = rp->relayQuality(now);
if (q < bestq) {
bestq = q;
rp.swap(relay);
if (!viaPath) {
if (network) {
unsigned int bestq = ~((unsigned int)0); // max unsigned int since quality is lower==better
for(unsigned int ri=0;ri<network->config().staticDeviceCount();++ri) {
const ZT_VirtualNetworkStaticDevice &r = network->config().staticDevice(ri);
if ((r.address != peer->address().toInt())&&((r.flags & ZT_NETWORK_STATIC_DEVICE_IS_RELAY) != 0)) {
SharedPtr<Peer> rp(RR->topology->getPeer(Address(r.address)));
if (rp) {
const unsigned int q = rp->relayQuality(now);
if (q < bestq) {
bestq = q;
rp.swap(relay);
}
}
}
}
}
if (!relay)
relay = RR->topology->getBestRoot();
if ( (!relay) || (!(viaPath = relay->getBestPath(now))) )
return false;
}
// viaPath will not be null if we make it here
// Otherwise relay off a root server
if (!relay)
relay = RR->topology->getBestRoot();
// No relay or relay has no active paths == :P~~~~
if ( (!(relay)) || (!(viaPath = relay->getBestPath(now))) )
return false;
if ((network)&&(relay)&&(network->isAllowed(peer))) {
// Push hints for direct connectivity to this peer if we are relaying
peer->pushDirectPaths(viaPath->localAddress(),viaPath->address(),now,false);
// Push possible direct paths to us if we are relaying
if (relay) {
peer->pushDirectPaths(viaPath->localAddress(),viaPath->address(),now,false,( (network)&&(network->isAllowed(peer)) ));
viaPath->sent(now);
}
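The restructured _trySend() logic above falls back from a direct path to the best network preferred relay (lowest relayQuality(), lower is better), then to the best root, and only pushes direct-path hints when it is actually relaying, with private addresses limited to allowed peers. Below is a minimal sketch of that decision order with simplified stand-in types; the Peer and Path interfaces here are illustrative, not the real classes.

#include <memory>
#include <vector>

struct Path {};

struct Peer {
	virtual ~Peer() {}
	virtual Path *getBestPath() = 0;                           // stand-in for Peer::getBestPath(now)
	virtual unsigned int relayQuality() const = 0;             // lower == better, as in the hunk above
	virtual void pushDirectPaths(bool includePrivatePaths) = 0;
};

// Decide how to reach 'dest': direct path if one exists, otherwise the best network
// preferred relay, otherwise the best root. Direct-path hints are only pushed when relaying.
static Path *chooseVia(Peer &dest,
                       const std::vector< std::shared_ptr<Peer> > &preferredRelays,
                       const std::shared_ptr<Peer> &bestRoot,
                       bool destAllowedOnNetwork)
{
	Path *viaPath = dest.getBestPath();
	if (viaPath)
		return viaPath; // direct path exists, nothing to relay

	std::shared_ptr<Peer> relay;
	unsigned int bestq = ~((unsigned int)0); // max unsigned int since quality is lower==better
	for(std::vector< std::shared_ptr<Peer> >::const_iterator r(preferredRelays.begin());r!=preferredRelays.end();++r) {
		const unsigned int q = (*r)->relayQuality();
		if (q < bestq) {
			bestq = q;
			relay = *r;
		}
	}
	if (!relay)
		relay = bestRoot; // otherwise relay off a root server

	if ((!relay)||(!(viaPath = relay->getBestPath())))
		return (Path *)0; // no relay, or relay has no active path

	// Since we are relaying, hint direct paths back to us; private/local addresses
	// are only included if the destination is an allowed member of a shared network.
	dest.pushDirectPaths(destAllowedOnNetwork);
	return viaPath;
}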

View File

@@ -48,8 +48,6 @@ public:
char name[256];
InetAddress clusterEndpoint;
std::vector<InetAddress> zeroTierEndpoints;
//inline operator<(const MemberDefinition &md) const { return (id < md.id); } // sort order
};
/**