/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2019 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * You can be released from the requirements of the license by purchasing
 * a commercial license. Buying such a license is mandatory as soon as you
 * develop commercial closed-source software that incorporates or links
 * directly against ZeroTier software without disclosing the source code
 * of your own application.
 */

#include "Constants.hpp"
#include "Peer.hpp"
#include "Node.hpp"
#include "Switch.hpp"
#include "Network.hpp"
#include "SelfAwareness.hpp"
#include "Packet.hpp"
#include "Trace.hpp"
#include "InetAddress.hpp"
#include "RingBuffer.hpp"
#include "Utils.hpp"

namespace ZeroTier {

static unsigned char s_freeRandomByteCounter = 0;

Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Identity &peerIdentity) :
	RR(renv),
	_lastReceive(0),
	_lastNontrivialReceive(0),
	_lastTriedMemorizedPath(0),
	_lastDirectPathPushSent(0),
	_lastDirectPathPushReceive(0),
	_lastCredentialRequestSent(0),
	_lastWhoisRequestReceived(0),
	_lastEchoRequestReceived(0),
	_lastCredentialsReceived(0),
	_lastTrustEstablishedPacketReceived(0),
	_lastSentFullHello(0),
	_lastACKWindowReset(0),
	_lastQoSWindowReset(0),
	_lastMultipathCompatibilityCheck(0),
	_freeRandomByte((unsigned char)((uintptr_t)this >> 4) ^ ++s_freeRandomByteCounter),
	_uniqueAlivePathCount(0),
	_localMultipathSupported(false),
	_remoteMultipathSupported(false),
	_canUseMultipath(false),
	_vProto(0),
	_vMajor(0),
	_vMinor(0),
	_vRevision(0),
	_id(peerIdentity),
	_directPathPushCutoffCount(0),
	_credentialsCutoffCount(0),
	_linkIsBalanced(false),
	_linkIsRedundant(false),
	_remotePeerMultipathEnabled(false),
	_lastAggregateStatsReport(0),
	_lastAggregateAllocation(0)
{
	if (!myIdentity.agree(peerIdentity,_key,ZT_PEER_SECRET_KEY_LENGTH))
		throw ZT_EXCEPTION_INVALID_ARGUMENT;
}

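// Called for every packet received from this peer. Updates receive timestamps,
// feeds QoS/ACK accounting when multipath is active, learns or replaces direct
// paths for direct (zero-hop) packets, and periodically pushes our known
// external addresses to peers we have a trust relationship with.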
void Peer::received(
	void *tPtr,
	const SharedPtr<Path> &path,
	const unsigned int hops,
	const uint64_t packetId,
	const unsigned int payloadLength,
	const Packet::Verb verb,
	const uint64_t inRePacketId,
	const Packet::Verb inReVerb,
	const bool trustEstablished,
	const uint64_t networkId)
{
	const int64_t now = RR->node->now();

	_lastReceive = now;
	switch (verb) {
		case Packet::VERB_FRAME:
		case Packet::VERB_EXT_FRAME:
		case Packet::VERB_NETWORK_CONFIG_REQUEST:
		case Packet::VERB_NETWORK_CONFIG:
		case Packet::VERB_MULTICAST_FRAME:
			_lastNontrivialReceive = now;
			break;
		default:
			break;
	}

	if (trustEstablished) {
		_lastTrustEstablishedPacketReceived = now;
		path->trustedPacketReceived(now);
	}

	{
		Mutex::Lock _l(_paths_m);

		recordIncomingPacket(tPtr, path, packetId, payloadLength, verb, now);

		if (_canUseMultipath) {
			if (path->needsToSendQoS(now)) {
				sendQOS_MEASUREMENT(tPtr, path, path->localSocket(), path->address(), now);
			}
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					_paths[i].p->processBackgroundPathMeasurements(now);
				}
			}
		}
	}

	if (hops == 0) {
		// If this is a direct packet (no hops), update existing paths or learn new ones
		bool havePath = false;
		{
			Mutex::Lock _l(_paths_m);
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					if (_paths[i].p == path) {
						_paths[i].lr = now;
						havePath = true;
						break;
					}
				} else break;
			}
		}

		bool attemptToContact = false;
		if ((!havePath)&&(RR->node->shouldUsePathForZeroTierTraffic(tPtr,_id.address(),path->localSocket(),path->address()))) {
			Mutex::Lock _l(_paths_m);

			// Paths are redundant if they duplicate an alive path to the same IP or
			// with the same local socket and address family.
			bool redundant = false;
			unsigned int replacePath = ZT_MAX_PEER_NETWORK_PATHS;
			for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
				if (_paths[i].p) {
					if ( (_paths[i].p->alive(now)) && ( ((_paths[i].p->localSocket() == path->localSocket())&&(_paths[i].p->address().ss_family == path->address().ss_family)) || (_paths[i].p->address().ipsEqual2(path->address())) ) ) {
						redundant = true;
						break;
					}
					// If the path is the same address and port, simply assume this is a replacement
					if ( (_paths[i].p->address().ipsEqual2(path->address()))) {
						replacePath = i;
						break;
					}
				} else break;
			}

			// If the path isn't a duplicate of the same localSocket AND we haven't already determined a replacePath,
			// then find the worst path and replace it.
			if (!redundant && replacePath == ZT_MAX_PEER_NETWORK_PATHS) {
				int replacePathQuality = 0;
				for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
					if (_paths[i].p) {
						const int q = _paths[i].p->quality(now);
						if (q > replacePathQuality) {
							replacePathQuality = q;
							replacePath = i;
						}
					} else {
						replacePath = i;
						break;
					}
				}
			}

			if (replacePath != ZT_MAX_PEER_NETWORK_PATHS) {
				if (verb == Packet::VERB_OK) {
					RR->t->peerLearnedNewPath(tPtr,networkId,*this,path,packetId);
					_paths[replacePath].lr = now;
					_paths[replacePath].p = path;
					_paths[replacePath].priority = 1;
				} else {
					attemptToContact = true;
				}
			}
		}

		if (attemptToContact) {
			attemptToContactAt(tPtr,path->localSocket(),path->address(),now,true);
			path->sent(now);
			RR->t->peerConfirmingUnknownPath(tPtr,networkId,*this,path,packetId,verb);
		}
	}

	// If we have a trust relationship periodically push a message enumerating
	// all known external addresses for ourselves. If we already have a path this
	// is done less frequently.
	if (this->trustEstablished(now)) {
		const int64_t sinceLastPush = now - _lastDirectPathPushSent;
		if (sinceLastPush >= ((hops == 0) ? ZT_DIRECT_PATH_PUSH_INTERVAL_HAVEPATH : ZT_DIRECT_PATH_PUSH_INTERVAL)) {
			_lastDirectPathPushSent = now;
			std::vector<InetAddress> pathsToPush(RR->node->directPaths());
			if (pathsToPush.size() > 0) {
				std::vector<InetAddress>::const_iterator p(pathsToPush.begin());
				while (p != pathsToPush.end()) {
					Packet *const outp = new Packet(_id.address(),RR->identity.address(),Packet::VERB_PUSH_DIRECT_PATHS);
					outp->addSize(2); // leave room for count
					unsigned int count = 0;
					while ((p != pathsToPush.end())&&((outp->size() + 24) < 1200)) {
						uint8_t addressType = 4;
						switch(p->ss_family) {
							case AF_INET:
								break;
							case AF_INET6:
								addressType = 6;
								break;
							default: // we currently only push IP addresses
								++p;
								continue;
						}

						outp->append((uint8_t)0); // no flags
						outp->append((uint16_t)0); // no extensions
						outp->append(addressType);
						outp->append((uint8_t)((addressType == 4) ? 6 : 18));
						outp->append(p->rawIpData(),((addressType == 4) ? 4 : 16));
						outp->append((uint16_t)p->port());

						++count;
						++p;
					}
					if (count) {
						outp->setAt(ZT_PACKET_IDX_PAYLOAD,(uint16_t)count);
						outp->compress();
						outp->armor(_key,true);
						path->send(RR,tPtr,outp->data(),outp->size(),now);
					}
					delete outp;
				}
			}
		}
	}
}

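// Harvest a byte of entropy from the packet ID for later random path selection,
// then record the outgoing packet for per-path QoS accounting when multipath is active.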
void Peer::recordOutgoingPacket(const SharedPtr<Path> &path, const uint64_t packetId,
	uint16_t payloadLength, const Packet::Verb verb, int64_t now)
{
	_freeRandomByte += (unsigned char)(packetId >> 8); // grab entropy to use in path selection logic for multipath
	if (_canUseMultipath) {
		path->recordOutgoingPacket(now, packetId, payloadLength, verb);
	}
}

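// When multipath is active, send an ACK if this path has accumulated enough
// unacknowledged bytes and record the incoming packet for QoS accounting.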
void Peer::recordIncomingPacket(void *tPtr, const SharedPtr<Path> &path, const uint64_t packetId,
	uint16_t payloadLength, const Packet::Verb verb, int64_t now)
{
	if (_canUseMultipath) {
		if (path->needsToSendAck(now)) {
			sendACK(tPtr, path, path->localSocket(), path->address(), now);
		}
		path->recordIncomingPacket(now, packetId, payloadLength, verb);
	}
}

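// Recompute each path's share of the aggregate link. Stability, throughput,
// IP scope and ACK age are normalized into a relative quality score per path,
// which is then converted into a 0-255 component allocation.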
void Peer::computeAggregateProportionalAllocation(int64_t now)
{
	float maxStability = 0;
	float totalRelativeQuality = 0;
	float maxThroughput = 1;
	float maxScope = 0;
	float relStability[ZT_MAX_PEER_NETWORK_PATHS];
	float relThroughput[ZT_MAX_PEER_NETWORK_PATHS];
	memset(&relStability, 0, sizeof(relStability));
	memset(&relThroughput, 0, sizeof(relThroughput));
	// Survey all paths
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			relStability[i] = _paths[i].p->lastComputedStability();
			relThroughput[i] = (float)_paths[i].p->maxLifetimeThroughput();
			maxStability = relStability[i] > maxStability ? relStability[i] : maxStability;
			maxThroughput = relThroughput[i] > maxThroughput ? relThroughput[i] : maxThroughput;
			maxScope = _paths[i].p->ipScope() > maxScope ? _paths[i].p->ipScope() : maxScope;
		}
	}
	// Convert to relative values
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			relStability[i] /= maxStability ? maxStability : 1;
			relThroughput[i] /= maxThroughput ? maxThroughput : 1;
			float normalized_ma = Utils::normalize((float)_paths[i].p->ackAge(now), 0, ZT_PATH_MAX_AGE, 0, 10);
			float age_contrib = exp((-1)*normalized_ma);
			float relScope = ((float)(_paths[i].p->ipScope()+1) / (maxScope + 1));
			float relQuality =
				(relStability[i] * (float)ZT_PATH_CONTRIB_STABILITY)
				+ (fmaxf(1.0f, relThroughput[i]) * (float)ZT_PATH_CONTRIB_THROUGHPUT)
				+ relScope * (float)ZT_PATH_CONTRIB_SCOPE;
			relQuality *= age_contrib;
			// Arbitrary cutoffs
			relQuality = relQuality > (1.00f / 100.0f) ? relQuality : 0.0f;
			relQuality = relQuality < (99.0f / 100.0f) ? relQuality : 1.0f;
			totalRelativeQuality += relQuality;
			_paths[i].p->updateRelativeQuality(relQuality);
		}
	}
	// Convert set of relative performances into an allocation set
	for(uint16_t i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			_paths[i].p->updateComponentAllocationOfAggregateLink((unsigned char)((_paths[i].p->relativeQuality() / totalRelativeQuality) * 255));
		}
	}
}

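// Aggregate packet delay variance for the link: each path's PDV weighted by its relative quality.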
int Peer::computeAggregateLinkPacketDelayVariance()
{
	float pdv = 0.0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			pdv += _paths[i].p->relativeQuality() * _paths[i].p->packetDelayVariance();
		}
	}
	return (int)pdv;
}

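// Mean latency of the aggregate link: per-path latency weighted by relative quality.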
int Peer::computeAggregateLinkMeanLatency()
{
	int ml = 0;
	int pathCount = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			pathCount++;
			ml += (int)(_paths[i].p->relativeQuality() * _paths[i].p->meanLatency());
		}
	}
	return (pathCount > 0) ? (ml / pathCount) : 0; // guard against division by zero when no paths exist
}

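// Number of distinct physical interfaces that currently carry at least one alive path
// (multiple paths over the same interface are counted once).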
int Peer::aggregateLinkPhysicalPathCount()
{
	std::map<std::string, bool> ifnamemap;
	int pathCount = 0;
	int64_t now = RR->node->now();
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p && _paths[i].p->alive(now)) {
			if (!ifnamemap[_paths[i].p->getName()]) {
				ifnamemap[_paths[i].p->getName()] = true;
				pathCount++;
			}
		}
	}
	return pathCount;
}

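// Number of alive logical paths to this peer, regardless of which interface they use.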
int Peer::aggregateLinkLogicalPathCount()
{
	int pathCount = 0;
	int64_t now = RR->node->now();
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p && _paths[i].p->alive(now)) {
			pathCount++;
		}
	}
	return pathCount;
}

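// Choose the path to use for an outgoing packet. Without multipath this is the
// single best unexpired path (lowest quality score divided by priority). With
// multipath, traffic is either spread randomly across alive paths or allocated
// proportionally to measured path quality.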
SharedPtr<Path> Peer::getAppropriatePath(int64_t now, bool includeExpired)
{
	Mutex::Lock _l(_paths_m);
	unsigned int bestPath = ZT_MAX_PEER_NETWORK_PATHS;

	/**
	 * Send traffic across the highest quality path only. This algorithm will still
	 * use the old path quality metric from protocol version 9.
	 */
	if (!_canUseMultipath) {
		long bestPathQuality = 2147483647;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if ((includeExpired)||((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION)) {
					const long q = _paths[i].p->quality(now) / _paths[i].priority;
					if (q <= bestPathQuality) {
						bestPathQuality = q;
						bestPath = i;
					}
				}
			} else break;
		}
		if (bestPath != ZT_MAX_PEER_NETWORK_PATHS) {
			return _paths[bestPath].p;
		}
		return SharedPtr<Path>();
	}

	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			_paths[i].p->processBackgroundPathMeasurements(now);
		}
	}

	/**
	 * Randomly distribute traffic across all paths
	 */
	int numAlivePaths = 0;
	int numStalePaths = 0;
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_RANDOM) {
		int alivePaths[ZT_MAX_PEER_NETWORK_PATHS];
		int stalePaths[ZT_MAX_PEER_NETWORK_PATHS];
		memset(&alivePaths, -1, sizeof(alivePaths));
		memset(&stalePaths, -1, sizeof(stalePaths));
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if (_paths[i].p->alive(now)) {
					alivePaths[numAlivePaths] = i;
					numAlivePaths++;
				}
				else {
					stalePaths[numStalePaths] = i;
					numStalePaths++;
				}
			}
		}
		unsigned int r = _freeRandomByte;
		if (numAlivePaths > 0) {
			int rf = r % numAlivePaths;
			return _paths[alivePaths[rf]].p;
		}
		else if(numStalePaths > 0) {
			// Resort to trying any non-expired path
			int rf = r % numStalePaths;
			return _paths[stalePaths[rf]].p;
		}
	}

	/**
	 * Proportionally allocate traffic according to dynamic path quality measurements
	 */
	if (RR->node->getMultipathMode() == ZT_MULTIPATH_PROPORTIONALLY_BALANCED) {
		if ((now - _lastAggregateAllocation) >= ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
			_lastAggregateAllocation = now;
			computeAggregateProportionalAllocation(now);
		}
		// Randomly choose path according to their allocations
		float rf = _freeRandomByte;
		for(int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if (rf < _paths[i].p->allocation()) {
					bestPath = i;
					_pathChoiceHist.push(bestPath); // Record which path we chose
					break;
				}
				rf -= _paths[i].p->allocation();
			}
		}
		if (bestPath < ZT_MAX_PEER_NETWORK_PATHS) {
			return _paths[bestPath].p;
		}
	}
	return SharedPtr<Path>();
}

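// Build a human-readable list of (interface, protocol, allocation) entries for the
// aggregate link, appending a note when observed per-path allocations diverge from
// an even split by more than ZT_PATH_IMBALANCE_THRESHOLD.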
char *Peer::interfaceListStr()
{
	std::map<std::string, int> ifnamemap;
	char tmp[32];
	const int64_t now = RR->node->now();
	char *ptr = _interfaceListStr;
	bool imbalanced = false;
	memset(_interfaceListStr, 0, sizeof(_interfaceListStr));
	int alivePathCount = aggregateLinkLogicalPathCount();
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p && _paths[i].p->alive(now)) {
			int ipv = _paths[i].p->address().isV4();
			// If this is acting as an aggregate link, check allocations
			float targetAllocation = 1.0f / (float)alivePathCount;
			float currentAllocation = 1.0f;
			if (alivePathCount > 1) {
				currentAllocation = (float)_pathChoiceHist.countValue(i) / (float)_pathChoiceHist.count();
				if (fabs(targetAllocation - currentAllocation) > ZT_PATH_IMBALANCE_THRESHOLD) {
					imbalanced = true;
				}
			}
			char *ipvStr = ipv ? (char*)"ipv4" : (char*)"ipv6";
			sprintf(tmp, "(%s, %s, %.3f)", _paths[i].p->getName(), ipvStr, currentAllocation);
			// Prevent duplicates
			if(ifnamemap[_paths[i].p->getName()] != ipv) {
				memcpy(ptr, tmp, strlen(tmp));
				ptr += strlen(tmp);
				*ptr = ' ';
				ptr++;
				ifnamemap[_paths[i].p->getName()] = ipv;
			}
		}
	}
	ptr--; // Overwrite trailing space
	if (imbalanced) {
		sprintf(tmp, ", is asymmetrical");
		memcpy(ptr, tmp, sizeof(tmp));
	} else {
		*ptr = '\0';
	}
	return _interfaceListStr;
}

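// Introduce this peer and "other" to each other by sending a RENDEZVOUS packet to
// each, choosing the best mutually available address family and IP scope from the
// two peers' path lists.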
void Peer::introduce(void *const tPtr,const int64_t now,const SharedPtr<Peer> &other) const
{
	unsigned int myBestV4ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	unsigned int myBestV6ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	long myBestV4QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	long myBestV6QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	unsigned int theirBestV4ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	unsigned int theirBestV6ByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	long theirBestV4QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	long theirBestV6QualityByScope[ZT_INETADDRESS_MAX_SCOPE+1];
	for(int i=0;i<=ZT_INETADDRESS_MAX_SCOPE;++i) {
		myBestV4ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
		myBestV6ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
		myBestV4QualityByScope[i] = 2147483647;
		myBestV6QualityByScope[i] = 2147483647;
		theirBestV4ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
		theirBestV6ByScope[i] = ZT_MAX_PEER_NETWORK_PATHS;
		theirBestV4QualityByScope[i] = 2147483647;
		theirBestV6QualityByScope[i] = 2147483647;
	}

	Mutex::Lock _l1(_paths_m);

	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			const long q = _paths[i].p->quality(now) / _paths[i].priority;
			const unsigned int s = (unsigned int)_paths[i].p->ipScope();
			switch(_paths[i].p->address().ss_family) {
				case AF_INET:
					if (q <= myBestV4QualityByScope[s]) {
						myBestV4QualityByScope[s] = q;
						myBestV4ByScope[s] = i;
					}
					break;
				case AF_INET6:
					if (q <= myBestV6QualityByScope[s]) {
						myBestV6QualityByScope[s] = q;
						myBestV6ByScope[s] = i;
					}
					break;
			}
		} else break;
	}

	Mutex::Lock _l2(other->_paths_m);

	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (other->_paths[i].p) {
			const long q = other->_paths[i].p->quality(now) / other->_paths[i].priority;
			const unsigned int s = (unsigned int)other->_paths[i].p->ipScope();
			switch(other->_paths[i].p->address().ss_family) {
				case AF_INET:
					if (q <= theirBestV4QualityByScope[s]) {
						theirBestV4QualityByScope[s] = q;
						theirBestV4ByScope[s] = i;
					}
					break;
				case AF_INET6:
					if (q <= theirBestV6QualityByScope[s]) {
						theirBestV6QualityByScope[s] = q;
						theirBestV6ByScope[s] = i;
					}
					break;
			}
		} else break;
	}

	unsigned int mine = ZT_MAX_PEER_NETWORK_PATHS;
	unsigned int theirs = ZT_MAX_PEER_NETWORK_PATHS;

	for(int s=ZT_INETADDRESS_MAX_SCOPE;s>=0;--s) {
		if ((myBestV6ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)&&(theirBestV6ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)) {
			mine = myBestV6ByScope[s];
			theirs = theirBestV6ByScope[s];
			break;
		}
		if ((myBestV4ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)&&(theirBestV4ByScope[s] != ZT_MAX_PEER_NETWORK_PATHS)) {
			mine = myBestV4ByScope[s];
			theirs = theirBestV4ByScope[s];
			break;
		}
	}

	if (mine != ZT_MAX_PEER_NETWORK_PATHS) {
		unsigned int alt = (unsigned int)RR->node->prng() & 1; // randomize which hint we send first for black magickal NAT-t reasons
		const unsigned int completed = alt + 2;
		while (alt != completed) {
			if ((alt & 1) == 0) {
				Packet outp(_id.address(),RR->identity.address(),Packet::VERB_RENDEZVOUS);
				outp.append((uint8_t)0);
				other->_id.address().appendTo(outp);
				outp.append((uint16_t)other->_paths[theirs].p->address().port());
				if (other->_paths[theirs].p->address().ss_family == AF_INET6) {
					outp.append((uint8_t)16);
					outp.append(other->_paths[theirs].p->address().rawIpData(),16);
				} else {
					outp.append((uint8_t)4);
					outp.append(other->_paths[theirs].p->address().rawIpData(),4);
				}
				outp.armor(_key,true);
				_paths[mine].p->send(RR,tPtr,outp.data(),outp.size(),now);
			} else {
				Packet outp(other->_id.address(),RR->identity.address(),Packet::VERB_RENDEZVOUS);
				outp.append((uint8_t)0);
				_id.address().appendTo(outp);
				outp.append((uint16_t)_paths[mine].p->address().port());
				if (_paths[mine].p->address().ss_family == AF_INET6) {
					outp.append((uint8_t)16);
					outp.append(_paths[mine].p->address().rawIpData(),16);
				} else {
					outp.append((uint8_t)4);
					outp.append(_paths[mine].p->address().rawIpData(),4);
				}
				outp.armor(other->_key,true);
				other->_paths[theirs].p->send(RR,tPtr,outp.data(),outp.size(),now);
			}
			++alt;
		}
	}
}

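// Periodic housekeeping: re-evaluate whether multipath can be used with this peer.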
inline void Peer::processBackgroundPeerTasks(const int64_t now)
{
	// Determine current multipath compatibility with other peer
	if ((now - _lastMultipathCompatibilityCheck) >= ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
		//
		// Cache number of available paths so that we can short-circuit multipath logic elsewhere
		//
		// We also take notice of duplicate paths (same IP only) because we may have
		// recently received a direct path push from a peer and our list might contain
		// a dead path which hasn't been fully recognized as such. In this case we
		// don't want the duplicate to trigger execution of multipath code prematurely.
		//
		// This is done to support the behavior of auto multipath enable/disable
		// without user intervention.
		//
		int currAlivePathCount = 0;
		int duplicatePathsFound = 0;
		for (unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				currAlivePathCount++;
				for (unsigned int j=0;j<ZT_MAX_PEER_NETWORK_PATHS;++j) {
					if (_paths[i].p && _paths[j].p && _paths[i].p->address().ipsEqual2(_paths[j].p->address()) && i != j) {
						duplicatePathsFound+=1;
						break;
					}
				}
			}
		}
		_uniqueAlivePathCount = (currAlivePathCount - (duplicatePathsFound / 2));
		_lastMultipathCompatibilityCheck = now;
		_localMultipathSupported = ((RR->node->getMultipathMode() != ZT_MULTIPATH_NONE) && (ZT_PROTO_VERSION > 9));
		_remoteMultipathSupported = _vProto > 9;
		// If both peers support multipath and more than one path exists, we can use multipath logic
		_canUseMultipath = _localMultipathSupported && _remoteMultipathSupported && (_uniqueAlivePathCount > 1);
	}
}

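// Send a VERB_ACK to this peer over the given path, acknowledging the number of
// bytes received on that path since the last ACK.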
void Peer::sendACK(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
	Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ACK);
	uint32_t bytesToAck = path->bytesToAck();
	outp.append<uint32_t>(bytesToAck);
	if (atAddress) {
		outp.armor(_key,false);
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		RR->sw->send(tPtr,outp,false);
	}
	path->sentAck(now);
}

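// Send a VERB_QOS_MEASUREMENT packet built from this path's recent receive history
// so the remote peer can estimate the quality of this path.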
void Peer::sendQOS_MEASUREMENT(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
	const int64_t _now = RR->node->now();
	Packet outp(_id.address(),RR->identity.address(),Packet::VERB_QOS_MEASUREMENT);
	char qosData[ZT_PATH_MAX_QOS_PACKET_SZ];
	int16_t len = path->generateQoSPacket(_now,qosData);
	outp.append(qosData,len);
	if (atAddress) {
		outp.armor(_key,false);
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		RR->sw->send(tPtr,outp,false);
	}
	path->sentQoS(now);
}

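// Send a full HELLO to this peer at the given physical address, carrying our
// protocol and software versions, our identity, a timestamp, and the destination
// address as we see it.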
void Peer::sendHELLO(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
	Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);

	outp.append((unsigned char)ZT_PROTO_VERSION);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MAJOR);
	outp.append((unsigned char)ZEROTIER_ONE_VERSION_MINOR);
	outp.append((uint16_t)ZEROTIER_ONE_VERSION_REVISION);
	outp.append(now);
	RR->identity.serialize(outp,false);
	atAddress.serialize(outp);

	RR->node->expectReplyTo(outp.packetId());

	if (atAddress) {
		outp.armor(_key,false); // false == don't encrypt full payload, but add MAC
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		RR->sw->send(tPtr,outp,false); // false == don't encrypt full payload, but add MAC
	}
}

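// Contact the peer at a specific address: a lightweight ECHO when a full HELLO is not
// required and the remote version supports it, otherwise a full HELLO.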
void Peer::attemptToContactAt(void *tPtr,const int64_t localSocket,const InetAddress &atAddress,int64_t now,bool sendFullHello)
{
	if ( (!sendFullHello) && (_vProto >= 5) && (!((_vMajor == 1)&&(_vMinor == 1)&&(_vRevision == 0))) ) {
		Packet outp(_id.address(),RR->identity.address(),Packet::VERB_ECHO);
		RR->node->expectReplyTo(outp.packetId());
		outp.armor(_key,true);
		RR->node->putPacket(tPtr,localSocket,atAddress,outp.data(),outp.size());
	} else {
		sendHELLO(tPtr,localSocket,atAddress,now);
	}
}

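// Periodically try a path remembered by the external path lookup hook, if the node
// implements one.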
void Peer::tryMemorizedPath(void *tPtr,int64_t now)
{
	if ((now - _lastTriedMemorizedPath) >= ZT_TRY_MEMORIZED_PATH_INTERVAL) {
		_lastTriedMemorizedPath = now;
		InetAddress mp;
		if (RR->node->externalPathLookup(tPtr,_id.address(),-1,mp))
			attemptToContactAt(tPtr,-1,mp,now,true);
	}
}

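// Ping or keepalive all paths at the current maximum priority, dropping expired or
// outprioritized paths, and emit aggregate link traces when multipath is active.
// Returns a bit mask of address families contacted (0x1 = IPv4, 0x2 = IPv6).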
unsigned int Peer::doPingAndKeepalive(void *tPtr,int64_t now)
{
	unsigned int sent = 0;
	Mutex::Lock _l(_paths_m);

	const bool sendFullHello = ((now - _lastSentFullHello) >= ZT_PEER_PING_PERIOD);
	_lastSentFullHello = now;

	processBackgroundPeerTasks(now);

	// Emit traces regarding aggregate link status
	if (_canUseMultipath) {
		int alivePathCount = aggregateLinkPhysicalPathCount();
		if ((now - _lastAggregateStatsReport) > ZT_PATH_AGGREGATE_STATS_REPORT_INTERVAL) {
			_lastAggregateStatsReport = now;
			if (alivePathCount) {
				RR->t->peerLinkAggregateStatistics(NULL,*this);
			}
		}
		if (alivePathCount < 2 && _linkIsRedundant) {
			_linkIsRedundant = !_linkIsRedundant;
			RR->t->peerLinkNoLongerRedundant(NULL,*this);
		}
		if (alivePathCount > 1 && !_linkIsRedundant) {
			_linkIsRedundant = !_linkIsRedundant;
			RR->t->peerLinkNowRedundant(NULL,*this);
		}
	}

	// Right now we only keep pinging links that have the maximum priority. The
	// priority is used to track cluster redirections, meaning that when a cluster
	// redirects us its redirect target links override all other links and we
	// let those old links expire.
	long maxPriority = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p)
			maxPriority = std::max(_paths[i].priority,maxPriority);
		else break;
	}

	unsigned int j = 0;
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			// Clean expired and reduced priority paths
			if ( ((now - _paths[i].lr) < ZT_PEER_PATH_EXPIRATION) && (_paths[i].priority == maxPriority) ) {
				if ((sendFullHello)||(_paths[i].p->needsHeartbeat(now))) {
					attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,sendFullHello);
					_paths[i].p->sent(now);
					sent |= (_paths[i].p->address().ss_family == AF_INET) ? 0x1 : 0x2;
				}
				if (i != j)
					_paths[j] = _paths[i];
				++j;
			}
		} else break;
	}
	if (canUseMultipath()) {
		while(j < ZT_MAX_PEER_NETWORK_PATHS) {
			_paths[j].lr = 0;
			_paths[j].p.zero();
			_paths[j].priority = 1;
			++j;
		}
	}
	return sent;
}

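// Handle a cluster redirect: contact the new address and insert it as a
// higher-priority path, dropping lower-priority or duplicate paths.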
void Peer::clusterRedirect(void *tPtr,const SharedPtr<Path> &originatingPath,const InetAddress &remoteAddress,const int64_t now)
{
	SharedPtr<Path> np(RR->topology->getPath(originatingPath->localSocket(),remoteAddress));
	RR->t->peerRedirected(tPtr,0,*this,np);

	attemptToContactAt(tPtr,originatingPath->localSocket(),remoteAddress,now,true);

	{
		Mutex::Lock _l(_paths_m);

		// New priority is higher than the priority of the originating path (if known)
		long newPriority = 1;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if (_paths[i].p == originatingPath) {
					newPriority = _paths[i].priority;
					break;
				}
			} else break;
		}
		newPriority += 2;

		// Erase any paths with lower priority than this one or that are duplicate
		// IPs and add this path.
		unsigned int j = 0;
		for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
			if (_paths[i].p) {
				if ((_paths[i].priority >= newPriority)&&(!_paths[i].p->address().ipsEqual2(remoteAddress))) {
					if (i != j)
						_paths[j] = _paths[i];
					++j;
				}
			}
		}
		if (j < ZT_MAX_PEER_NETWORK_PATHS) {
			_paths[j].lr = now;
			_paths[j].p = np;
			_paths[j].priority = newPriority;
			++j;
			while (j < ZT_MAX_PEER_NETWORK_PATHS) {
				_paths[j].lr = 0;
				_paths[j].p.zero();
				_paths[j].priority = 1;
				++j;
			}
		}
	}
}

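// Re-contact all paths in the given address family and IP scope and mark them
// unconfirmed; they will not be used again until the peer responds on them.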
void Peer::resetWithinScope(void *tPtr,InetAddress::IpScope scope,int inetAddressFamily,int64_t now)
{
	Mutex::Lock _l(_paths_m);
	for(unsigned int i=0;i<ZT_MAX_PEER_NETWORK_PATHS;++i) {
		if (_paths[i].p) {
			if ((_paths[i].p->address().ss_family == inetAddressFamily)&&(_paths[i].p->ipScope() == scope)) {
				attemptToContactAt(tPtr,_paths[i].p->localSocket(),_paths[i].p->address(),now,false);
				_paths[i].p->sent(now);
				_paths[i].lr = 0; // path will not be used unless it speaks again
			}
		} else break;
	}
}

} // namespace ZeroTier