/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2018 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * You can be released from the requirements of the license by purchasing
 * a commercial license. Buying such a license is mandatory as soon as you
 * develop commercial closed-source software that incorporates or links
 * directly against ZeroTier software without disclosing the source code
 * of your own application.
 */

#ifndef ZT_PATH_HPP
#define ZT_PATH_HPP

#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>      // exp() in processBackgroundPathMeasurements()

#include <stdexcept>
#include <algorithm>
#include <map>         // std::map used for the QoS/ACK record tables

#include "Constants.hpp"
#include "InetAddress.hpp"
#include "SharedPtr.hpp"
#include "AtomicCounter.hpp"
#include "Utils.hpp"
#include "Mutex.hpp"   // _statistics_m
#include "RingBuffer.hpp"
#include "Packet.hpp"

#include "../osdep/Phy.hpp"

/**
 * Maximum return value of preferenceRank()
 */
#define ZT_PATH_MAX_PREFERENCE_RANK ((ZT_INETADDRESS_MAX_SCOPE << 1) | 1)

namespace ZeroTier {

class RuntimeEnvironment;

/**
 * A path across the physical network
 */
class Path
{
	friend class SharedPtr<Path>;
	Phy<Path *> *_phy;

public:
	/**
	 * Efficient unique key for paths in a Hashtable
	 */
	class HashKey
	{
	public:
		HashKey() {}

		HashKey(const int64_t l,const InetAddress &r)
		{
			if (r.ss_family == AF_INET) {
				_k[0] = (uint64_t)reinterpret_cast<const struct sockaddr_in *>(&r)->sin_addr.s_addr;
				_k[1] = (uint64_t)reinterpret_cast<const struct sockaddr_in *>(&r)->sin_port;
				_k[2] = (uint64_t)l;
			} else if (r.ss_family == AF_INET6) {
				ZT_FAST_MEMCPY(_k,reinterpret_cast<const struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,16);
				_k[2] = ((uint64_t)reinterpret_cast<const struct sockaddr_in6 *>(&r)->sin6_port << 32) ^ (uint64_t)l;
			} else {
				ZT_FAST_MEMCPY(_k,&r,std::min(sizeof(_k),sizeof(InetAddress)));
				_k[2] += (uint64_t)l;
			}
		}

		inline unsigned long hashCode() const { return (unsigned long)(_k[0] + _k[1] + _k[2]); }

		inline bool operator==(const HashKey &k) const { return ( (_k[0] == k._k[0]) && (_k[1] == k._k[1]) && (_k[2] == k._k[2]) ); }
		inline bool operator!=(const HashKey &k) const { return (!(*this == k)); }

	private:
		uint64_t _k[3];
	};
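
	// Illustrative (hypothetical) use of HashKey: paths are generally looked up by the
	// (local socket, remote address) pair that identifies them, e.g.:
	//
	//   Hashtable< Path::HashKey,SharedPtr<Path> > paths;
	//   paths.set(Path::HashKey(localSocket,remoteAddress),SharedPtr<Path>(new Path(localSocket,remoteAddress)));
	//   SharedPtr<Path> *const p = paths.get(Path::HashKey(localSocket,remoteAddress));
	//
	// The container and call sites above are a sketch for illustration only; this header
	// only defines the key type.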

	Path() :
		_lastOut(0),
		_lastIn(0),
		_lastTrustEstablishedPacketReceived(0),
		_lastPathQualityComputeTime(0),
		_localSocket(-1),
		_latency(0xffff),
		_addr(),
		_ipScope(InetAddress::IP_SCOPE_NONE),
		_lastAck(0),
		_lastThroughputEstimation(0),
		_lastQoSMeasurement(0),
		_lastQoSRecordPurge(0),
		_unackedBytes(0),
		_expectingAckAsOf(0),
		_packetsReceivedSinceLastAck(0),
		_packetsReceivedSinceLastQoS(0),
		_meanThroughput(0.0),
		_maxLifetimeThroughput(0),
		_bytesAckedSinceLastThroughputEstimation(0),
		_meanLatency(0.0),
		_packetDelayVariance(0.0),
		_packetErrorRatio(0.0),
		_packetLossRatio(0),
		_lastComputedStability(0.0),
		_lastComputedRelativeQuality(0)
	{
		prepareBuffers();
	}

	Path(const int64_t localSocket,const InetAddress &addr) :
		_lastOut(0),
		_lastIn(0),
		_lastTrustEstablishedPacketReceived(0),
		_lastPathQualityComputeTime(0),
		_localSocket(localSocket),
		_latency(0xffff),
		_addr(addr),
		_ipScope(addr.ipScope()),
		_lastAck(0),
		_lastThroughputEstimation(0),
		_lastQoSMeasurement(0),
		_lastQoSRecordPurge(0),
		_unackedBytes(0),
		_expectingAckAsOf(0),
		_packetsReceivedSinceLastAck(0),
		_packetsReceivedSinceLastQoS(0),
		_meanThroughput(0.0),
		_maxLifetimeThroughput(0),
		_bytesAckedSinceLastThroughputEstimation(0),
		_meanLatency(0.0),
		_packetDelayVariance(0.0),
		_packetErrorRatio(0.0),
		_packetLossRatio(0),
		_lastComputedStability(0.0),
		_lastComputedRelativeQuality(0)
	{
		prepareBuffers();
		_phy->getIfName((PhySocket *)((uintptr_t)_localSocket), _ifname, 16);
	}

	~Path()
	{
		delete _throughputSamples;
		delete _latencySamples;
		delete _qualitySamples;
		delete _packetValiditySamples;
		_throughputSamples = NULL;
		_latencySamples = NULL;
		_qualitySamples = NULL;
		_packetValiditySamples = NULL;
	}

	/**
	 * Called when a packet is received from this remote path, regardless of content
	 *
	 * @param t Time of receive
	 */
	inline void received(const uint64_t t) { _lastIn = t; }

	/**
	 * Set time last trusted packet was received (done in Peer::received())
	 */
	inline void trustedPacketReceived(const uint64_t t) { _lastTrustEstablishedPacketReceived = t; }

	/**
	 * Send a packet via this path (last out time is also updated)
	 *
	 * @param RR Runtime environment
	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
	 * @param data Packet data
	 * @param len Packet length
	 * @param now Current time
	 * @return True if transport reported success
	 */
	bool send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now);

	/**
	 * Manually update last sent time
	 *
	 * @param t Time of send
	 */
	inline void sent(const int64_t t) { _lastOut = t; }

	/**
	 * Update path latency with a new measurement
	 *
	 * @param l Measured latency
	 */
	inline void updateLatency(const unsigned int l, int64_t now)
	{
		unsigned int pl = _latency;
		if (pl < 0xffff) {
			_latency = (pl + l) / 2;
		}
		else {
			_latency = l;
		}
		_latencySamples->push(l);
	}
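
	// Note: updateLatency() above keeps _latency as a simple two-point running average
	// (the previous estimate averaged with the new measurement) while also pushing every
	// raw measurement into _latencySamples for the windowed statistics computed later in
	// processBackgroundPathMeasurements().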

	/**
	 * @return Local socket as specified by external code
	 */
	inline int64_t localSocket() const { return _localSocket; }

	/**
	 * @return Physical address
	 */
	inline const InetAddress &address() const { return _addr; }

	/**
	 * @return IP scope -- faster shortcut for address().ipScope()
	 */
	inline InetAddress::IpScope ipScope() const { return _ipScope; }

	/**
	 * @return True if path has received a trust established packet (e.g. common network membership) in the past ZT_TRUST_EXPIRATION ms
	 */
	inline bool trustEstablished(const int64_t now) const { return ((now - _lastTrustEstablishedPacketReceived) < ZT_TRUST_EXPIRATION); }

	/**
	 * @return Preference rank, higher == better
	 */
	inline unsigned int preferenceRank() const
	{
		// This causes us to rank paths in order of IP scope rank (see InetAddress.hpp) but
		// within each IP scope class to prefer IPv6 over IPv4.
		return ( ((unsigned int)_ipScope << 1) | (unsigned int)(_addr.ss_family == AF_INET6) );
	}
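
	// Example: a global-scope IPv6 address yields ((IP_SCOPE_GLOBAL << 1) | 1), one higher
	// than the same-scope IPv4 case, which is why ZT_PATH_MAX_PREFERENCE_RANK is defined as
	// ((ZT_INETADDRESS_MAX_SCOPE << 1) | 1).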

	/**
	 * Check whether this address is valid for a ZeroTier path
	 *
	 * This checks the address type and scope against address types and scopes
	 * that we currently support for ZeroTier communication.
	 *
	 * @param a Address to check
	 * @return True if address is good for ZeroTier path use
	 */
	static inline bool isAddressValidForPath(const InetAddress &a)
	{
		if ((a.ss_family == AF_INET)||(a.ss_family == AF_INET6)) {
			switch(a.ipScope()) {
				/* Note: we don't do link-local at the moment. Unfortunately these
				 * cause several issues. The first is that they usually require a
				 * device qualifier, which we don't handle yet and can't portably
				 * push in PUSH_DIRECT_PATHS. The second is that some OSes assign
				 * these very ephemerally or otherwise strangely. So we'll use
				 * private, pseudo-private, shared (e.g. carrier grade NAT), or
				 * global IP addresses. */
				case InetAddress::IP_SCOPE_PRIVATE:
				case InetAddress::IP_SCOPE_PSEUDOPRIVATE:
				case InetAddress::IP_SCOPE_SHARED:
				case InetAddress::IP_SCOPE_GLOBAL:
					if (a.ss_family == AF_INET6) {
						// TEMPORARY HACK: for now, we are going to blacklist he.net IPv6
						// tunnels due to very spotty performance and low MTU issues over
						// these IPv6 tunnel links.
						const uint8_t *ipd = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr);
						if ((ipd[0] == 0x20)&&(ipd[1] == 0x01)&&(ipd[2] == 0x04)&&(ipd[3] == 0x70))
							return false;
					}
					return true;
				default:
					return false;
			}
		}
		return false;
	}

	/**
	 * @return Latency or 0xffff if unknown
	 */
	inline unsigned int latency() const { return _latency; }

	/**
	 * @return Path quality -- lower is better
	 */
	inline long quality(const int64_t now) const
	{
		const long l = (long)_latency;
		const long age = (long)std::min((now - _lastIn),(int64_t)(ZT_PATH_HEARTBEAT_PERIOD * 10)); // set an upper sanity limit to avoid overflow
		return (((age < (ZT_PATH_HEARTBEAT_PERIOD + 5000)) ? l : (l + 0xffff + age)) * (long)((ZT_INETADDRESS_MAX_SCOPE - _ipScope) + 1));
	}
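
	// Note: quality() above is latency plus a large penalty (0xffff + age) once the path has
	// been silent for longer than a heartbeat period plus slack, with the whole figure scaled
	// up for less-preferred IP scopes. Lower values therefore mean "recently active, low
	// latency, preferred scope."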

	/**
	 * Record statistics on outgoing packets. Used later to estimate QoS metrics.
	 *
	 * @param now Current time
	 * @param packetId ID of packet
	 * @param payloadLength Length of payload
	 * @param verb Packet verb
	 */
	inline void recordOutgoingPacket(int64_t now, int64_t packetId, uint16_t payloadLength, Packet::Verb verb)
	{
		Mutex::Lock _l(_statistics_m);
		if (verb == Packet::VERB_FRAME || verb == Packet::VERB_EXT_FRAME) {
			if (packetId % 2 == 0) { // even -> use for ACK
				_unackedBytes += payloadLength;
				// Take note that we're expecting a VERB_ACK on this path as of a specific time
				_expectingAckAsOf = ackAge(now) > ZT_PATH_ACK_INTERVAL ? _expectingAckAsOf : now;
			}
			else { // odd -> use for QoS
				if (_outQoSRecords.size() < ZT_PATH_MAX_OUTSTANDING_QOS_RECORDS) {
					_outQoSRecords[packetId] = now;
				}
			}
		}
	}
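
	// Note: recordOutgoingPacket() above and recordIncomingPacket() below split their
	// accounting by packet ID parity: even IDs feed the byte-count/ACK bookkeeping, odd IDs
	// feed the QoS (latency/holding-time) records. The two sample sets stay disjoint and each
	// mechanism only tracks roughly half of ordinary VERB_FRAME / VERB_EXT_FRAME traffic.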

	/**
	 * Record statistics on incoming packets. Used later to estimate QoS metrics.
	 *
	 * @param now Current time
	 * @param packetId ID of packet
	 * @param payloadLength Length of payload
	 * @param verb Packet verb
	 */
	inline void recordIncomingPacket(int64_t now, int64_t packetId, uint16_t payloadLength, Packet::Verb verb)
	{
		Mutex::Lock _l(_statistics_m);
		if (verb == Packet::VERB_FRAME || verb == Packet::VERB_EXT_FRAME) {
			if (packetId % 2 == 0) { // even -> use for ACK
				_inACKRecords[packetId] = payloadLength;
				_packetsReceivedSinceLastAck++;
			}
			else { // odd -> use for QoS
				_inQoSRecords[packetId] = now;
				_packetsReceivedSinceLastQoS++;
			}
			_packetValiditySamples->push(true);
		}
	}

	/**
	 * Record that we've received a VERB_ACK on this path, also compute throughput if required.
	 *
	 * @param now Current time
	 * @param ackedBytes Number of bytes acknowledged by other peer
	 */
	inline void receivedAck(int64_t now, int32_t ackedBytes)
	{
		_expectingAckAsOf = 0;
		_unackedBytes = (ackedBytes > _unackedBytes) ? 0 : _unackedBytes - ackedBytes;
		int64_t timeSinceThroughputEstimate = (now - _lastThroughputEstimation);
		if (timeSinceThroughputEstimate >= ZT_PATH_THROUGHPUT_MEASUREMENT_INTERVAL) {
			uint64_t throughput = (float)(_bytesAckedSinceLastThroughputEstimation) / ((float)timeSinceThroughputEstimate / (float)1000);
			_throughputSamples->push(throughput);
			_maxLifetimeThroughput = throughput > _maxLifetimeThroughput ? throughput : _maxLifetimeThroughput;
			_lastThroughputEstimation = now;
			_bytesAckedSinceLastThroughputEstimation = 0;
		} else {
			_bytesAckedSinceLastThroughputEstimation += ackedBytes;
		}
	}
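
	// Note: in receivedAck() above, throughput is estimated once per measurement interval as
	// bytes acknowledged divided by elapsed seconds, pushed into _throughputSamples, and the
	// largest estimate seen so far is retained in _maxLifetimeThroughput.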

	/**
	 * @return Number of bytes this peer is responsible for ACKing since last ACK
	 */
	inline int32_t bytesToAck()
	{
		Mutex::Lock _l(_statistics_m);
		int32_t bytesToAck = 0;
		std::map<uint64_t,uint16_t>::iterator it = _inACKRecords.begin();
		while (it != _inACKRecords.end()) {
			bytesToAck += it->second;
			it++;
		}
		return bytesToAck;
	}

	/**
	 * @return Number of bytes thus far sent that have not been acknowledged by the remote peer
	 */
	inline int64_t unackedSentBytes()
	{
		return _unackedBytes;
	}

	/**
	 * Account for the fact that an ACK was just sent. Reset counters, timers, and clear statistics buffers
	 *
	 * @param now Current time
	 */
	inline void sentAck(int64_t now)
	{
		Mutex::Lock _l(_statistics_m);
		_inACKRecords.clear();
		_packetsReceivedSinceLastAck = 0;
		_lastAck = now;
	}

	/**
	 * Receive QoS data, match with recorded egress times from this peer, compute latency
	 * estimates.
	 *
	 * @param now Current time
	 * @param count Number of records
	 * @param rx_id table of packet IDs
	 * @param rx_ts table of holding times
	 */
	inline void receivedQoS(int64_t now, int count, uint64_t *rx_id, uint16_t *rx_ts)
	{
		Mutex::Lock _l(_statistics_m);
		// Look up egress times and compute latency values for each record
		std::map<uint64_t,uint64_t>::iterator it;
		for (int j=0; j<count; j++) {
			it = _outQoSRecords.find(rx_id[j]);
			if (it != _outQoSRecords.end()) {
				uint16_t rtt = (uint16_t)(now - it->second);
				uint16_t rtt_compensated = rtt - rx_ts[j];
				float latency = rtt_compensated / 2.0;
				updateLatency(latency, now);
				_outQoSRecords.erase(it);
			}
		}
	}
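
	// Note: receivedQoS() above estimates one-way latency as (RTT - remote holding time) / 2,
	// where RTT is measured against our recorded egress time and the holding time reported by
	// the peer compensates for however long it held the record before sending VERB_QOS_MEASUREMENT.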

	/**
	 * Generate the contents of a VERB_QOS_MEASUREMENT packet.
	 *
	 * @param now Current time
	 * @param qosBuffer destination buffer
	 * @return Size of payload
	 */
	inline int32_t generateQoSPacket(int64_t now, char *qosBuffer)
	{
		Mutex::Lock _l(_statistics_m);
		int32_t len = 0;
		std::map<uint64_t,uint64_t>::iterator it = _inQoSRecords.begin();
		int i=0;
		while (i<_packetsReceivedSinceLastQoS && it != _inQoSRecords.end()) {
			uint64_t id = it->first;
			memcpy(qosBuffer, &id, sizeof(uint64_t));
			qosBuffer+=sizeof(uint64_t);
			uint16_t holdingTime = (now - it->second);
			memcpy(qosBuffer, &holdingTime, sizeof(uint16_t));
			qosBuffer+=sizeof(uint16_t);
			len+=sizeof(uint64_t)+sizeof(uint16_t);
			_inQoSRecords.erase(it++);
			i++;
		}
		return len;
	}
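
	// Note: each record written by generateQoSPacket() above occupies sizeof(uint64_t) +
	// sizeof(uint16_t) = 10 bytes: the packet ID followed by how long the record was held
	// (assuming millisecond clocks throughout) before this QoS packet was generated, so the
	// returned payload length is always a multiple of 10.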

	/**
	 * Account for the fact that a VERB_QOS_MEASUREMENT was just sent. Reset timers.
	 *
	 * @param now Current time
	 */
	inline void sentQoS(int64_t now) {
		_packetsReceivedSinceLastQoS = 0;
		_lastQoSMeasurement = now;
	}

	/**
	 * @param now Current time
	 * @return Whether an ACK (VERB_ACK) packet needs to be emitted at this time
	 */
	inline bool needsToSendAck(int64_t now) {
		return ((now - _lastAck) >= ZT_PATH_ACK_INTERVAL ||
			(_packetsReceivedSinceLastAck == ZT_PATH_QOS_TABLE_SIZE)) && _packetsReceivedSinceLastAck;
	}

	/**
	 * @param now Current time
	 * @return Whether a QoS (VERB_QOS_MEASUREMENT) packet needs to be emitted at this time
	 */
	inline bool needsToSendQoS(int64_t now) {
		return ((_packetsReceivedSinceLastQoS >= ZT_PATH_QOS_TABLE_SIZE) ||
			((now - _lastQoSMeasurement) > ZT_PATH_QOS_INTERVAL)) && _packetsReceivedSinceLastQoS;
	}
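
	// Note: both needsToSendAck() and needsToSendQoS() above fire only when at least one
	// relevant packet has been received, and then either when the corresponding interval has
	// elapsed or when the receive counter reaches the table size (ZT_PATH_QOS_TABLE_SIZE).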

	/**
	 * How much time has elapsed since we've been expecting a VERB_ACK on this path. This value
	 * is used to determine a more relevant path "age". This lets us penalize paths which are no
	 * longer ACKing, but not those that simply aren't being used to carry traffic at the
	 * current time.
	 */
	inline int64_t ackAge(int64_t now) { return _expectingAckAsOf ? now - _expectingAckAsOf : 0; }

	/**
	 * @return The maximum observed throughput for this path
	 */
	inline uint64_t maxLifetimeThroughput() { return _maxLifetimeThroughput; }

	/**
	 * @return The mean throughput (in bits/s) of this link
	 */
	inline float meanThroughput() { return _meanThroughput; }

	/**
	 * Assign a new relative quality value for this path in the aggregate link
	 *
	 * @param rq Quality of this path in comparison to other paths available to this peer
	 */
	inline void updateRelativeQuality(float rq) { _lastComputedRelativeQuality = rq; }

	/**
	 * @return Quality of this path compared to others in the aggregate link
	 */
	inline float relativeQuality() { return _lastComputedRelativeQuality; }

	/**
	 * @return The most recently computed path stability estimate (these are expensive to compute, so the last result is cached)
	 */
	inline float lastComputedStability() { return _lastComputedStability; }

	/**
	 * @return A pointer to a cached copy of the human-readable name of the interface this Path's localSocket is bound to
	 */
	inline char *getName() { return _ifname; }

	/**
	 * @return Packet delay variance
	 */
	inline float packetDelayVariance() { return _packetDelayVariance; }

	/**
	 * @return Previously-computed mean latency
	 */
	inline float meanLatency() { return _meanLatency; }

	/**
	 * @return Packet loss ratio (PLR)
	 */
	inline float packetLossRatio() { return _packetLossRatio; }

	/**
	 * @return Packet error ratio (PER)
	 */
	inline float packetErrorRatio() { return _packetErrorRatio; }

	/**
	 * Record an invalid incoming packet. This packet failed MAC/compression/cipher checks and will now
	 * contribute to a Packet Error Ratio (PER).
	 */
	inline void recordInvalidPacket() { _packetValiditySamples->push(false); }

	/**
	 * @return A pointer to a cached copy of the address string for this Path (for debugging only)
	 */
	inline char *getAddressString() { return _addrString; }

	/**
	 * Compute and cache stability and performance metrics. The resultant stability coefficient is a measure of how "well behaved"
	 * this path is. This figure is substantially different from (but required for) the estimation of the path's overall "quality".
	 *
	 * @param now Current time
	 */
	inline void processBackgroundPathMeasurements(int64_t now, const int64_t peerId) {
		// Compute path stability
		if (now - _lastPathQualityComputeTime > ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
			Mutex::Lock _l(_statistics_m);
			_lastPathQualityComputeTime = now;
			address().toString(_addrString);
			_meanThroughput = _throughputSamples->mean();
			_meanLatency = _latencySamples->mean();
			_packetDelayVariance = _latencySamples->stddev(); // Similar to "jitter" (SEE: RFC 3393, RFC 4689)
			// If no packet validity samples, assume PER==0
			_packetErrorRatio = 1 - (_packetValiditySamples->count() ? _packetValiditySamples->mean() : 1);
			// Compute path stability
			// Normalize measurements with wildly different ranges into a reasonable range
			float normalized_pdv = Utils::normalize(_packetDelayVariance, 0, ZT_PATH_MAX_PDV, 0, 10);
			float normalized_la = Utils::normalize(_meanLatency, 0, ZT_PATH_MAX_MEAN_LATENCY, 0, 10);
			float throughput_cv = _throughputSamples->mean() > 0 ? _throughputSamples->stddev() / _throughputSamples->mean() : 1;
			// Form an exponential cutoff and apply contribution weights
			float pdv_contrib = exp((-1)*normalized_pdv) * ZT_PATH_CONTRIB_PDV;
			float latency_contrib = exp((-1)*normalized_la) * ZT_PATH_CONTRIB_LATENCY;
			float throughput_disturbance_contrib = exp((-1)*throughput_cv) * ZT_PATH_CONTRIB_THROUGHPUT_DISTURBANCE;
			// Obey user-defined ignored contributions
			pdv_contrib = ZT_PATH_CONTRIB_PDV > 0.0 ? pdv_contrib : 1;
			latency_contrib = ZT_PATH_CONTRIB_LATENCY > 0.0 ? latency_contrib : 1;
			throughput_disturbance_contrib = ZT_PATH_CONTRIB_THROUGHPUT_DISTURBANCE > 0.0 ? throughput_disturbance_contrib : 1;
			// Compute the quality product
			_lastComputedStability = pdv_contrib + latency_contrib + throughput_disturbance_contrib;
			_lastComputedStability *= 1 - _packetErrorRatio;
			_qualitySamples->push(_lastComputedStability);

			// Prevent QoS records from sticking around for too long
			std::map<uint64_t,uint64_t>::iterator it = _outQoSRecords.begin();
			while (it != _outQoSRecords.end()) {
				// Time since egress of tracked packet
				if ((now - it->second) >= ZT_PATH_QOS_TIMEOUT) {
					_outQoSRecords.erase(it++);
				} else { it++; }
			}
		}
	}
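
	// Note: the stability figure computed above is, in effect,
	//   stability = (w_pdv * e^-pdv_n + w_lat * e^-lat_n + w_tp * e^-cv) * (1 - PER)
	// where pdv_n and lat_n are packet delay variance and mean latency normalized into [0,10],
	// cv is the coefficient of variation of throughput, the w_* weights are the ZT_PATH_CONTRIB_*
	// constants (a weight of zero disables that term by pinning its contribution to 1), and PER
	// is the packet error ratio.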

	/**
	 * @return True if this path is alive (receiving heartbeats)
	 */
	inline bool alive(const int64_t now) const { return ((now - _lastIn) < (ZT_PATH_HEARTBEAT_PERIOD + 5000)); }

	/**
	 * @return True if this path needs a heartbeat
	 */
	inline bool needsHeartbeat(const int64_t now) const { return ((now - _lastOut) >= ZT_PATH_HEARTBEAT_PERIOD); }

	/**
	 * @return Last time we sent something
	 */
	inline int64_t lastOut() const { return _lastOut; }

	/**
	 * @return Last time we received anything
	 */
	inline int64_t lastIn() const { return _lastIn; }

	/**
	 * @return Time last trust-established packet was received
	 */
	inline int64_t lastTrustEstablishedPacketReceived() const { return _lastTrustEstablishedPacketReceived; }

	/**
	 * Initialize statistical buffers
	 */
	inline void prepareBuffers() {
		_throughputSamples = new RingBuffer<uint64_t>(ZT_PATH_QUALITY_METRIC_WIN_SZ);
		_latencySamples = new RingBuffer<uint32_t>(ZT_PATH_QUALITY_METRIC_WIN_SZ);
		_qualitySamples = new RingBuffer<float>(ZT_PATH_QUALITY_METRIC_WIN_SZ);
		_packetValiditySamples = new RingBuffer<bool>(ZT_PATH_QUALITY_METRIC_WIN_SZ);
		memset(_ifname, 0, 16);
		memset(_addrString, 0, sizeof(_addrString));
	}

private:
	Mutex _statistics_m;

	volatile int64_t _lastOut;
	volatile int64_t _lastIn;
	volatile int64_t _lastTrustEstablishedPacketReceived;
	volatile int64_t _lastPathQualityComputeTime;
	int64_t _localSocket;
	volatile unsigned int _latency;
	InetAddress _addr;
	InetAddress::IpScope _ipScope; // memoize this since it's a computed value checked often
	AtomicCounter __refCount;

	std::map<uint64_t, uint64_t> _outQoSRecords; // id:egress_time
	std::map<uint64_t, uint64_t> _inQoSRecords; // id:now
	std::map<uint64_t, uint16_t> _inACKRecords; // id:len

	int64_t _lastAck;
	int64_t _lastThroughputEstimation;
	int64_t _lastQoSMeasurement;
	int64_t _lastQoSRecordPurge;

	int64_t _unackedBytes;
	int64_t _expectingAckAsOf;
	int16_t _packetsReceivedSinceLastAck;
	int16_t _packetsReceivedSinceLastQoS;

	float _meanThroughput;
	uint64_t _maxLifetimeThroughput;
	uint64_t _bytesAckedSinceLastThroughputEstimation;

	volatile float _meanLatency;
	float _packetDelayVariance;

	float _packetErrorRatio;
	float _packetLossRatio;

	// cached estimates
	float _lastComputedStability;
	float _lastComputedRelativeQuality;

	// cached human-readable strings for tracing purposes
	char _ifname[16];
	char _addrString[256];

	RingBuffer<uint64_t> *_throughputSamples;
	RingBuffer<uint32_t> *_latencySamples;
	RingBuffer<float> *_qualitySamples;
	RingBuffer<bool> *_packetValiditySamples;
};

} // namespace ZeroTier

#endif