ZeroTierOne/node/Path.hpp

653 lines
20 KiB
C++
Raw Normal View History

/*
* ZeroTier One - Network Virtualization Everywhere
2018-01-08 22:33:28 +00:00
* Copyright (C) 2011-2018 ZeroTier, Inc. https://www.zerotier.com/
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
2017-04-28 03:47:25 +00:00
*
* --
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial closed-source software that incorporates or links
* directly against ZeroTier software without disclosing the source code
* of your own application.
*/
#ifndef ZT_PATH_HPP
#define ZT_PATH_HPP

#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>

#include <map>
#include <stdexcept>
#include <algorithm>

#include "Constants.hpp"
#include "InetAddress.hpp"
#include "SharedPtr.hpp"
#include "AtomicCounter.hpp"
#include "Utils.hpp"
#include "RingBuffer.hpp"

#include "../osdep/Phy.hpp"
/**
* Maximum return value of preferenceRank()
*/
#define ZT_PATH_MAX_PREFERENCE_RANK ((ZT_INETADDRESS_MAX_SCOPE << 1) | 1)
namespace ZeroTier {
class RuntimeEnvironment;
/**
* A path across the physical network
*/
2018-01-27 02:34:56 +00:00
class Path
{
friend class SharedPtr<Path>;
2018-05-01 23:32:15 +00:00
Phy<Path *> *_phy;
public:
	/**
	 * Efficient unique key for paths in a Hashtable
	 *
	 * Packs (local socket, remote address) into three 64-bit words. The packing
	 * differs by address family, but equality and hashCode() only ever look at
	 * the packed words themselves.
	 */
	class HashKey
	{
	public:
		HashKey() {}

		// l is the local socket handle, r the remote physical address.
		HashKey(const int64_t l,const InetAddress &r)
		{
			if (r.ss_family == AF_INET) {
				// IPv4: word 0 = 32-bit address, word 1 = port, word 2 = local socket
				_k[0] = (uint64_t)reinterpret_cast<const struct sockaddr_in *>(&r)->sin_addr.s_addr;
				_k[1] = (uint64_t)reinterpret_cast<const struct sockaddr_in *>(&r)->sin_port;
				_k[2] = (uint64_t)l;
			} else if (r.ss_family == AF_INET6) {
				// IPv6: words 0-1 = full 128-bit address, word 2 mixes port and local socket
				ZT_FAST_MEMCPY(_k,reinterpret_cast<const struct sockaddr_in6 *>(&r)->sin6_addr.s6_addr,16);
				_k[2] = ((uint64_t)reinterpret_cast<const struct sockaddr_in6 *>(&r)->sin6_port << 32) ^ (uint64_t)l;
			} else {
				// Other families: copy raw sockaddr bytes (truncated to sizeof(_k) == 24 bytes)
				// and fold the local socket into the last word.
				ZT_FAST_MEMCPY(_k,&r,std::min(sizeof(_k),sizeof(InetAddress)));
				_k[2] += (uint64_t)l;
			}
		}

		inline unsigned long hashCode() const { return (unsigned long)(_k[0] + _k[1] + _k[2]); }

		inline bool operator==(const HashKey &k) const { return ( (_k[0] == k._k[0]) && (_k[1] == k._k[1]) && (_k[2] == k._k[2]) ); }
		inline bool operator!=(const HashKey &k) const { return (!(*this == k)); }

	private:
		uint64_t _k[3]; // packed key material
	};
	// Default constructor: no local socket (-1), empty address, latency unknown
	// (0xffff sentinel), all statistics zeroed. Allocates the sample ring buffers.
	// NOTE(review): _phy is not initialized by either constructor but is
	// dereferenced in processBackgroundPathMeasurements() -- confirm it is set
	// elsewhere before that runs.
	Path() :
		_lastOut(0),
		_lastIn(0),
		_lastTrustEstablishedPacketReceived(0),
		_lastPathQualityComputeTime(0),
		_localSocket(-1),
		_latency(0xffff), // 0xffff == latency unknown
		_addr(),
		_ipScope(InetAddress::IP_SCOPE_NONE),
		_lastAck(0),
		_lastThroughputEstimation(0),
		_lastQoSMeasurement(0),
		_unackedBytes(0),
		_expectingAckAsOf(0),
		_packetsReceivedSinceLastAck(0),
		_meanThroughput(0.0),
		_maxLifetimeThroughput(0),
		_bytesAckedSinceLastThroughputEstimation(0),
		_meanLatency(0.0),
		_packetDelayVariance(0.0),
		_packetErrorRatio(0.0),
		_packetLossRatio(0),
		_lastComputedStability(0.0),
		_lastComputedRelativeQuality(0)
	{
		prepareBuffers();
	}
2017-07-06 18:45:22 +00:00
	// Construct a path to a remote physical address.
	//
	// @param localSocket Local socket handle as provided by external code
	// @param addr Remote physical address (its IP scope is memoized in _ipScope)
	//
	// NOTE(review): _phy is not initialized by either constructor but is
	// dereferenced in processBackgroundPathMeasurements() -- confirm it is set
	// elsewhere before that runs.
	Path(const int64_t localSocket,const InetAddress &addr) :
		_lastOut(0),
		_lastIn(0),
		_lastTrustEstablishedPacketReceived(0),
		_lastPathQualityComputeTime(0),
		_localSocket(localSocket),
		_latency(0xffff), // 0xffff == latency unknown
		_addr(addr),
		_ipScope(addr.ipScope()), // memoized; see ipScope()
		_lastAck(0),
		_lastThroughputEstimation(0),
		_lastQoSMeasurement(0),
		_unackedBytes(0),
		_expectingAckAsOf(0),
		_packetsReceivedSinceLastAck(0),
		_meanThroughput(0.0),
		_maxLifetimeThroughput(0),
		_bytesAckedSinceLastThroughputEstimation(0),
		_meanLatency(0.0),
		_packetDelayVariance(0.0),
		_packetErrorRatio(0.0),
		_packetLossRatio(0),
		_lastComputedStability(0.0),
		_lastComputedRelativeQuality(0)
	{
		prepareBuffers();
	}
~Path()
{
2018-05-01 23:32:15 +00:00
delete _throughputSamples;
delete _latencySamples;
delete _qualitySamples;
delete _packetValiditySamples;
2018-05-01 23:32:15 +00:00
_throughputSamples = NULL;
_latencySamples = NULL;
_qualitySamples = NULL;
_packetValiditySamples = NULL;
}
	/**
	 * Called when a packet is received from this remote path, regardless of content
	 *
	 * @param t Time of receive
	 */
	inline void received(const uint64_t t) { _lastIn = t; }

	/**
	 * Set time last trusted packet was received (done in Peer::received())
	 *
	 * @param t Time of receive
	 */
	inline void trustedPacketReceived(const uint64_t t) { _lastTrustEstablishedPacketReceived = t; }

	/**
	 * Send a packet via this path (last out time is also updated)
	 *
	 * Declared here; defined out of line (not in this header).
	 *
	 * @param RR Runtime environment
	 * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
	 * @param data Packet data
	 * @param len Packet length
	 * @param now Current time
	 * @return True if transport reported success
	 */
	bool send(const RuntimeEnvironment *RR,void *tPtr,const void *data,unsigned int len,int64_t now);

	/**
	 * Manually update last sent time
	 *
	 * @param t Time of send
	 */
	inline void sent(const int64_t t) { _lastOut = t; }
/**
* Update path latency with a new measurement
*
* @param l Measured latency
*/
2018-05-01 23:32:15 +00:00
inline void updateLatency(const unsigned int l, int64_t now)
{
unsigned int pl = _latency;
2018-05-01 23:32:15 +00:00
if (pl < 0xffff) {
_latency = (pl + l) / 2;
2018-05-01 23:32:15 +00:00
}
else {
_latency = l;
}
_latencySamples->push(l);
}
	/**
	 * @return Local socket as specified by external code
	 */
	inline int64_t localSocket() const { return _localSocket; }

	/**
	 * @return Physical address
	 */
	inline const InetAddress &address() const { return _addr; }

	/**
	 * @return IP scope -- faster shortcut for address().ipScope() (memoized at construction)
	 */
	inline InetAddress::IpScope ipScope() const { return _ipScope; }

	/**
	 * @return True if path has received a trust established packet (e.g. common network membership) in the past ZT_TRUST_EXPIRATION ms
	 */
	inline bool trustEstablished(const int64_t now) const { return ((now - _lastTrustEstablishedPacketReceived) < ZT_TRUST_EXPIRATION); }

	/**
	 * @return Preference rank, higher == better
	 */
	inline unsigned int preferenceRank() const
	{
		// This causes us to rank paths in order of IP scope rank (see InetAdddress.hpp) but
		// within each IP scope class to prefer IPv6 over IPv4.
		return ( ((unsigned int)_ipScope << 1) | (unsigned int)(_addr.ss_family == AF_INET6) );
	}
/**
* Check whether this address is valid for a ZeroTier path
*
* This checks the address type and scope against address types and scopes
* that we currently support for ZeroTier communication.
*
* @param a Address to check
* @return True if address is good for ZeroTier path use
*/
static inline bool isAddressValidForPath(const InetAddress &a)
{
if ((a.ss_family == AF_INET)||(a.ss_family == AF_INET6)) {
switch(a.ipScope()) {
/* Note: we don't do link-local at the moment. Unfortunately these
* cause several issues. The first is that they usually require a
* device qualifier, which we don't handle yet and can't portably
* push in PUSH_DIRECT_PATHS. The second is that some OSes assign
* these very ephemerally or otherwise strangely. So we'll use
* private, pseudo-private, shared (e.g. carrier grade NAT), or
* global IP addresses. */
case InetAddress::IP_SCOPE_PRIVATE:
case InetAddress::IP_SCOPE_PSEUDOPRIVATE:
case InetAddress::IP_SCOPE_SHARED:
case InetAddress::IP_SCOPE_GLOBAL:
if (a.ss_family == AF_INET6) {
// TEMPORARY HACK: for now, we are going to blacklist he.net IPv6
// tunnels due to very spotty performance and low MTU issues over
// these IPv6 tunnel links.
const uint8_t *ipd = reinterpret_cast<const uint8_t *>(reinterpret_cast<const struct sockaddr_in6 *>(&a)->sin6_addr.s6_addr);
if ((ipd[0] == 0x20)&&(ipd[1] == 0x01)&&(ipd[2] == 0x04)&&(ipd[3] == 0x70))
return false;
}
return true;
default:
return false;
}
}
return false;
}
/**
* @return Latency or 0xffff if unknown
*/
inline unsigned int latency() const { return _latency; }
/**
* @return Path quality -- lower is better
*/
2017-10-25 22:44:10 +00:00
inline long quality(const int64_t now) const
{
2017-10-25 22:44:10 +00:00
const int l = (long)_latency;
const int age = (long)std::min((now - _lastIn),(int64_t)(ZT_PATH_HEARTBEAT_PERIOD * 10)); // set an upper sanity limit to avoid overflow
return (((age < (ZT_PATH_HEARTBEAT_PERIOD + 5000)) ? l : (l + 0xffff + age)) * (long)((ZT_INETADDRESS_MAX_SCOPE - _ipScope) + 1));
}
2018-05-01 23:32:15 +00:00
	/**
	 * Take note that we're expecting a VERB_ACK on this path as of a specific time
	 *
	 * @param now Current time
	 * @param packetId ID of the packet
	 * @param payloadLength Number of bytes we're expecting a reply to
	 */
	inline void expectingAck(int64_t now, int64_t packetId, uint16_t payloadLength)
	{
		// Once the current expectation is overdue (older than ZT_PATH_ACK_INTERVAL)
		// keep its original timestamp so ackAge() keeps growing; otherwise restart
		// the clock at now.
		_expectingAckAsOf = ackAge(now) > ZT_PATH_ACK_INTERVAL ? _expectingAckAsOf : now;
		_unackedBytes += payloadLength;
		// NOTE(review): entries added here are only erased when matched in
		// receivedQoS(); packets never covered by a QoS report appear to
		// accumulate in _outgoingPacketRecords indefinitely -- confirm a
		// pruning mechanism exists elsewhere.
		_outgoingPacketRecords[packetId] = now;
	}
/**
* Record that we've received a VERB_ACK on this path, also compute throughput if required.
*
* @param now Current time
* @param ackedBytes Number of bytes awknowledged by other peer
2018-05-01 23:32:15 +00:00
*/
inline void receivedAck(int64_t now, int32_t ackedBytes)
{
_expectingAckAsOf = 0;
_unackedBytes = (ackedBytes > _unackedBytes) ? 0 : _unackedBytes - ackedBytes;
int64_t timeSinceThroughputEstimate = (now - _lastThroughputEstimation);
if (timeSinceThroughputEstimate >= ZT_PATH_THROUGHPUT_MEASUREMENT_INTERVAL) {
uint64_t throughput = (float)(_bytesAckedSinceLastThroughputEstimation) / ((float)timeSinceThroughputEstimate / (float)1000);
_throughputSamples->push(throughput);
_maxLifetimeThroughput = throughput > _maxLifetimeThroughput ? throughput : _maxLifetimeThroughput;
_lastThroughputEstimation = now;
_bytesAckedSinceLastThroughputEstimation = 0;
} else {
_bytesAckedSinceLastThroughputEstimation += ackedBytes;
}
2018-05-01 23:32:15 +00:00
}
/**
* @return Number of bytes this peer is responsible for ACKing since last ACK
2018-05-01 23:32:15 +00:00
*/
inline int32_t bytesToAck()
{
int32_t bytesToAck = 0;
for (int i=0; i<_packetsReceivedSinceLastAck; i++) {
bytesToAck += _recorded_len[i];
}
return bytesToAck;
}
2018-05-01 23:32:15 +00:00
/**
* @return Number of bytes thusfar sent that have not been awknowledged by the remote peer
2018-05-01 23:32:15 +00:00
*/
inline int64_t unackedSentBytes()
{
return _unackedBytes;
}
2018-05-01 23:32:15 +00:00
/**
* Account for the fact that an ACK was just sent. Reset counters, timers, and clear statistics buffers
*
* @param Current time
2018-05-01 23:32:15 +00:00
*/
inline void sentAck(int64_t now)
{
memset(_recorded_id, 0, sizeof(_recorded_id));
memset(_recorded_ts, 0, sizeof(_recorded_ts));
memset(_recorded_len, 0, sizeof(_recorded_len));
_packetsReceivedSinceLastAck = 0;
_lastAck = now;
}
2018-05-01 23:32:15 +00:00
	/**
	 * Receive QoS data, match with recorded egress times from this peer, compute latency
	 * estimates.
	 *
	 * @param now Current time
	 * @param count Number of records
	 * @param rx_id table of packet IDs
	 * @param rx_ts table of holding times
	 */
	inline void receivedQoS(int64_t now, int count, uint64_t *rx_id, uint8_t *rx_ts)
	{
		// Look up egress times and compute latency values for each record
		for (int j=0; j<count; j++) {
			std::map<uint64_t,uint64_t>::iterator it = _outgoingPacketRecords.find(rx_id[j]);
			if (it != _outgoingPacketRecords.end()) {
				// Round trip time = now minus the time we sent this packet...
				uint16_t rtt = (uint16_t)(now - it->second);
				// ...minus the holding time reported by the peer.
				// NOTE(review): unsigned subtraction -- if the reported holding
				// time exceeds the measured RTT this wraps to a huge value;
				// confirm callers guarantee rx_ts[j] <= rtt.
				uint16_t rtt_compensated = rtt - rx_ts[j];
				// One-way latency is estimated as half the compensated RTT
				// (implicitly converted back to unsigned in updateLatency()).
				float latency = rtt_compensated / 2.0;
				updateLatency(latency, now);
				_outgoingPacketRecords.erase(it);
			}
		}
	}
2018-05-01 23:32:15 +00:00
/**
* Generate the contents of a VERB_QOS_MEASUREMENT packet.
*
* @param now Current time
* @param qosBuffer destination buffer
* @return Size of payload
2018-05-01 23:32:15 +00:00
*/
inline int32_t generateQoSPacket(int64_t now, char *qosBuffer)
{
int32_t len = 0;
for (int i=0; i<_packetsReceivedSinceLastAck; i++) {
uint64_t id = _recorded_id[i];
memcpy(qosBuffer, &id, sizeof(uint64_t));
qosBuffer+=sizeof(uint64_t);
uint8_t holdingTime = (uint8_t)(now - _recorded_ts[i]);
memcpy(qosBuffer, &holdingTime, sizeof(uint8_t));
qosBuffer+=sizeof(uint8_t);
len+=sizeof(uint64_t)+sizeof(uint8_t);
}
return len;
}
2018-05-01 23:32:15 +00:00
	/**
	 * Account for the fact that a VERB_QOS_MEASUREMENT was just sent. Reset timers.
	 *
	 * @param now Current time
	 */
	inline void sentQoS(int64_t now) { _lastQoSMeasurement = now; }
2018-05-01 23:32:15 +00:00
/**
* Record statistics on incoming packets. Used later to estimate QoS.
*
* @param now Current time
* @param packetId
* @param payloadLength
2018-05-01 23:32:15 +00:00
*/
inline void recordIncomingPacket(int64_t now, int64_t packetId, int32_t payloadLength)
{
_recorded_ts[_packetsReceivedSinceLastAck] = now;
_recorded_id[_packetsReceivedSinceLastAck] = packetId;
_recorded_len[_packetsReceivedSinceLastAck] = payloadLength;
_packetsReceivedSinceLastAck++;
_packetValiditySamples->push(true);
2018-05-01 23:32:15 +00:00
}
	/**
	 * @param now Current time
	 * @return Whether an ACK (VERB_ACK) packet needs to be emitted at this time
	 */
	inline bool needsToSendAck(int64_t now) {
		// ACK when the interval has elapsed or the record table is full,
		// but only if at least one packet has actually been received.
		return ((now - _lastAck) >= ZT_PATH_ACK_INTERVAL ||
			(_packetsReceivedSinceLastAck == ZT_PATH_QOS_TABLE_SIZE)) && _packetsReceivedSinceLastAck;
	}

	/**
	 * @param now Current time
	 * @return Whether a QoS (VERB_QOS_MEASUREMENT) packet needs to be emitted at this time
	 */
	inline bool needsToSendQoS(int64_t now) {
		// Same gating as ACKs: table full or interval elapsed, and at least one packet received.
		return ((_packetsReceivedSinceLastAck >= ZT_PATH_QOS_TABLE_SIZE) ||
			((now - _lastQoSMeasurement) > ZT_PATH_QOS_INTERVAL)) && _packetsReceivedSinceLastAck;
	}

	/**
	 * How much time has elapsed since we've been expecting a VERB_ACK on this path. This value
	 * is used to determine a more relevant path "age". This lets us penalize paths which are no
	 * longer ACKing, but not those that simply aren't being used to carry traffic at the
	 * current time. Returns 0 when no ACK is currently expected.
	 */
	inline int64_t ackAge(int64_t now) { return _expectingAckAsOf ? now - _expectingAckAsOf : 0; }

	/**
	 * @return The maximum observed throughput for this path
	 */
	inline uint64_t maxLifetimeThroughput() { return _maxLifetimeThroughput; }

	/**
	 * @return The mean throughput of this link (accumulated from ACKed byte
	 * counts per elapsed second in receivedAck())
	 */
	inline float meanThroughput() { return _meanThroughput; }

	/**
	 * Assign a new relative quality value for this path in the aggregate link
	 *
	 * @param rq Quality of this path in comparison to other paths available to this peer
	 */
	inline void updateRelativeQuality(float rq) { _lastComputedRelativeQuality = rq; }

	/**
	 * @return Quality of this path compared to others in the aggregate link
	 */
	inline float relativeQuality() { return _lastComputedRelativeQuality; }

	/**
	 * @return Most recently cached stability estimate (stability is expensive to compute,
	 * so processBackgroundPathMeasurements() caches the result here)
	 */
	inline float lastComputedStability() { return _lastComputedStability; }

	/**
	 * @return A pointer to a cached copy of the human-readable name of the interface this Path's localSocket is bound to
	 */
	inline char *getName() { return _ifname; }

	/**
	 * @return Packet delay variance (similar to jitter; computed in processBackgroundPathMeasurements())
	 */
	inline float packetDelayVariance() { return _packetDelayVariance; }

	/**
	 * @return Previously-computed mean latency
	 */
	inline float meanLatency() { return _meanLatency; }

	/**
	 * @return Packet loss ratio (PLR)
	 *
	 * NOTE(review): _packetLossRatio is initialized to 0 but never written by
	 * any code in this header -- confirm it is maintained elsewhere.
	 */
	inline float packetLossRatio() { return _packetLossRatio; }

	/**
	 * @return Packet error ratio (PER), as computed in processBackgroundPathMeasurements()
	 */
	inline float packetErrorRatio() { return _packetErrorRatio; }

	/**
	 * Record an invalid incoming packet. This packet failed MAC/compression/cipher checks and will now
	 * contribute to a Packet Error Ratio (PER).
	 */
	inline void recordInvalidPacket() { _packetValiditySamples->push(false); }

	/**
	 * @return A pointer to a cached copy of the address string for this Path (For debugging only)
	 */
	inline char *getAddressString() { return _addrString; }
	/**
	 * Compute and cache stability and performance metrics. The resultant stability coefficient is a measure of how "well behaved"
	 * this path is. This figure is substantially different from (but required for the estimation of) the path's overall "quality".
	 *
	 * Runs at most once every ZT_PATH_QUALITY_COMPUTE_INTERVAL ms; otherwise a no-op.
	 *
	 * @param now Current time
	 * @param peerId Peer ID (not referenced by this routine)
	 */
	inline void processBackgroundPathMeasurements(int64_t now, const int64_t peerId) {
		if (now - _lastPathQualityComputeTime > ZT_PATH_QUALITY_COMPUTE_INTERVAL) {
			_lastPathQualityComputeTime = now;
			// Refresh the cached interface name and address string
			_phy->getIfName((PhySocket *)((uintptr_t)_localSocket), _ifname, 16);
			address().toString(_addrString);
			// Pull fresh means from the sample ring buffers
			_meanThroughput = _throughputSamples->mean();
			_meanLatency = _latencySamples->mean();
			_packetDelayVariance = _latencySamples->stddev(); // Similar to "jitter" (SEE: RFC 3393, RFC 4689)
			// If no packet validity samples, assume PER==0
			_packetErrorRatio = 1 - (_packetValiditySamples->count() ? _packetValiditySamples->mean() : 1);
			// Compute path stability
			// Normalize measurements with wildly different ranges into a reasonable range
			float normalized_pdv = Utils::normalize(_packetDelayVariance, 0, ZT_PATH_MAX_PDV, 0, 10);
			float normalized_la = Utils::normalize(_meanLatency, 0, ZT_PATH_MAX_MEAN_LATENCY, 0, 10);
			// Coefficient of variation of throughput (stddev/mean), or 1 if no throughput yet
			float throughput_cv = _throughputSamples->mean() > 0 ? _throughputSamples->stddev() / _throughputSamples->mean() : 1;
			// Form an exponential cutoff and apply contribution weights
			float pdv_contrib = exp((-1)*normalized_pdv) * ZT_PATH_CONTRIB_PDV;
			float latency_contrib = exp((-1)*normalized_la) * ZT_PATH_CONTRIB_LATENCY;
			float throughput_disturbance_contrib = exp((-1)*throughput_cv) * ZT_PATH_CONTRIB_THROUGHPUT_DISTURBANCE;
			// Obey user-defined ignored contributions (a weight of 0 disables a term by forcing it to 1)
			pdv_contrib = ZT_PATH_CONTRIB_PDV > 0.0 ? pdv_contrib : 1;
			latency_contrib = ZT_PATH_CONTRIB_LATENCY > 0.0 ? latency_contrib : 1;
			throughput_disturbance_contrib = ZT_PATH_CONTRIB_THROUGHPUT_DISTURBANCE > 0.0 ? throughput_disturbance_contrib : 1;
			// Compute the quality product
			_lastComputedStability = pdv_contrib + latency_contrib + throughput_disturbance_contrib;
			_lastComputedStability *= 1 - _packetErrorRatio;
			_qualitySamples->push(_lastComputedStability);
		}
	}
	/**
	 * @return True if this path is alive (receiving heartbeats)
	 */
	inline bool alive(const int64_t now) const { return ((now - _lastIn) < (ZT_PATH_HEARTBEAT_PERIOD + 5000)); }

	/**
	 * @return True if this path needs a heartbeat
	 */
	inline bool needsHeartbeat(const int64_t now) const { return ((now - _lastOut) >= ZT_PATH_HEARTBEAT_PERIOD); }

	/**
	 * @return Last time we sent something
	 */
	inline int64_t lastOut() const { return _lastOut; }

	/**
	 * @return Last time we received anything
	 */
	inline int64_t lastIn() const { return _lastIn; }

	/**
	 * @return Time last trust-established packet was received
	 */
	inline int64_t lastTrustEstablishedPacketReceived() const { return _lastTrustEstablishedPacketReceived; }
2017-07-01 00:32:07 +00:00
/**
* Initialize statistical buffers
*/
inline void prepareBuffers() {
_throughputSamples = new RingBuffer<uint64_t>(ZT_PATH_QUALITY_METRIC_WIN_SZ);
_latencySamples = new RingBuffer<uint32_t>(ZT_PATH_QUALITY_METRIC_WIN_SZ);
_qualitySamples = new RingBuffer<float>(ZT_PATH_QUALITY_METRIC_WIN_SZ);
_packetValiditySamples = new RingBuffer<bool>(ZT_PATH_QUALITY_METRIC_WIN_SZ);
memset(_ifname, 0, 16);
memset(_recorded_id, 0, sizeof(_recorded_id));
memset(_recorded_ts, 0, sizeof(_recorded_ts));
memset(_recorded_len, 0, sizeof(_recorded_len));
memset(_addrString, 0, sizeof(_addrString));
}
private:
	volatile int64_t _lastOut;                            // last send time (sent())
	volatile int64_t _lastIn;                             // last receive time (received())
	volatile int64_t _lastTrustEstablishedPacketReceived; // see trustedPacketReceived()
	volatile int64_t _lastPathQualityComputeTime;         // throttle for processBackgroundPathMeasurements()
	int64_t _localSocket;                                 // local socket handle from external code; -1 if none
	volatile unsigned int _latency;                       // smoothed latency; 0xffff == unknown
	InetAddress _addr;                                    // remote physical address
	InetAddress::IpScope _ipScope; // memoize this since it's a computed value checked often
	AtomicCounter __refCount;      // reference count for SharedPtr<Path>

	// Fixed-size tables recording packets received since the last ACK
	// (filled by recordIncomingPacket(), cleared by sentAck())
	uint64_t _recorded_id[ZT_PATH_QOS_TABLE_SIZE];
	uint64_t _recorded_ts[ZT_PATH_QOS_TABLE_SIZE];
	uint16_t _recorded_len[ZT_PATH_QOS_TABLE_SIZE];

	// packetId -> egress time for packets we expect a QoS report about
	// (added by expectingAck(), erased when matched in receivedQoS())
	std::map<uint64_t, uint64_t> _outgoingPacketRecords;

	int64_t _lastAck;                    // when we last sent an ACK
	int64_t _lastThroughputEstimation;   // when we last pushed a throughput sample
	int64_t _lastQoSMeasurement;         // when we last sent a QoS packet
	int64_t _unackedBytes;               // bytes sent but not yet ACKed
	int64_t _expectingAckAsOf;           // since when we've awaited an ACK; 0 == not waiting
	int16_t _packetsReceivedSinceLastAck;

	float _meanThroughput;
	uint64_t _maxLifetimeThroughput;
	uint64_t _bytesAckedSinceLastThroughputEstimation;

	volatile float _meanLatency;
	float _packetDelayVariance;
	float _packetErrorRatio;
	float _packetLossRatio; // NOTE(review): never written in this header -- confirm maintained elsewhere

	// cached estimates
	float _lastComputedStability;
	float _lastComputedRelativeQuality;

	// cached human-readable strings for tracing purposes
	char _ifname[16];
	char _addrString[256];

	// Sample windows (owned raw pointers: allocated in prepareBuffers(), freed in ~Path())
	RingBuffer<uint64_t> *_throughputSamples;
	RingBuffer<uint32_t> *_latencySamples;
	RingBuffer<float> *_qualitySamples;
	RingBuffer<bool> *_packetValiditySamples;
};
} // namespace ZeroTier
#endif