Tons of refactoring: changed the desperation algorithm to use the max of core or link desperation, ported the core loop code from the old Node.cpp to the new C API version, etc.
parent 9e55f882d3
commit 49f031ccb4
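The "max of core or link" change mentioned in the commit message means the desperation value attached to an outgoing packet is no longer derived per path from send/receive timing; it is the larger of the node-wide core desperation (how long the node has gone without hearing from an upstream supernode, see Node.cpp below) and the desperation last reported for that specific link (see Path.hpp below). Desperation drives escalation to less efficient fallback transports such as TCP or HTTP relaying. A minimal sketch of the selection rule, using simplified stand-in types rather than the real ZeroTier classes:

    // Sketch only: NodeView/PathView are illustrative stand-ins, not the real Node/Path classes.
    #include <algorithm>

    struct NodeView {
        unsigned int coreDesperation;        // grows the longer no supernode has responded
    };

    struct PathView {
        unsigned int lastReceiveDesperation; // desperation last observed on this specific link
    };

    // Effective desperation used when sending on a path: the max of core or link.
    inline unsigned int effectiveDesperation(const NodeView &n, const PathView &p)
    {
        return std::max(n.coreDesperation, p.lastReceiveDesperation);
    }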
@@ -53,6 +53,11 @@ extern "C" {
/* Core constants */
/****************************************************************************/

/**
* Default port for the ZeroTier service
*/
#define ZT1_DEFAULT_PORT 9993

/**
* Maximum MTU for ZeroTier virtual networks
*
@@ -149,7 +154,12 @@ enum ZT1_ResultCode
/**
* Invalid packet or failed authentication
*/
ZT1_RESULT_ERROR_PACKET_INVALID = 1000
ZT1_RESULT_ERROR_PACKET_INVALID = 1000,

/**
* Network ID not valid
*/
ZT1_RESULT_ERROR_NETWORK_NOT_FOUND = 1001
};

/**
@@ -674,6 +684,7 @@ typedef void (*ZT1_VirtualNetworkFrameFunction)(ZT1_Node *,uint64_t,uint64_t,uin
* @param dataStorePutFunction Function called to put objects in persistent storage
* @param virtualNetworkConfigFunction Function to be called when virtual LANs are created, deleted, or their config parameters change
* @param statusCallback Function to receive status updates and non-fatal error notices
* @param overrideRootTopology If not NULL, must contain string-serialized root topology (for testing, default: NULL)
* @return OK (0) or error code if a fatal error condition has occurred
*/
enum ZT1_ResultCode ZT1_Node_new(
@@ -684,7 +695,8 @@ enum ZT1_ResultCode ZT1_Node_new(
ZT1_WirePacketSendFunction wirePacketSendFunction,
ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT1_StatusCallback statusCallback);
ZT1_StatusCallback statusCallback,
const char *overrideRootTopology = (const char *)0);

/**
* Delete a node and free all resources it consumes
@@ -705,7 +717,7 @@ void ZT1_Node_delete(ZT1_Node *node);
* @param linkDesperation Link desperation metric for link or protocol over which packet arrived
* @param packetData Packet data
* @param packetLength Packet length
* @param nextCallDeadline Result: set to deadline for next call to one of the three processXXX() methods
* @param nextBackgroundTaskDeadline Value/result: set to deadline for next call to one of the three processXXX() methods
* @return OK (0) or error code if a fatal error condition has occurred
*/
enum ZT1_ResultCode ZT1_Node_processWirePacket(
@@ -715,7 +727,7 @@ enum ZT1_ResultCode ZT1_Node_processWirePacket(
unsigned int linkDesperation,
const void *packetData,
unsigned int packetLength,
uint64_t *nextCallDeadline);
uint64_t *nextBackgroundTaskDeadline);

/**
* Process a frame from a virtual network port (tap)
@@ -729,7 +741,7 @@ enum ZT1_ResultCode ZT1_Node_processWirePacket(
* @param vlanId 10-bit VLAN ID or 0 if none
* @param frameData Frame payload data
* @param frameLength Frame payload length
* @param nextCallDeadline Result: set to deadline for next call to one of the three processXXX() methods
* @param nextBackgroundTaskDeadline Value/result: set to deadline for next call to one of the three processXXX() methods
* @return OK (0) or error code if a fatal error condition has occurred
*/
enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
@@ -742,20 +754,17 @@ enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
unsigned int vlanId,
const void *frameData,
unsigned int frameLength,
uint64_t *nextCallDeadline);
uint64_t *nextBackgroundTaskDeadline);

/**
* Perform required periodic operations even if no new frames or packets have arrived
*
* If the nextCallDeadline arrives and nothing has happened, call this method
* to do required background tasks like pinging and cleanup.
*
* @param node Node instance
* @param now Current clock in milliseconds
* @param nextCallDeadline Result: set to deadline for next call to one of the three processXXX() methods
* @param nextBackgroundTaskDeadline Value/result: set to deadline for next call to one of the three processXXX() methods
* @return OK (0) or error code if a fatal error condition has occurred
*/
enum ZT1_ResultCode ZT1_Node_processBackgroundTasks(ZT1_Node *node,uint64_t now,uint64_t *nextCallDeadline);
enum ZT1_ResultCode ZT1_Node_processBackgroundTasks(ZT1_Node *node,uint64_t now,uint64_t *nextBackgroundTaskDeadline);

/**
* Join a network
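Taken together, the ZT1_Node_* changes above turn the deadline argument from a pure result (nextCallDeadline) into a value/result (nextBackgroundTaskDeadline): the host keeps a single deadline variable, passes its address into every processXXX() call, and invokes ZT1_Node_processBackgroundTasks() itself when the deadline passes with no packet or frame activity. A rough sketch of a host service loop under those assumptions; every host* name is a hypothetical placeholder, not part of the ZeroTier API:

    /* Sketch only. The host* functions are invented glue; only the ZT1_* calls named
     * in the comments come from the API in this commit. */
    #include <stdint.h>

    extern uint64_t hostClockMs(void);                              /* placeholder clock */
    extern int hostWaitForPacket(uint64_t timeoutMs);               /* placeholder: nonzero if a packet arrived */
    extern void hostFeedPacketToNode(uint64_t now, uint64_t *dl);   /* placeholder: calls ZT1_Node_processWirePacket(..., dl) */
    extern void hostRunBackgroundTasks(uint64_t now, uint64_t *dl); /* placeholder: calls ZT1_Node_processBackgroundTasks(node, now, dl) */

    void hostServiceLoop(void)
    {
        uint64_t deadline = 0; /* shared value/result deadline across all processXXX() calls */
        for (;;) {
            const uint64_t now = hostClockMs();
            if (hostWaitForPacket((deadline > now) ? (deadline - now) : 0))
                hostFeedPacketToNode(now, &deadline);   /* may run background tasks internally */
            else if (now >= deadline)
                hostRunBackgroundTasks(now, &deadline); /* deadline hit with no traffic */
        }
    }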
@@ -140,11 +140,6 @@
*/
#define ZT_ADDRESS_RESERVED_PREFIX 0xff

/**
* Default local port for ZeroTier UDP traffic
*/
#define ZT_DEFAULT_UDP_PORT 9993

/**
* Default payload MTU for UDP packets
*
@@ -190,7 +185,12 @@
/**
* How often Topology::clean() and Network::clean() and similar are called, in ms
*/
#define ZT_DB_CLEAN_PERIOD 120000
#define ZT_HOUSEKEEPING_PERIOD 120000

/**
* Overriding granularity for timer tasks to prevent CPU-intensive thrashing on every packet
*/
#define ZT_CORE_TIMER_TASK_GRANULARITY 1000

/**
* How long to remember peer records in RAM if they haven't been used
@@ -229,11 +229,6 @@
*/
#define ZT_MULTICAST_LIKE_EXPIRE 600000

/**
* Time between polls of local tap devices for multicast membership changes
*/
#define ZT_MULTICAST_LOCAL_POLL_PERIOD 10000

/**
* Delay between explicit MULTICAST_GATHER requests for a given multicast channel
*/
@@ -252,9 +247,20 @@
#define ZT_MULTICAST_DEFAULT_LIMIT 32

/**
* Delay between scans of the topology active peer DB for peers that need ping
* How frequently to send a zero-byte UDP keepalive packet
*
* There are NATs with timeouts as short as 30 seconds, so this turns out
* to be needed.
*/
#define ZT_PING_CHECK_DELAY 10000
#define ZT_NAT_KEEPALIVE_DELAY 25000

/**
* Delay between scans of the topology active peer DB for peers that need ping
*
* This is also how often pings will be retried to upstream peers (supernodes)
* constantly until something is heard.
*/
#define ZT_PING_CHECK_INVERVAL 6250

/**
* Delay between ordinary case pings of direct links
@@ -267,39 +273,14 @@
#define ZT_NETWORK_AUTOCONF_DELAY 60000

/**
* Delay in core loop between checks of network autoconf newness
* Increment core desperation after this multiple of ping checks without responses from upstream peers
*/
#define ZT_NETWORK_AUTOCONF_CHECK_DELAY 10000

/**
* Time since a ping was sent to be considered unanswered
*/
#define ZT_PING_UNANSWERED_AFTER 1500

/**
* Try to ping supernodes this often until we get something from them
*/
#define ZT_STARTUP_AGGRO (ZT_PING_UNANSWERED_AFTER * 2)

/**
* How long since last message from an authoritative upstream peer before we increment our desperation level?
*/
#define ZT_DESPERATION_INCREMENT (ZT_STARTUP_AGGRO * 2)

/**
* Interval between "spams" if desperation > 0
*/
#define ZT_DESPERATION_SPAM_INTERVAL 60000

/**
* Maximum delay between runs of the main loop in Node.cpp
*/
#define ZT_MAX_SERVICE_LOOP_INTERVAL ZT_STARTUP_AGGRO
#define ZT_CORE_DESPERATION_INCREMENT 2

/**
* Timeout for overall peer activity (measured from last receive)
*/
#define ZT_PEER_ACTIVITY_TIMEOUT ((ZT_PEER_DIRECT_PING_DELAY * 2) + ZT_PING_CHECK_DELAY)
#define ZT_PEER_ACTIVITY_TIMEOUT ((ZT_PEER_DIRECT_PING_DELAY * 2) + ZT_PING_CHECK_INVERVAL)

/**
* Stop relaying via peers that have not responded to direct sends
@@ -312,16 +293,6 @@
*/
#define ZT_PEER_RELAY_CONVERSATION_LATENCY_THRESHOLD 10000

/**
* Delay sleep overshoot for detection of a probable sleep/wake event
*/
#define ZT_SLEEP_WAKE_DETECTION_THRESHOLD 5000

/**
* Time to pause main service loop after sleep/wake detect
*/
#define ZT_SLEEP_WAKE_SETTLE_TIME 5000

/**
* Minimum interval between attempts by relays to unite peers
*
@@ -367,11 +338,6 @@
*/
#define ZT_MAX_BRIDGE_SPAM 16

/**
* Timeout for IPC connections (e.g. unix domain sockets) in seconds
*/
#define ZT_IPC_TIMEOUT 600

/**
* A test pseudo-network-ID that can be joined
*
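One consequence of the new constants: with ZT_PING_CHECK_INVERVAL at 6250 ms and ZT_CORE_DESPERATION_INCREMENT at 2, core desperation steps up roughly once per 6250 * 2 = 12500 ms that passes without a response from an upstream peer. That is the reading suggested by the ZT_CORE_DESPERATION_INCREMENT comment; the sketch below is illustrative, not the literal core loop code:

    #include <cstdint>

    // Sketch: msSinceLastUpstreamResponse is a hypothetical elapsed-time input.
    inline unsigned int coreDesperationFor(uint64_t msSinceLastUpstreamResponse)
    {
        // ZT_PING_CHECK_INVERVAL * ZT_CORE_DESPERATION_INCREMENT == 6250 * 2 == 12500 ms per step
        return (unsigned int)(msSinceLastUpstreamResponse / 12500ULL);
    }
    // coreDesperationFor(5000) == 0, coreDesperationFor(30000) == 2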
@@ -158,7 +158,6 @@ Defaults::Defaults() :
rootTopologyAuthorities(_mkRootTopologyAuth()),
updateAuthorities(_mkUpdateAuth()),
updateLatestNfoURL(_mkUpdateUrl()),
rootTopologyUpdateURL("http://download.zerotier.com/net/topology/ROOT"),
v4Broadcast(((uint32_t)0xffffffff),ZT_DEFAULT_UDP_PORT)
{
}
@@ -82,11 +82,6 @@ public:
*/
const std::string updateLatestNfoURL;

/**
* URL to check for updates to root topology
*/
const std::string rootTopologyUpdateURL;

/**
* Address for IPv4 LAN auto-location broadcasts: 255.255.255.255:9993
*/
@@ -70,15 +70,14 @@ public:
/**
* Create a new packet-in-decode
*
* @param b Source buffer with raw packet data
* @param data Packet data
* @param len Packet length
* @param remoteAddress Address from which packet came
* @param linkDesperation Link desperation for link over which packet was received
* @throws std::out_of_range Range error processing packet
*/
template<unsigned int C2>
IncomingPacket(const Buffer<C2> &b,const InetAddress &remoteAddress,unsigned int linkDesperation)
throw(std::out_of_range) :
Packet(b),
IncomingPacket(const void *data,unsigned int len,const InetAddress &remoteAddress,unsigned int linkDesperation) :
Packet(data,len),
_receiveTime(Utils::now()),
_remoteAddress(remoteAddress),
_linkDesperation(linkDesperation),
node/Node.cpp
@@ -42,6 +42,7 @@
#include "Address.hpp"
#include "Identity.hpp"
#include "SelfAwareness.hpp"
#include "Defaults.hpp"

namespace ZeroTier {

@@ -56,7 +57,8 @@ Node::Node(
ZT1_WirePacketSendFunction wirePacketSendFunction,
ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT1_StatusCallback statusCallback) :
ZT1_StatusCallback statusCallback,
const char *overrideRootTopology) :
RR(new RuntimeEnvironment(this)),
_dataStoreGetFunction(dataStoreGetFunction),
_dataStorePutFunction(dataStorePutFunction),
@@ -66,7 +68,11 @@ Node::Node(
_statusCallback(statusCallback),
_networks(),
_networks_m(),
_now(now)
_now(now),
_startTimeAfterInactivity(0),
_lastPingCheck(0),
_lastHousekeepingRun(0),
_coreDesperation(0)
{
_newestVersionSeen[0] = ZEROTIER_ONE_VERSION_MAJOR;
_newestVersionSeen[1] = ZEROTIER_ONE_VERSION_MINOR;
@@ -106,6 +112,21 @@ Node::Node(
throw;
}

Dictionary rt;
if (overrideRootTopology) {
rt.fromString(std::string(overrideRootTopology));
} else {
std::string rttmp(dataStoreGet("root-topology"));
if (rttmp.length() > 0) {
rt.fromString(rttmp);
if (!Topology::authenticateRootTopology(rt))
rt.clear();
}
if (!rt.size())
rt.fromString(ZT_DEFAULTS.defaultRootTopology);
}
RR->topology->setSupernodes(Dictionary(rt.get("supernodes","")));

postEvent(ZT1_EVENT_UP);
}

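Because the header above gives overrideRootTopology a default of (const char *)0, existing callers of ZT1_Node_new() compile unchanged; only test code that wants a private root topology passes the extra string, and the constructor otherwise falls back first to an authenticated "root-topology" object from the data store and then to the compiled-in default. A minimal usage sketch; createNode() is a hypothetical wrapper standing in for the full ZT1_Node_new() call, whose earlier parameters are not shown in this diff:

    #include "ZeroTierOne.h" /* for ZT1_Node */

    /* Hypothetical helper: wraps ZT1_Node_new() and forwards only the argument of interest here. */
    ZT1_Node *createNode(const char *overrideRootTopology);

    extern const char *testRootTopologyString; /* hypothetical: string-serialized root topology built by a test */

    void example(void)
    {
        ZT1_Node *prod = createNode((const char *)0);        /* normal operation: data store or built-in default */
        ZT1_Node *test = createNode(testRootTopologyString); /* test: explicit override wins */
        (void)prod; (void)test;
    }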
@@ -127,9 +148,17 @@ ZT1_ResultCode Node::processWirePacket(
unsigned int linkDesperation,
const void *packetData,
unsigned int packetLength,
uint64_t *nextCallDeadline)
uint64_t *nextBackgroundTaskDeadline)
{
processBackgroundTasks(now,nextCallDeadline);
if (now >= *nextBackgroundTaskDeadline) {
ZT1_ResultCode rc = processBackgroundTasks(now,nextBackgroundTaskDeadline);
if (rc != ZT1_RESULT_OK)
return rc;
} else _now = now;

RR->sw->onRemotePacket(*(reinterpret_cast<const InetAddress *>(remoteAddress)),linkDesperation,packetData,packetLength);

return ZT1_RESULT_OK;
}

ZT1_ResultCode Node::processVirtualNetworkFrame(
@@ -141,14 +170,107 @@ ZT1_ResultCode Node::processVirtualNetworkFrame(
unsigned int vlanId,
const void *frameData,
unsigned int frameLength,
uint64_t *nextCallDeadline)
uint64_t *nextBackgroundTaskDeadline)
{
processBackgroundTasks(now,nextCallDeadline);
if (now >= *nextBackgroundTaskDeadline) {
ZT1_ResultCode rc = processBackgroundTasks(now,nextBackgroundTaskDeadline);
if (rc != ZT1_RESULT_OK)
return rc;
} else _now = now;

try {
SharedPtr<Network> nw(network(nwid));
if (nw)
RR->sw->onLocalEthernet(nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
else return ZT1_RESULT_ERROR_NETWORK_NOT_FOUND;
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
}

return ZT1_RESULT_OK;
}

ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,uint64_t *nextCallDeadline)
class _PingPeersThatNeedPing
{
public:
_PingPeersThatNeedPing(const RuntimeEnvironment *renv,uint64_t now) :
lastReceiveFromSupernode(0),
RR(renv),
_now(now),
_supernodes(RR->topology->supernodeAddresses()) {}

uint64_t lastReceiveFromSupernode;

inline void operator()(Topology &t,const SharedPtr<Peer> &p)
{
if (std::find(_supernodes.begin(),_supernodes.end(),p->address()) != _supernodes.end()) {
p->doPingAndKeepalive(RR,_now);
if (p->lastReceive() > lastReceiveFromSupernode)
lastReceiveFromSupernode = p->lastReceive();
} else if (p->alive(_now)) {
p->doPingAndKeepalive(RR,_now);
}
}
private:
const RuntimeEnvironment *RR;
uint64_t _now;
std::vector<Address> _supernodes;
};

ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,uint64_t *nextBackgroundTaskDeadline)
{
_now = now;
Mutex::Lock bl(_backgroundTasksLock);

if ((now - _lastPingCheck) >= ZT_PING_CHECK_INVERVAL) {
_lastPingCheck = now;

if ((now - _startTimeAfterInactivity) > (ZT_PING_CHECK_INVERVAL * 3))
_startTimeAfterInactivity = now;

try {
_PingPeersThatNeedPing pfunc(RR,now);
RR->topology->eachPeer<_PingPeersThatNeedPing &>(pfunc);

_coreDesperation = (unsigned int)((now - std::max(_startTimeAfterInactivity,pfunc.lastReceiveFromSupernode)) / (ZT_PING_CHECK_INVERVAL * ZT_CORE_DESPERATION_INCREMENT));
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
}

try {
Mutex::Lock _l(_networks_m);
for(std::map< uint64_t,SharedPtr<Network> >::const_iterator n(_networks.begin());n!=_networks.end();++n) {
if ((now - n->second->lastConfigUpdate()) >= ZT_NETWORK_AUTOCONF_DELAY)
n->second->requestConfiguration();
}
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
}
}

if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
_lastHousekeepingRun = now;

try {
RR->topology->clean(now);
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
}

try {
RR->mc->clean(now);
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
}
}

try {
*nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min((unsigned long)ZT_PING_CHECK_INVERVAL,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
}

return ZT1_RESULT_OK;
}

ZT1_ResultCode Node::join(uint64_t nwid)
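The deadline computed at the end of processBackgroundTasks() above is just now plus the delay requested by Switch::doTimerTasks(now), clamped into [ZT_CORE_TIMER_TASK_GRANULARITY, ZT_PING_CHECK_INVERVAL] = [1000 ms, 6250 ms]: a switch that wants to run again in 50 ms still gets scheduled no sooner than 1000 ms out (avoiding per-packet timer thrashing), and a switch with nothing pending (returning its 0xffffffff ceiling) is still revisited within 6250 ms so ping checks keep their cadence. A standalone restatement with the constant values hard-coded:

    #include <algorithm>
    #include <cstdint>

    // Clamp the switch's requested delay into [granularity, ping-check interval], then schedule from now.
    inline uint64_t nextBackgroundDeadline(uint64_t now, unsigned long switchDelayMs)
    {
        const unsigned long granularity  = 1000; // ZT_CORE_TIMER_TASK_GRANULARITY
        const unsigned long pingInterval = 6250; // ZT_PING_CHECK_INVERVAL
        return now + (uint64_t)std::max(std::min(pingInterval, switchDelayMs), granularity);
    }
    // nextBackgroundDeadline(t, 50)           -> t + 1000
    // nextBackgroundDeadline(t, 0xffffffffUL) -> t + 6250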
@@ -265,11 +387,12 @@ enum ZT1_ResultCode ZT1_Node_new(
ZT1_WirePacketSendFunction wirePacketSendFunction,
ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT1_StatusCallback statusCallback)
ZT1_StatusCallback statusCallback,
const char *overrideRootTopology)
{
*node = (ZT1_Node *)0;
try {
*node = reinterpret_cast<ZT1_Node *>(new ZeroTier::Node(now,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,statusCallback));
*node = reinterpret_cast<ZT1_Node *>(new ZeroTier::Node(now,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,statusCallback,overrideRootTopology));
return ZT1_RESULT_OK;
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
@@ -294,10 +417,10 @@ enum ZT1_ResultCode ZT1_Node_processWirePacket(
unsigned int linkDesperation,
const void *packetData,
unsigned int packetLength,
uint64_t *nextCallDeadline)
uint64_t *nextBackgroundTaskDeadline)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(now,remoteAddress,linkDesperation,packetData,packetLength,nextCallDeadline);
return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(now,remoteAddress,linkDesperation,packetData,packetLength,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
@@ -315,10 +438,10 @@ enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
unsigned int vlanId,
const void *frameData,
unsigned int frameLength,
uint64_t *nextCallDeadline)
uint64_t *nextBackgroundTaskDeadline)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(now,nwid,sourceMac,destMac,etherType,vlanId,frameData,frameLength,nextCallDeadline);
return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(now,nwid,sourceMac,destMac,etherType,vlanId,frameData,frameLength,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
@@ -326,10 +449,10 @@ enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
}
}

enum ZT1_ResultCode ZT1_Node_processBackgroundTasks(ZT1_Node *node,uint64_t now,uint64_t *nextCallDeadline)
enum ZT1_ResultCode ZT1_Node_processBackgroundTasks(ZT1_Node *node,uint64_t now,uint64_t *nextBackgroundTaskDeadline)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(now,nextCallDeadline);
return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(now,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
@@ -62,7 +62,8 @@ public:
ZT1_WirePacketSendFunction wirePacketSendFunction,
ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT1_StatusCallback statusCallback);
ZT1_StatusCallback statusCallback,
const char *overrideRootTopology);

~Node();

@@ -74,7 +75,7 @@ public:
unsigned int linkDesperation,
const void *packetData,
unsigned int packetLength,
uint64_t *nextCallDeadline);
uint64_t *nextBackgroundTaskDeadline);
ZT1_ResultCode processVirtualNetworkFrame(
uint64_t now,
uint64_t nwid,
@@ -84,8 +85,8 @@ public:
unsigned int vlanId,
const void *frameData,
unsigned int frameLength,
uint64_t *nextCallDeadline);
ZT1_ResultCode processBackgroundTasks(uint64_t now,uint64_t *nextCallDeadline);
uint64_t *nextBackgroundTaskDeadline);
ZT1_ResultCode processBackgroundTasks(uint64_t now,uint64_t *nextBackgroundTaskDeadline);
ZT1_ResultCode join(uint64_t nwid);
ZT1_ResultCode leave(uint64_t nwid);
ZT1_ResultCode multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
@@ -163,6 +164,8 @@ public:
return nw;
}

inline unsigned int coreDesperation() const throw() { return _coreDesperation; }

inline bool dataStorePut(const char *name,const void *data,unsigned int len,bool secure) { return (_dataStorePutFunction(reinterpret_cast<ZT1_Node *>(this),name,data,len,(int)secure) == 0); }
inline bool dataStorePut(const char *name,const std::string &data,bool secure) { return dataStorePut(name,(const void *)data.data(),(unsigned int)data.length(),secure); }
inline void dataStoreDelete(const char *name) { _dataStorePutFunction(reinterpret_cast<ZT1_Node *>(this),name,(const void *)0,0,0); }
@@ -190,7 +193,13 @@ private:
std::map< uint64_t,SharedPtr<Network> > _networks;
Mutex _networks_m;

volatile uint64_t _now; // time of last run()
Mutex _backgroundTasksLock;

uint64_t _now;
uint64_t _startTimeAfterInactivity;
uint64_t _lastPingCheck;
uint64_t _lastHousekeepingRun;
unsigned int _coreDesperation;
unsigned int _newestVersionSeen[3]; // major, minor, revision
};

@@ -361,6 +361,11 @@ public:
{
}

Fragment(const void *data,unsigned int len) :
Buffer<ZT_PROTO_MAX_PACKET_LENGTH>(data,len)
{
}

/**
* Initialize from a packet
*
@@ -793,12 +798,16 @@ public:
throw();

template<unsigned int C2>
Packet(const Buffer<C2> &b)
throw(std::out_of_range) :
Packet(const Buffer<C2> &b) :
Buffer<ZT_PROTO_MAX_PACKET_LENGTH>(b)
{
}

Packet(const void *data,unsigned int len) :
Buffer<ZT_PROTO_MAX_PACKET_LENGTH>(data,len)
{
}

/**
* Construct a new empty packet with a unique random packet ID
*
@@ -54,26 +54,26 @@ class Path
{
public:
Path() :
_addr(),
_lastSend(0),
_lastReceived(0),
_addr(),
_lastReceiveDesperation(0),
_fixed(false) {}

Path(const Path &p) throw() { memcpy(this,&p,sizeof(Path)); }

Path(const InetAddress &addr,bool fixed) :
_addr(addr),
_lastSend(0),
_lastReceived(0),
_addr(addr),
_lastReceiveDesperation(0),
_fixed(fixed) {}

inline void init(const InetAddress &addr,bool fixed)
{
_addr = addr;
_lastSend = 0;
_lastReceived = 0;
_addr = addr;
_lastReceiveDesperation = 0;
_fixed = fixed;
}
@@ -89,7 +89,6 @@ public:

inline uint64_t lastSend() const throw() { return _lastSend; }
inline uint64_t lastReceived() const throw() { return _lastReceived; }
inline int lastReceiveDesperation() const throw() { return _lastReceiveDesperation; }

/**
* Called when a packet is sent to this path
@@ -98,7 +97,11 @@ public:
*
* @param t Time of send
*/
inline void sent(uint64_t t) throw() { _lastSend = t; }
inline void sent(uint64_t t)
throw()
{
_lastSend = t;
}

/**
* Called when a packet is received from this path
@@ -106,7 +109,12 @@ public:
* @param t Time of receive
* @param d Link desperation of receive
*/
inline void received(uint64_t t,unsigned int d) throw() { _lastReceived = t; _lastReceiveDesperation = d; }
inline void received(uint64_t t,unsigned int d)
throw()
{
_lastReceived = t;
_lastReceiveDesperation = d;
}

/**
* @return Is this a fixed path?
@@ -119,24 +127,9 @@ public:
inline void setFixed(bool f) throw() { _fixed = f; }

/**
* Compute path desperation
*
* Path desperation affects escalation to less efficient fallback
* transports such as TCP or HTTP relaying.
*
* Right now we only escalate desperation for fixed paths, which
* are paths to supernodes. This causes our fallback tunneling
* mechanisms to kick in.
*
* @param now Current time
* @return Path desperation, starting at 0
* @return Last desperation reported via incoming link
*/
inline unsigned int desperation(uint64_t now) const
{
if ((_fixed)&&(_lastSend > _lastReceived))
return std::max(_lastReceiveDesperation,(unsigned int)((_lastSend - _lastReceived) / ZT_DESPERATION_INCREMENT));
return _lastReceiveDesperation;
}
inline unsigned int lastReceiveDesperation() const throw() { return _lastReceiveDesperation; }

/**
* @param now Current time
@@ -159,7 +152,7 @@ public:
*/
inline bool send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
{
if (RR->node->putPacket(_addr,data,len,desperation(now))) {
if (RR->node->putPacket(_addr,data,len,std::max(RR->node->coreDesperation(),_lastReceiveDesperation))) {
sent(now);
RR->antiRec->logOutgoingZT(data,len);
return true;
@@ -191,9 +184,9 @@ public:
inline bool operator>=(const Path &p) const throw() { return (_addr >= p._addr); }

private:
InetAddress _addr;
uint64_t _lastSend;
uint64_t _lastReceived;
InetAddress _addr;
unsigned int _lastReceiveDesperation;
bool _fixed;
};
@@ -187,6 +187,24 @@ void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &at
RR->node->putPacket(atAddress,outp.data(),outp.size(),linkDesperation);
}

void Peer::doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now)
{
Path *const bestPath = getBestPath(now);
if ((bestPath)&&(bestPath->active(now))) {
if ((now - bestPath->lastReceived()) >= ZT_PEER_DIRECT_PING_DELAY) {
attemptToContactAt(RR,bestPath->address(),bestPath->desperation(now),now);
bestPath->sent(now);
} else if ((now - bestPath->lastSend()) >= ZT_NAT_KEEPALIVE_DELAY) {
// We only do keepalive if desperation is zero right now, since higher
// desperation paths involve things like tunneling that do not need it.
if (bestPath->desperation() == 0) {
RR->node->putPacket(_paths[p].address(),"",0,0);
bestPath->sent(now);
}
}
}
}

void Peer::addPath(const Path &newp)
{
unsigned int np = _numPaths;
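doPingAndKeepalive() separates two cheap maintenance actions on the best path: a real contact attempt when nothing has been received for ZT_PEER_DIRECT_PING_DELAY (defined elsewhere in Constants.hpp, value not shown in this diff), and a zero-byte UDP keepalive when nothing has been sent for ZT_NAT_KEEPALIVE_DELAY (25 s), which is enough to keep short-lived NAT mappings (some expire in about 30 s) open without spending a full packet and is skipped when desperation is nonzero since fallback transports do not need it. The decision rule restated as a sketch; the enum and the pingDelayMs parameter are illustrative stand-ins:

    #include <cstdint>

    enum class PathAction { None, Ping, ZeroByteKeepalive };

    // Sketch: pingDelayMs stands in for ZT_PEER_DIRECT_PING_DELAY, whose value is not in this diff.
    inline PathAction nextPathAction(uint64_t now, uint64_t lastReceived, uint64_t lastSend,
                                     unsigned int desperation, uint64_t pingDelayMs)
    {
        if ((now - lastReceived) >= pingDelayMs)
            return PathAction::Ping;                           // verify / re-establish the path
        if (((now - lastSend) >= 25000) && (desperation == 0)) // ZT_NAT_KEEPALIVE_DELAY, plain UDP only
            return PathAction::ZeroByteKeepalive;              // keep the NAT mapping open cheaply
        return PathAction::None;
    }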
@@ -183,21 +183,12 @@ public:
void attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &atAddress,unsigned int linkDesperation,uint64_t now);

/**
* Send a HELLO to all active direct paths
* Send pings or keepalives depending on configured timeouts
*
* @param RR Runtime environment
* @param now Current time
*/
inline void ping(const RuntimeEnvironment *RR,uint64_t now)
{
unsigned int np = _numPaths;
for(unsigned int p=0;p<np;++p) {
if (_paths[p].active(now)) {
attemptToContactAt(RR,_paths[p].address(),_paths[p].desperation(now),now);
_paths[p].sent(now);
}
}
}
void doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now);

/**
* @return All known direct paths to this peer
@@ -58,16 +58,16 @@ Switch::~Switch()
{
}

void Switch::onRemotePacket(const InetAddress &fromAddr,int linkDesperation,const Buffer<4096> &data)
void Switch::onRemotePacket(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len)
{
try {
if (data.size() == ZT_PROTO_BEACON_LENGTH) {
_handleBeacon(fromAddr,linkDesperation,data);
} else if (data.size() > ZT_PROTO_MIN_FRAGMENT_LENGTH) {
if (data[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) {
_handleRemotePacketFragment(fromAddr,linkDesperation,data);
} else if (data.size() >= ZT_PROTO_MIN_PACKET_LENGTH) {
_handleRemotePacketHead(fromAddr,linkDesperation,data);
if (len == ZT_PROTO_BEACON_LENGTH) {
_handleBeacon(fromAddr,linkDesperation,Buffer<ZT_PROTO_BEACON_LENGTH>(data,len));
} else if (len > ZT_PROTO_MIN_FRAGMENT_LENGTH) {
if (((const unsigned char *)data)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) {
_handleRemotePacketFragment(fromAddr,linkDesperation,data,len);
} else if (len >= ZT_PROTO_MIN_PACKET_LENGTH) {
_handleRemotePacketHead(fromAddr,linkDesperation,data,len);
}
}
} catch (std::exception &ex) {
@@ -77,7 +77,7 @@ void Switch::onRemotePacket(const InetAddress &fromAddr,int linkDesperation,cons
}
}

void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,const Buffer<4096> &data)
void Switch::onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
{
SharedPtr<NetworkConfig> nconf(network->config2());
if (!nconf)
@@ -415,10 +415,9 @@ void Switch::doAnythingWaitingForPeer(const SharedPtr<Peer> &peer)
}
}

unsigned long Switch::doTimerTasks()
unsigned long Switch::doTimerTasks(uint64_t now)
{
unsigned long nextDelay = ~((unsigned long)0); // big number, caller will cap return value
const uint64_t now = RR->node->now();
unsigned long nextDelay = 0xffffffff; // ceiling delay, caller will cap to minimum

{ // Aggressive NAT traversal time!
Mutex::Lock _l(_contactQueue_m);
@@ -538,7 +537,7 @@ unsigned long Switch::doTimerTasks()
}
}

return std::max(nextDelay,(unsigned long)10); // minimum delay
return nextDelay;
}

const char *Switch::etherTypeName(const unsigned int etherType)
@@ -557,9 +556,9 @@ const char *Switch::etherTypeName(const unsigned int etherType)
return "UNKNOWN";
}

void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,int linkDesperation,const Buffer<4096> &data)
void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len)
{
Packet::Fragment fragment(data);
Packet::Fragment fragment(data,len);
Address destination(fragment.destination());

if (destination != RR->identity.address()) {
@@ -629,9 +628,9 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,int linkDes
}
}

void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,int linkDesperation,const Buffer<4096> &data)
void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len)
{
SharedPtr<IncomingPacket> packet(new IncomingPacket(data,fromAddr,linkDesperation));
SharedPtr<IncomingPacket> packet(new IncomingPacket(data,len,fromAddr,linkDesperation));

Address source(packet->source());
Address destination(packet->destination());
@@ -699,7 +698,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,int linkDespera
}
}

void Switch::_handleBeacon(const InetAddress &fromAddr,int linkDesperation,const Buffer<4096> &data)
void Switch::_handleBeacon(const InetAddress &fromAddr,int linkDesperation,const Buffer<ZT_PROTO_BEACON_LENGTH> &data)
{
Address beaconAddr(data.field(ZT_PROTO_BEACON_IDX_ADDRESS,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH);
if (beaconAddr == RR->identity.address())
@@ -82,8 +82,9 @@ public:
* @param fromAddr Internet IP address of origin
* @param linkDesperation Link desperation of path over which packet was received
* @param data Packet data
* @param len Packet length
*/
void onRemotePacket(const InetAddress &fromAddr,int linkDesperation,const Buffer<4096> &data);
void onRemotePacket(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len);

/**
* Called when a packet comes from a local Ethernet tap
@@ -92,9 +93,11 @@ public:
* @param from Originating MAC address
* @param to Destination MAC address
* @param etherType Ethernet packet type
* @param vlanId VLAN ID or 0 if none
* @param data Ethernet payload
* @param len Frame length
*/
void onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,const Buffer<4096> &data);
void onLocalEthernet(const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len);

/**
* Send a packet to a ZeroTier address (destination in packet)
@@ -164,9 +167,13 @@ public:
/**
* Perform retries and other periodic timer tasks
*
* This can return a very long delay if there are no pending timer
* tasks. The caller should cap this comparatively vs. other values.
*
* @param now Current time
* @return Number of milliseconds until doTimerTasks() should be run again
*/
unsigned long doTimerTasks();
unsigned long doTimerTasks(uint64_t now);

/**
* @param etherType Ethernet type ID
@@ -176,9 +183,9 @@ public:
throw();

private:
void _handleRemotePacketFragment(const InetAddress &fromAddr,int linkDesperation,const Buffer<4096> &data);
void _handleRemotePacketHead(const InetAddress &fromAddr,int linkDesperation,const Buffer<4096> &data);
void _handleBeacon(const InetAddress &fromAddr,int linkDesperation,const Buffer<4096> &data);
void _handleRemotePacketFragment(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len);
void _handleRemotePacketHead(const InetAddress &fromAddr,int linkDesperation,const void *data,unsigned int len);
void _handleBeacon(const InetAddress &fromAddr,int linkDesperation,const Buffer<ZT_PROTO_BEACON_LENGTH> &data);

Address _sendWhoisRequest(
const Address &addr,
@@ -188,100 +188,6 @@ public:
f(*this,p->second);
}

#if 0
/**
* Apply a function or function object to all supernode peers
*
* Note: explicitly template this by reference if you want the object
* passed by reference instead of copied.
*
* Warning: be careful not to use features in these that call any other
* methods of Topology that may lock _lock, otherwise a recursive lock
* and deadlock or lock corruption may occur.
*
* @param f Function to apply
* @tparam F Function or function object type
*/
template<typename F>
inline void eachSupernodePeer(F f)
{
Mutex::Lock _l(_lock);
for(std::vector< SharedPtr<Peer> >::const_iterator p(_supernodePeers.begin());p!=_supernodePeers.end();++p)
f(*this,*p);
}

/**
* Pings all peers that need a ping sent, excluding supernodes
*
* Ordinary peers are pinged if we haven't heard from them recently. Receive
* time rather than send time as OK is returned on success and we want to
* keep trying if a packet is lost. Ordinary peers are subject to a frame
* inactivity timeout. We give up if we haven't actually transferred any
* data to them recently, and eventually Topology purges them from memory.
*/
class PingPeersThatNeedPing
{
public:
PingPeersThatNeedPing(const RuntimeEnvironment *renv,uint64_t now) throw() :
_now(now),
_supernodeAddresses(renv->topology->supernodeAddresses()),
RR(renv) {}

inline void operator()(Topology &t,const SharedPtr<Peer> &p)
{
/* For ordinary nodes we ping if they've sent us a frame recently,
* otherwise they are stale and we let the link die.
*
* Note that we measure ping time from time of last receive rather
* than time of last send in order to only count full round trips. */
if ( (std::find(_supernodeAddresses.begin(),_supernodeAddresses.end(),p->address()) == _supernodeAddresses.end()) &&
((_now - p->lastFrame()) < ZT_PEER_PATH_ACTIVITY_TIMEOUT) &&
((_now - p->lastDirectReceive()) >= ZT_PEER_DIRECT_PING_DELAY) ) {
p->sendPing(RR,_now);
}
}

private:
uint64_t _now;
std::vector<Address> _supernodeAddresses;
const RuntimeEnvironment *RR;
};

/**
* Ping peers that need ping according to supernode rules
*
* Supernodes ping aggressively if a ping is unanswered and they are not
* subject to the activity timeout. In other words: we assume they are
* always there and always try to reach them.
*
* The ultimate rate limit for this is controlled up in the Node main loop.
*/
class PingSupernodesThatNeedPing
{
public:
PingSupernodesThatNeedPing(const RuntimeEnvironment *renv,uint64_t now) throw() :
_now(now),
RR(renv) {}

inline void operator()(Topology &t,const SharedPtr<Peer> &p)
{
/* For supernodes we always ping even if no frames have been seen, and
* we ping aggressively if pings are unanswered. The limit to this
* frequency is set in the main loop to no more than ZT_STARTUP_AGGRO. */

uint64_t lp = 0;
uint64_t lr = 0;
p->lastPingAndDirectReceive(lp,lr);
if ( ((lr < lp)&&((lp - lr) >= ZT_PING_UNANSWERED_AFTER)) || ((_now - lr) >= ZT_PEER_DIRECT_PING_DELAY) )
p->sendPing(RR,_now);
}

private:
uint64_t _now;
const RuntimeEnvironment *RR;
};
#endif

/**
* Validate a root topology dictionary against the identities specified in Defaults
*