/*
 * Copyright (c)2013-2020 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2025-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
2013-07-04 20:56:19 +00:00
# include <stdio.h>
# include <stdlib.h>
2013-08-12 20:57:34 +00:00
2013-07-04 20:56:19 +00:00
# include <algorithm>
# include <utility>
# include <stdexcept>
2015-04-03 20:14:37 +00:00
# include "../version.h"
# include "../include/ZeroTierOne.h"
2013-08-13 01:25:36 +00:00
2015-04-03 20:14:37 +00:00
# include "Constants.hpp"
2015-04-03 23:52:53 +00:00
# include "RuntimeEnvironment.hpp"
2013-07-04 20:56:19 +00:00
# include "Switch.hpp"
# include "Node.hpp"
# include "InetAddress.hpp"
# include "Topology.hpp"
# include "Peer.hpp"
2015-07-28 18:28:47 +00:00
# include "SelfAwareness.hpp"
2015-04-03 23:52:53 +00:00
# include "Packet.hpp"
2017-07-07 23:58:05 +00:00
# include "Trace.hpp"
2013-07-04 20:56:19 +00:00
namespace ZeroTier {
Switch : : Switch ( const RuntimeEnvironment * renv ) :
2015-07-13 15:33:02 +00:00
RR ( renv ) ,
2015-09-04 22:35:43 +00:00
_lastBeaconResponse ( 0 ) ,
2018-02-09 06:35:01 +00:00
_lastCheckedQueues ( 0 ) ,
2015-09-04 22:35:43 +00:00
_lastUniteAttempt ( 8 ) // only really used on root servers and upstreams, and it'll grow there just fine
2013-07-04 20:56:19 +00:00
{
}
2020-05-12 08:35:48 +00:00
// Returns true if packet appears valid; pos and proto will be set
static bool _ipv6GetPayload ( const uint8_t * frameData , unsigned int frameLen , unsigned int & pos , unsigned int & proto )
{
if ( frameLen < 40 )
return false ;
pos = 40 ;
proto = frameData [ 6 ] ;
while ( pos < = frameLen ) {
switch ( proto ) {
case 0 : // hop-by-hop options
case 43 : // routing
case 60 : // destination options
case 135 : // mobility options
if ( ( pos + 8 ) > frameLen )
return false ; // invalid!
proto = frameData [ pos ] ;
pos + = ( ( unsigned int ) frameData [ pos + 1 ] * 8 ) + 8 ;
break ;
//case 44: // fragment -- we currently can't parse these and they are deprecated in IPv6 anyway
//case 50:
//case 51: // IPSec ESP and AH -- we have to stop here since this is encrypted stuff
default :
return true ;
}
}
return false ; // overflow == invalid
}
2017-07-06 18:45:22 +00:00
void Switch : : onRemotePacket ( void * tPtr , const int64_t localSocket , const InetAddress & fromAddr , const void * data , unsigned int len )
2013-07-04 20:56:19 +00:00
{
2020-05-12 08:35:48 +00:00
int32_t flowId = ZT_QOS_NO_FLOW ;
2013-07-04 20:56:19 +00:00
try {
2017-10-02 22:52:57 +00:00
const int64_t now = RR - > node - > now ( ) ;
2016-03-18 21:16:07 +00:00
2017-07-17 20:48:39 +00:00
const SharedPtr < Path > path ( RR - > topology - > getPath ( localSocket , fromAddr ) ) ;
2016-09-01 22:43:07 +00:00
path - > received ( now ) ;
2015-07-13 15:33:02 +00:00
if ( len = = 13 ) {
/* LEGACY: before VERB_PUSH_DIRECT_PATHS, peers used broadcast
* announcements on the LAN to solve the ' same network problem . ' We
* no longer send these , but we ' ll listen for them for a while to
* locate peers with versions < 1.0 .4 . */
2016-03-18 21:16:07 +00:00
2017-02-01 23:22:14 +00:00
const Address beaconAddr ( reinterpret_cast < const char * > ( data ) + 8 , 5 ) ;
2015-07-13 15:33:02 +00:00
if ( beaconAddr = = RR - > identity . address ( ) )
return ;
2017-07-06 18:45:22 +00:00
if ( ! RR - > node - > shouldUsePathForZeroTierTraffic ( tPtr , beaconAddr , localSocket , fromAddr ) )
2016-01-11 23:57:58 +00:00
return ;
2017-03-28 00:03:17 +00:00
const SharedPtr < Peer > peer ( RR - > topology - > getPeer ( tPtr , beaconAddr ) ) ;
2015-07-13 15:33:02 +00:00
if ( peer ) { // we'll only respond to beacons from known peers
if ( ( now - _lastBeaconResponse ) > = 2500 ) { // limit rate of responses
_lastBeaconResponse = now ;
Packet outp ( peer - > address ( ) , RR - > identity . address ( ) , Packet : : VERB_NOP ) ;
2020-08-21 21:23:31 +00:00
outp . armor ( peer - > key ( ) , true , peer - > aesKeysIfSupported ( ) ) ;
2017-03-28 00:03:17 +00:00
path - > send ( RR , tPtr , outp . data ( ) , outp . size ( ) , now ) ;
2015-07-13 15:33:02 +00:00
}
}
2016-03-18 21:16:07 +00:00
2016-09-01 20:45:32 +00:00
} else if ( len > ZT_PROTO_MIN_FRAGMENT_LENGTH ) { // SECURITY: min length check is important since we do some C-style stuff below!
2016-03-18 21:16:07 +00:00
if ( reinterpret_cast < const uint8_t * > ( data ) [ ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR ] = = ZT_PACKET_FRAGMENT_INDICATOR ) {
// Handle fragment ----------------------------------------------------
Packet : : Fragment fragment ( data , len ) ;
2016-03-18 21:32:48 +00:00
const Address destination ( fragment . destination ( ) ) ;
2016-03-18 21:16:07 +00:00
if ( destination ! = RR - > identity . address ( ) ) {
2018-01-25 12:11:59 +00:00
if ( ( ! RR - > topology - > amUpstream ( ) ) & & ( ! path - > trustEstablished ( now ) ) )
2017-01-27 22:05:09 +00:00
return ;
2016-09-13 21:27:18 +00:00
2016-03-18 21:16:07 +00:00
if ( fragment . hops ( ) < ZT_RELAY_MAX_HOPS ) {
fragment . incrementHops ( ) ;
// Note: we don't bother initiating NAT-t for fragments, since heads will set that off.
// It wouldn't hurt anything, just redundant and unnecessary.
2017-03-28 00:03:17 +00:00
SharedPtr < Peer > relayTo = RR - > topology - > getPeer ( tPtr , destination ) ;
if ( ( ! relayTo ) | | ( ! relayTo - > sendDirect ( tPtr , fragment . data ( ) , fragment . size ( ) , now , false ) ) ) {
2016-11-18 00:31:58 +00:00
// Don't know peer or no direct path -- so relay via someone upstream
relayTo = RR - > topology - > getUpstreamPeer ( ) ;
2016-03-18 21:16:07 +00:00
if ( relayTo )
2017-03-28 00:03:17 +00:00
relayTo - > sendDirect ( tPtr , fragment . data ( ) , fragment . size ( ) , now , true ) ;
2016-03-18 21:16:07 +00:00
}
}
} else {
// Fragment looks like ours
const uint64_t fragmentPacketId = fragment . packetId ( ) ;
const unsigned int fragmentNumber = fragment . fragmentNumber ( ) ;
const unsigned int totalFragments = fragment . totalFragments ( ) ;
if ( ( totalFragments < = ZT_MAX_PACKET_FRAGMENTS ) & & ( fragmentNumber < ZT_MAX_PACKET_FRAGMENTS ) & & ( fragmentNumber > 0 ) & & ( totalFragments > 1 ) ) {
// Fragment appears basically sane. Its fragment number must be
// 1 or more, since a Packet with fragmented bit set is fragment 0.
// Total fragments must be more than 1, otherwise why are we
// seeing a Packet::Fragment?
2017-08-08 20:21:10 +00:00
RXQueueEntry * const rq = _findRXQueueEntry ( fragmentPacketId ) ;
2018-07-20 21:01:58 +00:00
Mutex : : Lock rql ( rq - > lock ) ;
2017-08-08 20:21:10 +00:00
if ( rq - > packetId ! = fragmentPacketId ) {
2016-03-18 21:16:07 +00:00
// No packet found, so we received a fragment without its head.
2020-05-12 08:35:48 +00:00
rq - > flowId = flowId ;
2016-03-18 21:16:07 +00:00
rq - > timestamp = now ;
rq - > packetId = fragmentPacketId ;
rq - > frags [ fragmentNumber - 1 ] = fragment ;
rq - > totalFragments = totalFragments ; // total fragment count is known
rq - > haveFragments = 1 < < fragmentNumber ; // we have only this fragment
rq - > complete = false ;
} else if ( ! ( rq - > haveFragments & ( 1 < < fragmentNumber ) ) ) {
// We have other fragments and maybe the head, so add this one and check
rq - > frags [ fragmentNumber - 1 ] = fragment ;
rq - > totalFragments = totalFragments ;
if ( Utils : : countBits ( rq - > haveFragments | = ( 1 < < fragmentNumber ) ) = = totalFragments ) {
// We have all fragments -- assemble and process full Packet
for ( unsigned int f = 1 ; f < totalFragments ; + + f )
rq - > frag0 . append ( rq - > frags [ f - 1 ] . payload ( ) , rq - > frags [ f - 1 ] . payloadLength ( ) ) ;
2020-05-12 08:35:48 +00:00
if ( rq - > frag0 . tryDecode ( RR , tPtr , flowId ) ) {
2016-03-18 21:16:07 +00:00
rq - > timestamp = 0 ; // packet decoded, free entry
} else {
rq - > complete = true ; // set complete flag but leave entry since it probably needs WHOIS or something
}
}
} // else this is a duplicate fragment, ignore
}
}
// --------------------------------------------------------------------
} else if ( len > = ZT_PROTO_MIN_PACKET_LENGTH ) { // min length check is important!
// Handle packet head -------------------------------------------------
const Address destination ( reinterpret_cast < const uint8_t * > ( data ) + 8 , ZT_ADDRESS_LENGTH ) ;
const Address source ( reinterpret_cast < const uint8_t * > ( data ) + 13 , ZT_ADDRESS_LENGTH ) ;
2017-02-01 22:18:56 +00:00
if ( source = = RR - > identity . address ( ) )
return ;
2016-03-18 21:16:07 +00:00
if ( destination ! = RR - > identity . address ( ) ) {
2018-01-25 12:11:59 +00:00
if ( ( ! RR - > topology - > amUpstream ( ) ) & & ( ! path - > trustEstablished ( now ) ) & & ( source ! = RR - > identity . address ( ) ) )
2017-01-27 22:05:09 +00:00
return ;
2016-09-13 21:27:18 +00:00
2016-03-18 21:16:07 +00:00
Packet packet ( data , len ) ;
if ( packet . hops ( ) < ZT_RELAY_MAX_HOPS ) {
packet . incrementHops ( ) ;
2017-03-28 00:03:17 +00:00
SharedPtr < Peer > relayTo = RR - > topology - > getPeer ( tPtr , destination ) ;
if ( ( relayTo ) & & ( relayTo - > sendDirect ( tPtr , packet . data ( ) , packet . size ( ) , now , false ) ) ) {
2017-10-25 19:42:14 +00:00
if ( ( source ! = RR - > identity . address ( ) ) & & ( _shouldUnite ( now , source , destination ) ) ) {
2017-03-28 00:03:17 +00:00
const SharedPtr < Peer > sourcePeer ( RR - > topology - > getPeer ( tPtr , source ) ) ;
2017-10-25 19:42:14 +00:00
if ( sourcePeer )
relayTo - > introduce ( tPtr , now , sourcePeer ) ;
2016-03-18 21:16:07 +00:00
}
} else {
2017-08-23 23:42:17 +00:00
relayTo = RR - > topology - > getUpstreamPeer ( ) ;
2017-10-25 19:42:14 +00:00
if ( ( relayTo ) & & ( relayTo - > address ( ) ! = source ) ) {
if ( relayTo - > sendDirect ( tPtr , packet . data ( ) , packet . size ( ) , now , true ) ) {
const SharedPtr < Peer > sourcePeer ( RR - > topology - > getPeer ( tPtr , source ) ) ;
if ( sourcePeer )
relayTo - > introduce ( tPtr , now , sourcePeer ) ;
}
}
2016-03-18 21:16:07 +00:00
}
}
} else if ( ( reinterpret_cast < const uint8_t * > ( data ) [ ZT_PACKET_IDX_FLAGS ] & ZT_PROTO_FLAG_FRAGMENTED ) ! = 0 ) {
// Packet is the head of a fragmented packet series
2017-03-01 22:36:52 +00:00
const uint64_t packetId = (
( ( ( uint64_t ) reinterpret_cast < const uint8_t * > ( data ) [ 0 ] ) < < 56 ) |
( ( ( uint64_t ) reinterpret_cast < const uint8_t * > ( data ) [ 1 ] ) < < 48 ) |
( ( ( uint64_t ) reinterpret_cast < const uint8_t * > ( data ) [ 2 ] ) < < 40 ) |
( ( ( uint64_t ) reinterpret_cast < const uint8_t * > ( data ) [ 3 ] ) < < 32 ) |
( ( ( uint64_t ) reinterpret_cast < const uint8_t * > ( data ) [ 4 ] ) < < 24 ) |
( ( ( uint64_t ) reinterpret_cast < const uint8_t * > ( data ) [ 5 ] ) < < 16 ) |
( ( ( uint64_t ) reinterpret_cast < const uint8_t * > ( data ) [ 6 ] ) < < 8 ) |
( ( uint64_t ) reinterpret_cast < const uint8_t * > ( data ) [ 7 ] )
) ;
2017-08-08 20:21:10 +00:00
RXQueueEntry * const rq = _findRXQueueEntry ( packetId ) ;
2018-07-20 21:01:58 +00:00
Mutex : : Lock rql ( rq - > lock ) ;
2017-08-08 20:21:10 +00:00
if ( rq - > packetId ! = packetId ) {
2016-03-18 21:16:07 +00:00
// If we have no other fragments yet, create an entry and save the head
2020-05-12 08:35:48 +00:00
rq - > flowId = flowId ;
2016-03-18 21:16:07 +00:00
rq - > timestamp = now ;
rq - > packetId = packetId ;
2016-09-02 18:51:33 +00:00
rq - > frag0 . init ( data , len , path , now ) ;
2016-03-18 21:16:07 +00:00
rq - > totalFragments = 0 ;
rq - > haveFragments = 1 ;
rq - > complete = false ;
} else if ( ! ( rq - > haveFragments & 1 ) ) {
// If we have other fragments but no head, see if we are complete with the head
if ( ( rq - > totalFragments > 1 ) & & ( Utils : : countBits ( rq - > haveFragments | = 1 ) = = rq - > totalFragments ) ) {
// We have all fragments -- assemble and process full Packet
2016-09-01 22:43:07 +00:00
rq - > frag0 . init ( data , len , path , now ) ;
2016-03-18 21:16:07 +00:00
for ( unsigned int f = 1 ; f < rq - > totalFragments ; + + f )
rq - > frag0 . append ( rq - > frags [ f - 1 ] . payload ( ) , rq - > frags [ f - 1 ] . payloadLength ( ) ) ;
2020-05-12 08:35:48 +00:00
if ( rq - > frag0 . tryDecode ( RR , tPtr , flowId ) ) {
2016-03-18 21:16:07 +00:00
rq - > timestamp = 0 ; // packet decoded, free entry
} else {
rq - > complete = true ; // set complete flag but leave entry since it probably needs WHOIS or something
}
} else {
// Still waiting on more fragments, but keep the head
2016-09-01 22:43:07 +00:00
rq - > frag0 . init ( data , len , path , now ) ;
2016-03-18 21:16:07 +00:00
}
} // else this is a duplicate head, ignore
} else {
// Packet is unfragmented, so just process it
2016-09-01 22:43:07 +00:00
IncomingPacket packet ( data , len , path , now ) ;
2020-05-12 08:35:48 +00:00
if ( ! packet . tryDecode ( RR , tPtr , flowId ) ) {
2017-08-08 20:21:10 +00:00
RXQueueEntry * const rq = _nextRXQueueEntry ( ) ;
2018-07-20 21:01:58 +00:00
Mutex : : Lock rql ( rq - > lock ) ;
2020-05-12 08:35:48 +00:00
rq - > flowId = flowId ;
2016-03-18 21:16:07 +00:00
rq - > timestamp = now ;
2017-03-01 22:36:52 +00:00
rq - > packetId = packet . packetId ( ) ;
2016-03-18 21:16:07 +00:00
rq - > frag0 = packet ;
rq - > totalFragments = 1 ;
rq - > haveFragments = 1 ;
rq - > complete = true ;
}
}
// --------------------------------------------------------------------
2015-04-03 20:14:37 +00:00
}
2013-07-04 20:56:19 +00:00
}
2017-07-07 23:58:05 +00:00
} catch ( . . . ) { } // sanity check, should be caught elsewhere
2013-07-04 20:56:19 +00:00
}
2017-03-28 00:03:17 +00:00
void Switch : : onLocalEthernet ( void * tPtr , const SharedPtr < Network > & network , const MAC & from , const MAC & to , unsigned int etherType , unsigned int vlanId , const void * data , unsigned int len )
2013-07-04 20:56:19 +00:00
{
2016-04-12 19:49:46 +00:00
if ( ! network - > hasConfig ( ) )
2013-10-18 17:20:34 +00:00
return ;
2014-06-14 00:49:33 +00:00
// Check if this packet is from someone other than the tap -- i.e. bridged in
2016-10-05 17:12:06 +00:00
bool fromBridged ;
if ( ( fromBridged = ( from ! = network - > mac ( ) ) ) ) {
2016-04-12 19:49:46 +00:00
if ( ! network - > config ( ) . permitsBridging ( RR - > identity . address ( ) ) ) {
2017-07-13 17:51:05 +00:00
RR - > t - > outgoingNetworkFrameDropped ( tPtr , network , from , to , etherType , vlanId , len , " not a bridge " ) ;
2014-06-14 00:49:33 +00:00
return ;
}
}
2020-05-12 08:35:48 +00:00
uint8_t qosBucket = ZT_AQM_DEFAULT_BUCKET ;
2018-07-10 23:50:12 +00:00
2020-05-12 08:35:48 +00:00
/**
* A pseudo - unique identifier used by balancing and bonding policies to
* categorize individual flows / conversations for assignment to a specific
* physical path . This identifier consists of the source port and
* destination port of the encapsulated frame .
2019-08-20 04:52:33 +00:00
*
2020-05-12 08:35:48 +00:00
* A flowId of - 1 will indicate that there is no preference for how this
* packet shall be sent . An example of this would be an ICMP packet .
2019-08-20 04:52:33 +00:00
*/
2019-08-21 01:50:38 +00:00
2020-05-12 08:35:48 +00:00
int32_t flowId = ZT_QOS_NO_FLOW ;
if ( etherType = = ZT_ETHERTYPE_IPV4 & & ( len > = 20 ) ) {
uint16_t srcPort = 0 ;
uint16_t dstPort = 0 ;
uint8_t proto = ( reinterpret_cast < const uint8_t * > ( data ) [ 9 ] ) ;
const unsigned int headerLen = 4 * ( reinterpret_cast < const uint8_t * > ( data ) [ 0 ] & 0xf ) ;
switch ( proto ) {
case 0x01 : // ICMP
//flowId = 0x01;
break ;
// All these start with 16-bit source and destination port in that order
case 0x06 : // TCP
case 0x11 : // UDP
case 0x84 : // SCTP
case 0x88 : // UDPLite
if ( len > ( headerLen + 4 ) ) {
unsigned int pos = headerLen + 0 ;
srcPort = ( reinterpret_cast < const uint8_t * > ( data ) [ pos + + ] ) < < 8 ;
srcPort | = ( reinterpret_cast < const uint8_t * > ( data ) [ pos ] ) ;
pos + + ;
dstPort = ( reinterpret_cast < const uint8_t * > ( data ) [ pos + + ] ) < < 8 ;
dstPort | = ( reinterpret_cast < const uint8_t * > ( data ) [ pos ] ) ;
flowId = dstPort ^ srcPort ^ proto ;
}
break ;
2019-08-20 04:52:33 +00:00
}
2020-05-12 08:35:48 +00:00
}
2019-08-20 04:52:33 +00:00
2020-05-12 08:35:48 +00:00
if ( etherType = = ZT_ETHERTYPE_IPV6 & & ( len > = 40 ) ) {
uint16_t srcPort = 0 ;
uint16_t dstPort = 0 ;
unsigned int pos ;
unsigned int proto ;
_ipv6GetPayload ( ( const uint8_t * ) data , len , pos , proto ) ;
switch ( proto ) {
case 0x3A : // ICMPv6
//flowId = 0x3A;
break ;
// All these start with 16-bit source and destination port in that order
case 0x06 : // TCP
case 0x11 : // UDP
case 0x84 : // SCTP
case 0x88 : // UDPLite
if ( len > ( pos + 4 ) ) {
srcPort = ( reinterpret_cast < const uint8_t * > ( data ) [ pos + + ] ) < < 8 ;
srcPort | = ( reinterpret_cast < const uint8_t * > ( data ) [ pos ] ) ;
pos + + ;
dstPort = ( reinterpret_cast < const uint8_t * > ( data ) [ pos + + ] ) < < 8 ;
dstPort | = ( reinterpret_cast < const uint8_t * > ( data ) [ pos ] ) ;
flowId = dstPort ^ srcPort ^ proto ;
}
break ;
default :
break ;
2019-08-20 04:52:33 +00:00
}
}
2014-06-14 00:49:33 +00:00
if ( to . isMulticast ( ) ) {
2016-09-21 04:21:34 +00:00
MulticastGroup multicastGroup ( to , 0 ) ;
2014-03-26 04:38:54 +00:00
2014-06-14 00:49:33 +00:00
if ( to . isBroadcast ( ) ) {
2015-11-03 18:46:41 +00:00
if ( ( etherType = = ZT_ETHERTYPE_ARP ) & & ( len > = 28 ) & & ( ( ( ( const uint8_t * ) data ) [ 2 ] = = 0x08 ) & & ( ( ( const uint8_t * ) data ) [ 3 ] = = 0x00 ) & & ( ( ( const uint8_t * ) data ) [ 4 ] = = 6 ) & & ( ( ( const uint8_t * ) data ) [ 5 ] = = 4 ) & & ( ( ( const uint8_t * ) data ) [ 7 ] = = 0x01 ) ) ) {
/* IPv4 ARP is one of the few special cases that we impose upon what is
* otherwise a straightforward Ethernet switch emulation . Vanilla ARP
* is dumb old broadcast and simply doesn ' t scale . ZeroTier multicast
* groups have an additional field called ADI ( additional distinguishing
2019-08-20 04:52:33 +00:00
* information ) which was added specifically for ARP though it could
2015-11-03 18:46:41 +00:00
* be used for other things too . We then take ARP broadcasts and turn
* them into multicasts by stuffing the IP address being queried into
* the 32 - bit ADI field . In practice this uses our multicast pub / sub
* system to implement a kind of extended / distributed ARP table . */
2016-09-21 04:21:34 +00:00
multicastGroup = MulticastGroup : : deriveMulticastGroupForAddressResolution ( InetAddress ( ( ( const unsigned char * ) data ) + 24 , 4 , 0 ) ) ;
2016-04-12 19:49:46 +00:00
} else if ( ! network - > config ( ) . enableBroadcast ( ) ) {
2014-06-14 00:49:33 +00:00
// Don't transmit broadcasts if this network doesn't want them
2017-07-13 17:51:05 +00:00
RR - > t - > outgoingNetworkFrameDropped ( tPtr , network , from , to , etherType , vlanId , len , " broadcast disabled " ) ;
2014-05-23 21:32:31 +00:00
return ;
2013-09-27 20:03:13 +00:00
}
2015-11-03 18:46:41 +00:00
} else if ( ( etherType = = ZT_ETHERTYPE_IPV6 ) & & ( len > = ( 40 + 8 + 16 ) ) ) {
2016-06-24 11:54:05 +00:00
// IPv6 NDP emulation for certain very special patterns of private IPv6 addresses -- if enabled
2016-06-24 05:41:14 +00:00
if ( ( network - > config ( ) . ndpEmulation ( ) ) & & ( reinterpret_cast < const uint8_t * > ( data ) [ 6 ] = = 0x3a ) & & ( reinterpret_cast < const uint8_t * > ( data ) [ 40 ] = = 0x87 ) ) { // ICMPv6 neighbor solicitation
Address v6EmbeddedAddress ;
const uint8_t * const pkt6 = reinterpret_cast < const uint8_t * > ( data ) + 40 + 8 ;
const uint8_t * my6 = ( const uint8_t * ) 0 ;
2016-06-24 12:21:25 +00:00
// ZT-RFC4193 address: fdNN:NNNN:NNNN:NNNN:NN99:93DD:DDDD:DDDD / 88 (one /128 per actual host)
2016-06-24 05:41:14 +00:00
2016-06-24 12:21:25 +00:00
// ZT-6PLANE address: fcXX:XXXX:XXDD:DDDD:DDDD:####:####:#### / 40 (one /80 per actual host)
2016-06-24 11:54:05 +00:00
// (XX - lower 32 bits of network ID XORed with higher 32 bits)
// For these to work, we must have a ZT-managed address assigned in one of the
// above formats, and the query must match its prefix.
2016-05-06 23:13:11 +00:00
for ( unsigned int sipk = 0 ; sipk < network - > config ( ) . staticIpCount ; + + sipk ) {
2016-06-24 11:54:05 +00:00
const InetAddress * const sip = & ( network - > config ( ) . staticIps [ sipk ] ) ;
if ( sip - > ss_family = = AF_INET6 ) {
2016-06-24 05:41:14 +00:00
my6 = reinterpret_cast < const uint8_t * > ( reinterpret_cast < const struct sockaddr_in6 * > ( & ( * sip ) ) - > sin6_addr . s6_addr ) ;
2016-06-24 11:54:05 +00:00
const unsigned int sipNetmaskBits = Utils : : ntoh ( ( uint16_t ) reinterpret_cast < const struct sockaddr_in6 * > ( & ( * sip ) ) - > sin6_port ) ;
2016-06-24 12:21:25 +00:00
if ( ( sipNetmaskBits = = 88 ) & & ( my6 [ 0 ] = = 0xfd ) & & ( my6 [ 9 ] = = 0x99 ) & & ( my6 [ 10 ] = = 0x93 ) ) { // ZT-RFC4193 /88 ???
2015-11-03 18:46:41 +00:00
unsigned int ptr = 0 ;
while ( ptr ! = 11 ) {
if ( pkt6 [ ptr ] ! = my6 [ ptr ] )
break ;
+ + ptr ;
}
2016-06-24 12:21:25 +00:00
if ( ptr = = 11 ) { // prefix match!
2016-06-24 05:41:14 +00:00
v6EmbeddedAddress . setTo ( pkt6 + ptr , 5 ) ;
break ;
}
2016-06-24 12:21:25 +00:00
} else if ( sipNetmaskBits = = 40 ) { // ZT-6PLANE /40 ???
2016-06-24 11:54:05 +00:00
const uint32_t nwid32 = ( uint32_t ) ( ( network - > id ( ) ^ ( network - > id ( ) > > 32 ) ) & 0xffffffff ) ;
2016-06-24 12:21:25 +00:00
if ( ( my6 [ 0 ] = = 0xfc ) & & ( my6 [ 1 ] = = ( uint8_t ) ( ( nwid32 > > 24 ) & 0xff ) ) & & ( my6 [ 2 ] = = ( uint8_t ) ( ( nwid32 > > 16 ) & 0xff ) ) & & ( my6 [ 3 ] = = ( uint8_t ) ( ( nwid32 > > 8 ) & 0xff ) ) & & ( my6 [ 4 ] = = ( uint8_t ) ( nwid32 & 0xff ) ) ) {
2016-06-24 11:54:05 +00:00
unsigned int ptr = 0 ;
while ( ptr ! = 5 ) {
if ( pkt6 [ ptr ] ! = my6 [ ptr ] )
break ;
+ + ptr ;
}
2016-06-24 12:21:25 +00:00
if ( ptr = = 5 ) { // prefix match!
2016-06-24 11:54:05 +00:00
v6EmbeddedAddress . setTo ( pkt6 + ptr , 5 ) ;
2016-06-24 05:41:14 +00:00
break ;
2016-06-24 11:54:05 +00:00
}
2015-11-03 18:46:41 +00:00
}
}
}
}
2016-06-24 05:41:14 +00:00
if ( ( v6EmbeddedAddress ) & & ( v6EmbeddedAddress ! = RR - > identity . address ( ) ) ) {
const MAC peerMac ( v6EmbeddedAddress , network - > id ( ) ) ;
uint8_t adv [ 72 ] ;
adv [ 0 ] = 0x60 ; adv [ 1 ] = 0x00 ; adv [ 2 ] = 0x00 ; adv [ 3 ] = 0x00 ;
adv [ 4 ] = 0x00 ; adv [ 5 ] = 0x20 ;
adv [ 6 ] = 0x3a ; adv [ 7 ] = 0xff ;
for ( int i = 0 ; i < 16 ; + + i ) adv [ 8 + i ] = pkt6 [ i ] ;
for ( int i = 0 ; i < 16 ; + + i ) adv [ 24 + i ] = my6 [ i ] ;
adv [ 40 ] = 0x88 ; adv [ 41 ] = 0x00 ;
adv [ 42 ] = 0x00 ; adv [ 43 ] = 0x00 ; // future home of checksum
adv [ 44 ] = 0x60 ; adv [ 45 ] = 0x00 ; adv [ 46 ] = 0x00 ; adv [ 47 ] = 0x00 ;
for ( int i = 0 ; i < 16 ; + + i ) adv [ 48 + i ] = pkt6 [ i ] ;
adv [ 64 ] = 0x02 ; adv [ 65 ] = 0x01 ;
adv [ 66 ] = peerMac [ 0 ] ; adv [ 67 ] = peerMac [ 1 ] ; adv [ 68 ] = peerMac [ 2 ] ; adv [ 69 ] = peerMac [ 3 ] ; adv [ 70 ] = peerMac [ 4 ] ; adv [ 71 ] = peerMac [ 5 ] ;
uint16_t pseudo_ [ 36 ] ;
uint8_t * const pseudo = reinterpret_cast < uint8_t * > ( pseudo_ ) ;
for ( int i = 0 ; i < 32 ; + + i ) pseudo [ i ] = adv [ 8 + i ] ;
pseudo [ 32 ] = 0x00 ; pseudo [ 33 ] = 0x00 ; pseudo [ 34 ] = 0x00 ; pseudo [ 35 ] = 0x20 ;
pseudo [ 36 ] = 0x00 ; pseudo [ 37 ] = 0x00 ; pseudo [ 38 ] = 0x00 ; pseudo [ 39 ] = 0x3a ;
for ( int i = 0 ; i < 32 ; + + i ) pseudo [ 40 + i ] = adv [ 40 + i ] ;
uint32_t checksum = 0 ;
for ( int i = 0 ; i < 36 ; + + i ) checksum + = Utils : : hton ( pseudo_ [ i ] ) ;
while ( ( checksum > > 16 ) ) checksum = ( checksum & 0xffff ) + ( checksum > > 16 ) ;
checksum = ~ checksum ;
adv [ 42 ] = ( checksum > > 8 ) & 0xff ;
adv [ 43 ] = checksum & 0xff ;
2017-03-28 00:03:17 +00:00
RR - > node - > putFrame ( tPtr , network - > id ( ) , network - > userPtr ( ) , peerMac , from , ZT_ETHERTYPE_IPV6 , 0 , adv , 72 ) ;
2016-06-24 11:54:05 +00:00
return ; // NDP emulation done. We have forged a "fake" reply, so no need to send actual NDP query.
} // else no NDP emulation
} // else no NDP emulation
2014-06-14 00:49:33 +00:00
}
2013-09-12 16:11:21 +00:00
2016-09-28 19:21:08 +00:00
// Check this after NDP emulation, since that has to be allowed in exactly this case
if ( network - > config ( ) . multicastLimit = = 0 ) {
2017-07-13 17:51:05 +00:00
RR - > t - > outgoingNetworkFrameDropped ( tPtr , network , from , to , etherType , vlanId , len , " multicast disabled " ) ;
2016-09-28 19:21:08 +00:00
return ;
}
2014-06-21 19:19:10 +00:00
/* Learn multicast groups for bridged-in hosts.
* Note that some OSes , most notably Linux , do this for you by learning
* multicast addresses on bridge interfaces and subscribing each slave .
* But in that case this does no harm , as the sets are just merged . */
2014-06-14 00:49:33 +00:00
if ( fromBridged )
2017-03-28 00:03:17 +00:00
network - > learnBridgedMulticastGroup ( tPtr , multicastGroup , RR - > node - > now ( ) ) ;
2014-05-23 21:32:31 +00:00
2016-08-31 23:50:22 +00:00
// First pass sets noTee to false, but noTee is set to true in OutboundMulticast to prevent duplicates.
2018-07-10 23:50:12 +00:00
if ( ! network - > filterOutgoingPacket ( tPtr , false , RR - > identity . address ( ) , Address ( ) , from , to , ( const uint8_t * ) data , len , etherType , vlanId , qosBucket ) ) {
2017-07-13 17:51:05 +00:00
RR - > t - > outgoingNetworkFrameDropped ( tPtr , network , from , to , etherType , vlanId , len , " filter blocked " ) ;
2016-07-25 23:51:10 +00:00
return ;
}
2014-10-01 19:41:48 +00:00
RR - > mc - > send (
2017-03-28 00:03:17 +00:00
tPtr ,
2015-06-02 00:50:44 +00:00
RR - > node - > now ( ) ,
2018-01-27 01:38:44 +00:00
network ,
2018-01-27 02:19:51 +00:00
Address ( ) ,
2016-09-21 04:21:34 +00:00
multicastGroup ,
2014-10-10 00:58:31 +00:00
( fromBridged ) ? from : MAC ( ) ,
2014-10-01 19:41:48 +00:00
etherType ,
2015-04-08 22:26:45 +00:00
data ,
len ) ;
2016-10-05 17:12:06 +00:00
} else if ( to = = network - > mac ( ) ) {
// Destination is this node, so just reinject it
2017-03-28 00:03:17 +00:00
RR - > node - > putFrame ( tPtr , network - > id ( ) , network - > userPtr ( ) , from , to , etherType , vlanId , data , len ) ;
2016-08-24 22:26:18 +00:00
} else if ( to [ 0 ] = = MAC : : firstOctetForNetwork ( network - > id ( ) ) ) {
2015-04-15 20:09:20 +00:00
// Destination is another ZeroTier peer on the same network
2014-06-21 19:19:10 +00:00
2015-07-07 17:00:34 +00:00
Address toZT ( to . toAddress ( network - > id ( ) ) ) ; // since in-network MACs are derived from addresses and network IDs, we can reverse this
2017-03-28 00:03:17 +00:00
SharedPtr < Peer > toPeer ( RR - > topology - > getPeer ( tPtr , toZT ) ) ;
2016-07-25 23:51:10 +00:00
2018-07-10 23:50:12 +00:00
if ( ! network - > filterOutgoingPacket ( tPtr , false , RR - > identity . address ( ) , toZT , from , to , ( const uint8_t * ) data , len , etherType , vlanId , qosBucket ) ) {
2017-07-13 17:51:05 +00:00
RR - > t - > outgoingNetworkFrameDropped ( tPtr , network , from , to , etherType , vlanId , len , " filter blocked " ) ;
2016-07-25 23:51:10 +00:00
return ;
}
2019-06-25 20:42:20 +00:00
network - > pushCredentialsIfNeeded ( tPtr , toZT , RR - > node - > now ( ) ) ;
2020-09-11 17:36:21 +00:00
if ( ! fromBridged ) {
Packet outp ( toZT , RR - > identity . address ( ) , Packet : : VERB_FRAME ) ;
2015-07-07 17:00:34 +00:00
outp . append ( network - > id ( ) ) ;
outp . append ( ( uint16_t ) etherType ) ;
outp . append ( data , len ) ;
2020-09-11 17:36:21 +00:00
// 1.4.8: disable compression for unicast as it almost never helps
//if (!network->config().disableCompression())
// outp.compress();
2019-08-20 04:52:33 +00:00
aqm_enqueue ( tPtr , network , outp , true , qosBucket , flowId ) ;
2013-07-04 20:56:19 +00:00
} else {
2020-09-11 17:36:21 +00:00
Packet outp ( toZT , RR - > identity . address ( ) , Packet : : VERB_EXT_FRAME ) ;
2015-07-07 17:00:34 +00:00
outp . append ( network - > id ( ) ) ;
2020-09-11 17:36:21 +00:00
outp . append ( ( unsigned char ) 0x00 ) ;
to . appendTo ( outp ) ;
from . appendTo ( outp ) ;
2015-07-07 17:00:34 +00:00
outp . append ( ( uint16_t ) etherType ) ;
outp . append ( data , len ) ;
2020-09-11 17:36:21 +00:00
// 1.4.8: disable compression for unicast as it almost never helps
//if (!network->config().disableCompression())
// outp.compress();
2019-08-20 04:52:33 +00:00
aqm_enqueue ( tPtr , network , outp , true , qosBucket , flowId ) ;
2013-07-04 20:56:19 +00:00
}
2016-08-24 22:26:18 +00:00
} else {
2014-09-30 15:38:03 +00:00
// Destination is bridged behind a remote peer
2014-06-21 19:19:10 +00:00
2016-08-24 22:26:18 +00:00
// We filter with a NULL destination ZeroTier address first. Filtrations
// for each ZT destination are also done below. This is the same rationale
// and design as for multicast.
2018-07-10 23:50:12 +00:00
if ( ! network - > filterOutgoingPacket ( tPtr , false , RR - > identity . address ( ) , Address ( ) , from , to , ( const uint8_t * ) data , len , etherType , vlanId , qosBucket ) ) {
2017-07-13 17:51:05 +00:00
RR - > t - > outgoingNetworkFrameDropped ( tPtr , network , from , to , etherType , vlanId , len , " filter blocked " ) ;
2016-08-24 22:26:18 +00:00
return ;
}
2014-06-21 19:19:10 +00:00
Address bridges [ ZT_MAX_BRIDGE_SPAM ] ;
unsigned int numBridges = 0 ;
2015-07-06 19:46:27 +00:00
/* Create an array of up to ZT_MAX_BRIDGE_SPAM recipients for this bridged frame. */
2014-06-21 19:19:10 +00:00
bridges [ 0 ] = network - > findBridgeTo ( to ) ;
2016-04-12 19:49:46 +00:00
std : : vector < Address > activeBridges ( network - > config ( ) . activeBridges ( ) ) ;
if ( ( bridges [ 0 ] ) & & ( bridges [ 0 ] ! = RR - > identity . address ( ) ) & & ( network - > config ( ) . permitsBridging ( bridges [ 0 ] ) ) ) {
2015-07-06 19:46:27 +00:00
/* We have a known bridge route for this MAC, send it there. */
2014-06-21 19:19:10 +00:00
+ + numBridges ;
2016-04-12 19:49:46 +00:00
} else if ( ! activeBridges . empty ( ) ) {
2014-06-21 19:19:10 +00:00
/* If there is no known route, spam to up to ZT_MAX_BRIDGE_SPAM active
2015-07-06 19:46:27 +00:00
* bridges . If someone responds , we ' ll learn the route . */
2016-04-12 19:49:46 +00:00
std : : vector < Address > : : const_iterator ab ( activeBridges . begin ( ) ) ;
if ( activeBridges . size ( ) < = ZT_MAX_BRIDGE_SPAM ) {
2014-06-21 19:19:10 +00:00
// If there are <= ZT_MAX_BRIDGE_SPAM active bridges, spam them all
2016-04-12 19:49:46 +00:00
while ( ab ! = activeBridges . end ( ) ) {
2015-07-07 17:00:34 +00:00
bridges [ numBridges + + ] = * ab ;
2014-06-21 19:19:10 +00:00
+ + ab ;
}
} else {
// Otherwise pick a random set of them
while ( numBridges < ZT_MAX_BRIDGE_SPAM ) {
2016-04-12 19:49:46 +00:00
if ( ab = = activeBridges . end ( ) )
ab = activeBridges . begin ( ) ;
if ( ( ( unsigned long ) RR - > node - > prng ( ) % ( unsigned long ) activeBridges . size ( ) ) = = 0 ) {
2015-07-07 17:00:34 +00:00
bridges [ numBridges + + ] = * ab ;
2014-06-21 19:19:10 +00:00
+ + ab ;
} else + + ab ;
}
2014-06-14 00:49:33 +00:00
}
}
2014-06-21 19:19:10 +00:00
for ( unsigned int b = 0 ; b < numBridges ; + + b ) {
2018-07-10 23:50:12 +00:00
if ( network - > filterOutgoingPacket ( tPtr , true , RR - > identity . address ( ) , bridges [ b ] , from , to , ( const uint8_t * ) data , len , etherType , vlanId , qosBucket ) ) {
2016-08-24 22:26:18 +00:00
Packet outp ( bridges [ b ] , RR - > identity . address ( ) , Packet : : VERB_EXT_FRAME ) ;
outp . append ( network - > id ( ) ) ;
outp . append ( ( uint8_t ) 0x00 ) ;
to . appendTo ( outp ) ;
from . appendTo ( outp ) ;
outp . append ( ( uint16_t ) etherType ) ;
outp . append ( data , len ) ;
2020-09-11 17:36:21 +00:00
// 1.4.8: disable compression for unicast as it almost never helps
//if (!network->config().disableCompression())
// outp.compress();
2019-08-20 04:52:33 +00:00
aqm_enqueue ( tPtr , network , outp , true , qosBucket , flowId ) ;
2016-08-24 22:26:18 +00:00
} else {
2017-07-13 17:51:05 +00:00
RR - > t - > outgoingNetworkFrameDropped ( tPtr , network , from , to , etherType , vlanId , len , " filter blocked (bridge replication) " ) ;
2016-08-24 22:26:18 +00:00
}
2014-06-21 15:59:08 +00:00
}
2013-07-04 20:56:19 +00:00
}
}
2020-05-12 08:35:48 +00:00
void Switch : : aqm_enqueue ( void * tPtr , const SharedPtr < Network > & network , Packet & packet , bool encrypt , int qosBucket , int32_t flowId )
2018-07-10 23:50:12 +00:00
{
2019-03-14 21:29:15 +00:00
if ( ! network - > qosEnabled ( ) ) {
2019-08-20 04:52:33 +00:00
send ( tPtr , packet , encrypt , flowId ) ;
2018-07-10 23:50:12 +00:00
return ;
}
NetworkQoSControlBlock * nqcb = _netQueueControlBlock [ network - > id ( ) ] ;
if ( ! nqcb ) {
nqcb = new NetworkQoSControlBlock ( ) ;
_netQueueControlBlock [ network - > id ( ) ] = nqcb ;
// Initialize ZT_QOS_NUM_BUCKETS queues and place them in the INACTIVE list
// These queues will be shuffled between the new/old/inactive lists by the enqueue/dequeue algorithm
2020-05-12 08:35:48 +00:00
for ( int i = 0 ; i < ZT_AQM_NUM_BUCKETS ; i + + ) {
2018-07-10 23:50:12 +00:00
nqcb - > inactiveQueues . push_back ( new ManagedQueue ( i ) ) ;
}
}
2019-08-20 04:52:33 +00:00
// Don't apply QoS scheduling to ZT protocol traffic
2018-07-10 23:50:12 +00:00
if ( packet . verb ( ) ! = Packet : : VERB_FRAME & & packet . verb ( ) ! = Packet : : VERB_EXT_FRAME ) {
2019-08-20 04:52:33 +00:00
send ( tPtr , packet , encrypt , flowId ) ;
2019-08-23 16:23:39 +00:00
}
2018-07-10 23:50:12 +00:00
_aqm_m . lock ( ) ;
// Enqueue packet and move queue to appropriate list
const Address dest ( packet . destination ( ) ) ;
2019-08-20 04:52:33 +00:00
TXQueueEntry * txEntry = new TXQueueEntry ( dest , RR - > node - > now ( ) , packet , encrypt , flowId ) ;
2020-05-12 08:35:48 +00:00
2018-07-10 23:50:12 +00:00
ManagedQueue * selectedQueue = nullptr ;
2020-05-12 08:35:48 +00:00
for ( size_t i = 0 ; i < ZT_AQM_NUM_BUCKETS ; i + + ) {
2018-07-10 23:50:12 +00:00
if ( i < nqcb - > oldQueues . size ( ) ) { // search old queues first (I think this is best since old would imply most recent usage of the queue)
if ( nqcb - > oldQueues [ i ] - > id = = qosBucket ) {
selectedQueue = nqcb - > oldQueues [ i ] ;
}
} if ( i < nqcb - > newQueues . size ( ) ) { // search new queues (this would imply not often-used queues)
if ( nqcb - > newQueues [ i ] - > id = = qosBucket ) {
selectedQueue = nqcb - > newQueues [ i ] ;
}
} if ( i < nqcb - > inactiveQueues . size ( ) ) { // search inactive queues
if ( nqcb - > inactiveQueues [ i ] - > id = = qosBucket ) {
selectedQueue = nqcb - > inactiveQueues [ i ] ;
// move queue to end of NEW queue list
2020-05-12 08:35:48 +00:00
selectedQueue - > byteCredit = ZT_AQM_QUANTUM ;
2018-07-10 23:50:12 +00:00
// DEBUG_INFO("moving q=%p from INACTIVE to NEW list", selectedQueue);
nqcb - > newQueues . push_back ( selectedQueue ) ;
nqcb - > inactiveQueues . erase ( nqcb - > inactiveQueues . begin ( ) + i ) ;
}
}
}
if ( ! selectedQueue ) {
return ;
}
selectedQueue - > q . push_back ( txEntry ) ;
selectedQueue - > byteLength + = txEntry - > packet . payloadLength ( ) ;
nqcb - > _currEnqueuedPackets + + ;
// DEBUG_INFO("nq=%2lu, oq=%2lu, iq=%2lu, nqcb.size()=%3d, bucket=%2d, q=%p", nqcb->newQueues.size(), nqcb->oldQueues.size(), nqcb->inactiveQueues.size(), nqcb->_currEnqueuedPackets, qosBucket, selectedQueue);
// Drop a packet if necessary
ManagedQueue * selectedQueueToDropFrom = nullptr ;
2020-05-12 08:35:48 +00:00
if ( nqcb - > _currEnqueuedPackets > ZT_AQM_MAX_ENQUEUED_PACKETS )
2018-07-10 23:50:12 +00:00
{
// DEBUG_INFO("too many enqueued packets (%d), finding packet to drop", nqcb->_currEnqueuedPackets);
int maxQueueLength = 0 ;
2020-05-12 08:35:48 +00:00
for ( size_t i = 0 ; i < ZT_AQM_NUM_BUCKETS ; i + + ) {
2018-07-10 23:50:12 +00:00
if ( i < nqcb - > oldQueues . size ( ) ) {
if ( nqcb - > oldQueues [ i ] - > byteLength > maxQueueLength ) {
maxQueueLength = nqcb - > oldQueues [ i ] - > byteLength ;
selectedQueueToDropFrom = nqcb - > oldQueues [ i ] ;
}
} if ( i < nqcb - > newQueues . size ( ) ) {
if ( nqcb - > newQueues [ i ] - > byteLength > maxQueueLength ) {
maxQueueLength = nqcb - > newQueues [ i ] - > byteLength ;
selectedQueueToDropFrom = nqcb - > newQueues [ i ] ;
}
} if ( i < nqcb - > inactiveQueues . size ( ) ) {
if ( nqcb - > inactiveQueues [ i ] - > byteLength > maxQueueLength ) {
maxQueueLength = nqcb - > inactiveQueues [ i ] - > byteLength ;
selectedQueueToDropFrom = nqcb - > inactiveQueues [ i ] ;
}
}
}
if ( selectedQueueToDropFrom ) {
// DEBUG_INFO("dropping packet from head of largest queue (%d payload bytes)", maxQueueLength);
int sizeOfDroppedPacket = selectedQueueToDropFrom - > q . front ( ) - > packet . payloadLength ( ) ;
delete selectedQueueToDropFrom - > q . front ( ) ;
selectedQueueToDropFrom - > q . pop_front ( ) ;
selectedQueueToDropFrom - > byteLength - = sizeOfDroppedPacket ;
nqcb - > _currEnqueuedPackets - - ;
}
}
_aqm_m . unlock ( ) ;
aqm_dequeue ( tPtr ) ;
}
uint64_t Switch : : control_law ( uint64_t t , int count )
{
2020-05-12 08:35:48 +00:00
return ( uint64_t ) ( t + ZT_AQM_INTERVAL / sqrt ( count ) ) ;
2018-07-10 23:50:12 +00:00
}
2019-08-23 16:23:39 +00:00
/**
 * CoDel "dodequeue" helper: peek (do not pop) the head of a managed queue and
 * decide whether the queue's sojourn time has been above target long enough
 * that it is OK to drop from it.
 *
 * @param q Managed queue to examine
 * @param now Current time
 * @return dqr with p = head entry (NULL if queue empty) and ok_to_drop flag
 */
Switch::dqr Switch::dodequeue(ManagedQueue *q,uint64_t now)
{
	dqr r;
	r.ok_to_drop = false;
	// BUGFIX: calling front() on an empty standard sequence container is
	// undefined behavior -- it does not return NULL. Check emptiness explicitly.
	r.p = (q->q.empty()) ? (TXQueueEntry *)0 : q->q.front();
	if (r.p == NULL) {
		q->first_above_time = 0;
		return r;
	}
	uint64_t sojourn_time = now - r.p->creationTime;
	if ((sojourn_time < ZT_AQM_TARGET)||(q->byteLength <= ZT_DEFAULT_MTU)) {
		// Went below target -- stay below for at least one interval
		q->first_above_time = 0;
	} else {
		if (q->first_above_time == 0) {
			// Just went above from below. If still above at first_above_time,
			// it will be OK to drop.
			q->first_above_time = now + ZT_AQM_INTERVAL;
		} else if (now >= q->first_above_time) {
			r.ok_to_drop = true;
		}
	}
	return r;
}
/**
 * CoDel dequeue: returns the entry at the head of the queue (still owned by
 * the queue -- the caller pops and frees it), dropping head entries first if
 * the queue is in the CoDel "dropping" state.
 *
 * @param q Managed queue to dequeue from
 * @param isNew Whether q came from the NEW list (currently unused here)
 * @param now Current time
 * @return Head entry after any drops, or NULL if the queue is empty
 */
Switch::TXQueueEntry *Switch::CoDelDequeue(ManagedQueue *q,bool isNew,uint64_t now)
{
	dqr r = dodequeue(q,now);
	if (q->dropping) {
		if (!r.ok_to_drop)
			q->dropping = false;
		while ((now >= q->drop_next)&&(q->dropping)) {
			// Drop the head entry (r.p). BUGFIX: entries are heap-allocated in
			// aqm_enqueue() but were popped here without delete (memory leak) and
			// without reducing the queue's byte accounting.
			// NOTE(review): nqcb->_currEnqueuedPackets is not reachable from here
			// and so still over-counts after CoDel drops -- TODO confirm/refactor.
			q->byteLength -= r.p->packet.payloadLength();
			delete r.p;
			q->q.pop_front();
			r = dodequeue(q,now);
			if (!r.ok_to_drop) {
				// Leave dropping state
				q->dropping = false;
			} else {
				++(q->count);
				// Schedule the next drop
				q->drop_next = control_law(q->drop_next,q->count);
			}
		}
	} else if (r.ok_to_drop) {
		// Enter dropping state: drop the head (same leak/accounting fix as above)
		q->byteLength -= r.p->packet.payloadLength();
		delete r.p;
		q->q.pop_front();
		r = dodequeue(q,now);
		q->dropping = true;
		q->count = ((q->count > 2)&&((now - q->drop_next) < (8 * ZT_AQM_INTERVAL))) ?
			(q->count - 2) : 1;
		q->drop_next = control_law(now,q->count);
	}
	return r.p;
}
void Switch : : aqm_dequeue ( void * tPtr )
{
// Cycle through network-specific QoS control blocks
for ( std : : map < uint64_t , NetworkQoSControlBlock * > : : iterator nqcb ( _netQueueControlBlock . begin ( ) ) ; nqcb ! = _netQueueControlBlock . end ( ) ; ) {
if ( ! ( * nqcb ) . second - > _currEnqueuedPackets ) {
return ;
}
uint64_t now = RR - > node - > now ( ) ;
TXQueueEntry * entryToEmit = nullptr ;
std : : vector < ManagedQueue * > * currQueues = & ( ( * nqcb ) . second - > newQueues ) ;
std : : vector < ManagedQueue * > * oldQueues = & ( ( * nqcb ) . second - > oldQueues ) ;
std : : vector < ManagedQueue * > * inactiveQueues = & ( ( * nqcb ) . second - > inactiveQueues ) ;
_aqm_m . lock ( ) ;
// Attempt dequeue from queues in NEW list
bool examiningNewQueues = true ;
while ( currQueues - > size ( ) ) {
ManagedQueue * queueAtFrontOfList = currQueues - > front ( ) ;
if ( queueAtFrontOfList - > byteCredit < 0 ) {
2020-05-12 08:35:48 +00:00
queueAtFrontOfList - > byteCredit + = ZT_AQM_QUANTUM ;
2018-07-10 23:50:12 +00:00
// Move to list of OLD queues
// DEBUG_INFO("moving q=%p from NEW to OLD list", queueAtFrontOfList);
oldQueues - > push_back ( queueAtFrontOfList ) ;
currQueues - > erase ( currQueues - > begin ( ) ) ;
} else {
entryToEmit = CoDelDequeue ( queueAtFrontOfList , examiningNewQueues , now ) ;
if ( ! entryToEmit ) {
// Move to end of list of OLD queues
// DEBUG_INFO("moving q=%p from NEW to OLD list", queueAtFrontOfList);
oldQueues - > push_back ( queueAtFrontOfList ) ;
currQueues - > erase ( currQueues - > begin ( ) ) ;
}
else {
int len = entryToEmit - > packet . payloadLength ( ) ;
queueAtFrontOfList - > byteLength - = len ;
queueAtFrontOfList - > byteCredit - = len ;
// Send the packet!
queueAtFrontOfList - > q . pop_front ( ) ;
2019-08-20 04:52:33 +00:00
send ( tPtr , entryToEmit - > packet , entryToEmit - > encrypt , entryToEmit - > flowId ) ;
2018-07-10 23:50:12 +00:00
( * nqcb ) . second - > _currEnqueuedPackets - - ;
}
if ( queueAtFrontOfList ) {
//DEBUG_INFO("dequeuing from q=%p, len=%lu in NEW list (byteCredit=%d)", queueAtFrontOfList, queueAtFrontOfList->q.size(), queueAtFrontOfList->byteCredit);
}
break ;
}
}
// Attempt dequeue from queues in OLD list
examiningNewQueues = false ;
currQueues = & ( ( * nqcb ) . second - > oldQueues ) ;
while ( currQueues - > size ( ) ) {
ManagedQueue * queueAtFrontOfList = currQueues - > front ( ) ;
if ( queueAtFrontOfList - > byteCredit < 0 ) {
2020-05-12 08:35:48 +00:00
queueAtFrontOfList - > byteCredit + = ZT_AQM_QUANTUM ;
2018-07-10 23:50:12 +00:00
oldQueues - > push_back ( queueAtFrontOfList ) ;
currQueues - > erase ( currQueues - > begin ( ) ) ;
} else {
entryToEmit = CoDelDequeue ( queueAtFrontOfList , examiningNewQueues , now ) ;
if ( ! entryToEmit ) {
//DEBUG_INFO("moving q=%p from OLD to INACTIVE list", queueAtFrontOfList);
// Move to inactive list of queues
inactiveQueues - > push_back ( queueAtFrontOfList ) ;
currQueues - > erase ( currQueues - > begin ( ) ) ;
}
else {
int len = entryToEmit - > packet . payloadLength ( ) ;
queueAtFrontOfList - > byteLength - = len ;
queueAtFrontOfList - > byteCredit - = len ;
queueAtFrontOfList - > q . pop_front ( ) ;
2019-08-20 04:52:33 +00:00
send ( tPtr , entryToEmit - > packet , entryToEmit - > encrypt , entryToEmit - > flowId ) ;
2018-07-10 23:50:12 +00:00
( * nqcb ) . second - > _currEnqueuedPackets - - ;
}
if ( queueAtFrontOfList ) {
//DEBUG_INFO("dequeuing from q=%p, len=%lu in OLD list (byteCredit=%d)", queueAtFrontOfList, queueAtFrontOfList->q.size(), queueAtFrontOfList->byteCredit);
}
break ;
}
}
nqcb + + ;
_aqm_m . unlock ( ) ;
}
}
void Switch : : removeNetworkQoSControlBlock ( uint64_t nwid )
{
NetworkQoSControlBlock * nq = _netQueueControlBlock [ nwid ] ;
if ( nq ) {
_netQueueControlBlock . erase ( nwid ) ;
delete nq ;
nq = NULL ;
}
}
2020-05-12 08:35:48 +00:00
void Switch : : send ( void * tPtr , Packet & packet , bool encrypt , int32_t flowId )
2013-07-04 20:56:19 +00:00
{
2017-08-24 00:14:06 +00:00
const Address dest ( packet . destination ( ) ) ;
if ( dest = = RR - > identity . address ( ) )
2013-07-13 02:07:48 +00:00
return ;
2019-08-20 04:52:33 +00:00
if ( ! _trySend ( tPtr , packet , encrypt , flowId ) ) {
2017-08-24 00:14:06 +00:00
{
Mutex : : Lock _l ( _txQueue_m ) ;
2018-06-07 19:58:07 +00:00
if ( _txQueue . size ( ) > = ZT_TX_QUEUE_SIZE ) {
_txQueue . pop_front ( ) ;
}
2019-08-20 04:52:33 +00:00
_txQueue . push_back ( TXQueueEntry ( dest , RR - > node - > now ( ) , packet , encrypt , flowId ) ) ;
2017-08-24 00:14:06 +00:00
}
if ( ! RR - > topology - > getPeer ( tPtr , dest ) )
requestWhois ( tPtr , RR - > node - > now ( ) , dest ) ;
2013-07-04 20:56:19 +00:00
}
}
2017-10-02 22:52:57 +00:00
void Switch : : requestWhois ( void * tPtr , const int64_t now , const Address & addr )
2013-07-11 20:19:06 +00:00
{
2017-07-07 23:58:05 +00:00
if ( addr = = RR - > identity . address ( ) )
return ;
2017-08-23 23:42:17 +00:00
2013-07-11 21:52:04 +00:00
{
2017-08-23 23:42:17 +00:00
Mutex : : Lock _l ( _lastSentWhoisRequest_m ) ;
2017-10-02 22:52:57 +00:00
int64_t & last = _lastSentWhoisRequest [ addr ] ;
2017-08-23 23:42:17 +00:00
if ( ( now - last ) < ZT_WHOIS_RETRY_DELAY )
return ;
else last = now ;
}
const SharedPtr < Peer > upstream ( RR - > topology - > getUpstreamPeer ( ) ) ;
if ( upstream ) {
2020-05-12 08:35:48 +00:00
int32_t flowId = ZT_QOS_NO_FLOW ;
2017-08-23 23:42:17 +00:00
Packet outp ( upstream - > address ( ) , RR - > identity . address ( ) , Packet : : VERB_WHOIS ) ;
addr . appendTo ( outp ) ;
2019-08-20 04:52:33 +00:00
send ( tPtr , outp , true , flowId ) ;
2013-07-11 21:52:04 +00:00
}
2013-07-11 20:19:06 +00:00
}
2017-03-28 00:03:17 +00:00
void Switch : : doAnythingWaitingForPeer ( void * tPtr , const SharedPtr < Peer > & peer )
2013-07-04 20:56:19 +00:00
{
2017-08-23 23:42:17 +00:00
{
Mutex : : Lock _l ( _lastSentWhoisRequest_m ) ;
_lastSentWhoisRequest . erase ( peer - > address ( ) ) ;
2013-07-11 21:52:04 +00:00
}
2017-10-02 22:52:57 +00:00
const int64_t now = RR - > node - > now ( ) ;
2017-08-08 20:21:10 +00:00
for ( unsigned int ptr = 0 ; ptr < ZT_RX_QUEUE_SIZE ; + + ptr ) {
RXQueueEntry * const rq = & ( _rxQueue [ ptr ] ) ;
2018-07-20 21:01:58 +00:00
Mutex : : Lock rql ( rq - > lock ) ;
2017-08-08 20:21:10 +00:00
if ( ( rq - > timestamp ) & & ( rq - > complete ) ) {
2020-05-12 08:35:48 +00:00
if ( ( rq - > frag0 . tryDecode ( RR , tPtr , rq - > flowId ) ) | | ( ( now - rq - > timestamp ) > ZT_RECEIVE_QUEUE_TIMEOUT ) )
2017-08-08 20:21:10 +00:00
rq - > timestamp = 0 ;
2013-07-11 21:52:04 +00:00
}
}
2017-08-23 23:42:17 +00:00
{
2013-07-11 21:52:04 +00:00
Mutex : : Lock _l ( _txQueue_m ) ;
2015-09-04 21:56:39 +00:00
for ( std : : list < TXQueueEntry > : : iterator txi ( _txQueue . begin ( ) ) ; txi ! = _txQueue . end ( ) ; ) {
if ( txi - > dest = = peer - > address ( ) ) {
2019-08-20 04:52:33 +00:00
if ( _trySend ( tPtr , txi - > packet , txi - > encrypt , txi - > flowId ) ) {
2015-09-04 21:56:39 +00:00
_txQueue . erase ( txi + + ) ;
2014-09-24 20:45:58 +00:00
} else {
2017-08-23 23:42:17 +00:00
+ + txi ;
2014-09-24 20:45:58 +00:00
}
2015-04-03 23:52:53 +00:00
} else {
2017-08-23 23:42:17 +00:00
+ + txi ;
2015-04-03 23:52:53 +00:00
}
2014-09-24 20:45:58 +00:00
}
}
2017-08-23 23:42:17 +00:00
}
2014-09-24 20:45:58 +00:00
2017-10-02 22:52:57 +00:00
// Periodic housekeeping: retry queued transmissions, retry decode of queued
// received packets, and prune the unite-attempt and WHOIS rate-limit maps.
// Returns the number of milliseconds until it should be called again.
unsigned long Switch::doTimerTasks(void *tPtr,int64_t now)
{
	// Rate limit this work to once per ZT_WHOIS_RETRY_DELAY milliseconds
	const uint64_t timeSinceLastCheck = now - _lastCheckedQueues;
	if (timeSinceLastCheck < ZT_WHOIS_RETRY_DELAY)
		return (unsigned long)(ZT_WHOIS_RETRY_DELAY - timeSinceLastCheck);
	_lastCheckedQueues = now;

	std::vector<Address> needWhois;
	{
		Mutex::Lock _l(_txQueue_m);

		for(std::list<TXQueueEntry>::iterator txi(_txQueue.begin());txi!=_txQueue.end();) {
			if (_trySend(tPtr,txi->packet,txi->encrypt,txi->flowId)) {
				// Sent successfully -- remove from queue
				_txQueue.erase(txi++);
			} else if ((now - txi->creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
				// Waited too long -- give up and drop
				_txQueue.erase(txi++);
			} else {
				// Still waiting: if the destination's identity is unknown, record
				// it so we can WHOIS it after releasing _txQueue_m
				if (!RR->topology->getPeer(tPtr,txi->dest))
					needWhois.push_back(txi->dest);
				++txi;
			}
		}
	}
	// Issue the WHOIS requests collected above (outside the _txQueue_m lock)
	for(std::vector<Address>::const_iterator i(needWhois.begin());i!=needWhois.end();++i)
		requestWhois(tPtr,now,*i);

	// Retry decode of queued received packets; clear slots that decode or time
	// out, and WHOIS unknown sources of still-pending ones
	for(unsigned int ptr=0;ptr<ZT_RX_QUEUE_SIZE;++ptr) {
		RXQueueEntry *const rq = &(_rxQueue[ptr]);
		Mutex::Lock rql(rq->lock);
		if ((rq->timestamp)&&(rq->complete)) {
			if ((rq->frag0.tryDecode(RR,tPtr,rq->flowId))||((now - rq->timestamp) > ZT_RECEIVE_QUEUE_TIMEOUT)) {
				rq->timestamp = 0;
			} else {
				const Address src(rq->frag0.source());
				if (!RR->topology->getPeer(tPtr,src))
					requestWhois(tPtr,now,src);
			}
		}
	}

	// Prune stale entries from the unite-attempt map
	{
		Mutex::Lock _l(_lastUniteAttempt_m);
		Hashtable<_LastUniteKey,uint64_t>::Iterator i(_lastUniteAttempt);
		_LastUniteKey *k = (_LastUniteKey *)0;
		uint64_t *v = (uint64_t *)0;
		while (i.next(k,v)) {
			if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 8))
				_lastUniteAttempt.erase(*k);
		}
	}

	// Prune stale entries from the WHOIS rate-limit map
	{
		Mutex::Lock _l(_lastSentWhoisRequest_m);
		Hashtable<Address,int64_t>::Iterator i(_lastSentWhoisRequest);
		Address *a = (Address *)0;
		int64_t *ts = (int64_t *)0;
		while (i.next(a,ts)) {
			if ((now - *ts) > (ZT_WHOIS_RETRY_DELAY * 2))
				_lastSentWhoisRequest.erase(*a);
		}
	}
	return ZT_WHOIS_RETRY_DELAY;
}
2017-10-02 22:52:57 +00:00
bool Switch : : _shouldUnite ( const int64_t now , const Address & source , const Address & destination )
2017-02-04 08:04:44 +00:00
{
Mutex : : Lock _l ( _lastUniteAttempt_m ) ;
uint64_t & ts = _lastUniteAttempt [ _LastUniteKey ( source , destination ) ] ;
if ( ( now - ts ) > = ZT_MIN_UNITE_INTERVAL ) {
ts = now ;
return true ;
}
return false ;
}
2020-05-12 08:35:48 +00:00
bool Switch : : _trySend ( void * tPtr , Packet & packet , bool encrypt , int32_t flowId )
2013-07-04 20:56:19 +00:00
{
2017-02-01 20:00:25 +00:00
SharedPtr < Path > viaPath ;
2017-10-02 22:52:57 +00:00
const int64_t now = RR - > node - > now ( ) ;
2017-02-01 20:00:25 +00:00
const Address destination ( packet . destination ( ) ) ;
2017-02-01 21:52:53 +00:00
2017-03-28 00:03:17 +00:00
const SharedPtr < Peer > peer ( RR - > topology - > getPeer ( tPtr , destination ) ) ;
2017-02-01 20:00:25 +00:00
if ( peer ) {
2021-09-02 04:37:49 +00:00
if ( ( peer - > bondingPolicy ( ) = = ZT_BOND_POLICY_BROADCAST )
2020-05-12 08:35:48 +00:00
& & ( packet . verb ( ) = = Packet : : VERB_FRAME | | packet . verb ( ) = = Packet : : VERB_EXT_FRAME ) ) {
const SharedPtr < Peer > relay ( RR - > topology - > getUpstreamPeer ( ) ) ;
Mutex : : Lock _l ( peer - > _paths_m ) ;
for ( int i = 0 ; i < ZT_MAX_PEER_NETWORK_PATHS ; + + i ) {
if ( peer - > _paths [ i ] . p & & peer - > _paths [ i ] . p - > alive ( now ) ) {
_sendViaSpecificPath ( tPtr , peer , peer - > _paths [ i ] . p , now , packet , encrypt , flowId ) ;
}
}
return true ;
2019-08-20 04:52:33 +00:00
}
else {
viaPath = peer - > getAppropriatePath ( now , false , flowId ) ;
if ( ! viaPath ) {
peer - > tryMemorizedPath ( tPtr , now ) ; // periodically attempt memorized or statically defined paths, if any are known
const SharedPtr < Peer > relay ( RR - > topology - > getUpstreamPeer ( ) ) ;
if ( ( ! relay ) | | ( ! ( viaPath = relay - > getAppropriatePath ( now , false , flowId ) ) ) ) {
if ( ! ( viaPath = peer - > getAppropriatePath ( now , true , flowId ) ) )
return false ;
}
2017-02-01 21:52:53 +00:00
}
2020-05-12 08:35:48 +00:00
if ( viaPath ) {
_sendViaSpecificPath ( tPtr , peer , viaPath , now , packet , encrypt , flowId ) ;
return true ;
}
2017-02-01 21:52:53 +00:00
}
2017-02-01 20:00:25 +00:00
}
2020-05-12 08:35:48 +00:00
return false ;
}
2015-07-07 17:00:34 +00:00
2020-05-12 08:35:48 +00:00
// Transmit a packet over one specific physical path: mark it trusted or armor
// (encrypt/authenticate) it, record it for bonding statistics, send the first
// MTU-sized chunk, then fragment and send any remainder.
void Switch::_sendViaSpecificPath(void *tPtr,SharedPtr<Peer> peer,SharedPtr<Path> viaPath,int64_t now,Packet &packet,bool encrypt,int32_t flowId)
{
	// Look up per-path outbound MTU and trusted path ID (if configured)
	unsigned int mtu = ZT_DEFAULT_PHYSMTU;
	uint64_t trustedPathId = 0;
	RR->topology->getOutboundPathInfo(viaPath->address(),mtu,trustedPathId);

	// First chunk is whatever fits in a single physical packet
	unsigned int chunkSize = std::min(packet.size(),mtu);
	packet.setFragmented(chunkSize < packet.size());

	if (trustedPathId) {
		// Trusted path: tag the packet instead of armoring it
		packet.setTrusted(trustedPathId);
	} else {
		packet.armor(peer->key(),encrypt,peer->aesKeysIfSupported());
		RR->node->expectReplyTo(packet.packetId());
	}

	peer->recordOutgoingPacket(viaPath,packet.packetId(),packet.payloadLength(),packet.verb(),flowId,now);

	if (viaPath->send(RR,tPtr,packet.data(),chunkSize,now)) {
		if (chunkSize < packet.size()) {
			// Too big for one packet, fragment the rest
			unsigned int fragStart = chunkSize;
			unsigned int remaining = packet.size() - chunkSize;
			// Each fragment carries (mtu - ZT_PROTO_MIN_FRAGMENT_LENGTH) payload
			unsigned int fragsRemaining = (remaining / (mtu - ZT_PROTO_MIN_FRAGMENT_LENGTH));
			if ((fragsRemaining * (mtu - ZT_PROTO_MIN_FRAGMENT_LENGTH)) < remaining)
				++fragsRemaining; // round up for the partial final fragment
			const unsigned int totalFragments = fragsRemaining + 1; // +1 counts the initial chunk
			for(unsigned int fno=1;fno<totalFragments;++fno) {
				chunkSize = std::min(remaining,(unsigned int)(mtu - ZT_PROTO_MIN_FRAGMENT_LENGTH));
				Packet::Fragment frag(packet,fragStart,chunkSize,fno,totalFragments);
				viaPath->send(RR,tPtr,frag.data(),frag.size(),now);
				fragStart += chunkSize;
				remaining -= chunkSize;
			}
		}
	}
}
} // namespace ZeroTier