/*
 * ZeroTier One - Global Peer to Peer Ethernet
 * Copyright (C) 2011-2014  ZeroTier Networks LLC
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */
#include <algorithm>

#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "SharedPtr.hpp"
#include "Multicaster.hpp"
#include "Topology.hpp"
#include "Switch.hpp"
#include "Packet.hpp"
#include "Peer.hpp"
#include "CMWC4096.hpp"
#include "C25519.hpp"
#include "NodeConfig.hpp"
#include "CertificateOfMembership.hpp"
#include "Logger.hpp"

namespace ZeroTier {

Multicaster::Multicaster(const RuntimeEnvironment *renv) :
	RR(renv)
{
}

Multicaster::~Multicaster()
{
}
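
// Append up to 'limit' randomly chosen members of the given multicast group to
// 'appendTo' and return the number of addresses actually appended. Used when
// answering MULTICAST_GATHER queries; the querying peer itself is never included
// in the reply.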
unsigned int Multicaster::gather(const Address &queryingPeer,uint64_t nwid,const MulticastGroup &mg,Packet &appendTo,unsigned int limit) const
{
	unsigned char *p;
	unsigned int added = 0,i,k,rptr,totalKnown = 0;
	uint64_t a,picked[(ZT_PROTO_MAX_PACKET_LENGTH / 5) + 1];

	if (!limit)
		return 0;
	if (limit > 0xffff) // TODO: multiple return packets not yet supported
		limit = 0xffff;

	{ // Return myself if I am a member of this group
		SharedPtr<Network> network(RR->nc->network(nwid));
		if ((network)&&(network->subscribedToMulticastGroup(mg))) {
			RR->identity.address().appendTo(appendTo);
			++totalKnown;
			++added;
		}
	}

	Mutex::Lock _l(_groups_m);
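
	// Gather results are appended to the packet in this layout; the two counters
	// are reserved here and back-filled via setAt() once the member list is done:
	//   <[4] total number of known members (uint32_t)>
	//   <[2] number of member addresses appended to this reply (uint16_t)>
	//   <[5 * n] 5-byte ZeroTier addresses>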
	const unsigned int totalAt = appendTo.size();
	appendTo.addSize(4); // sizeof(uint32_t)
	const unsigned int addedAt = appendTo.size();
	appendTo.addSize(2); // sizeof(uint16_t)

	std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::const_iterator gs(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
	if ((gs != _groups.end())&&(!gs->second.members.empty())) {
		totalKnown += (unsigned int)gs->second.members.size();

		// Members are returned in random order so that repeated gather queries
		// will return different subsets of a large multicast group.
		k = 0;
		while ((added < limit)&&(k < gs->second.members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_PROTO_MAX_PACKET_LENGTH)) {
			rptr = (unsigned int)RR->prng->next32();

restart_member_scan:
			a = gs->second.members[rptr % (unsigned int)gs->second.members.size()].address.toInt();
			for(i=0;i<k;++i) {
				if (picked[i] == a) {
					++rptr;
					goto restart_member_scan;
				}
			}
			picked[k++] = a;
			if (queryingPeer.toInt() != a) { // do not return the peer that is making the request as a result
				p = (unsigned char *)appendTo.appendField(ZT_ADDRESS_LENGTH);
				*(p++) = (unsigned char)((a >> 32) & 0xff);
				*(p++) = (unsigned char)((a >> 24) & 0xff);
				*(p++) = (unsigned char)((a >> 16) & 0xff);
				*(p++) = (unsigned char)((a >> 8) & 0xff);
				*p = (unsigned char)(a & 0xff);
				++added;
			}
		}
	}

	appendTo.setAt(totalAt,(uint32_t)totalKnown);
	appendTo.setAt(addedAt,(uint16_t)added);

	//TRACE("..MC Multicaster::gather() attached %u of %u peers for %.16llx/%s (2)",n,(unsigned int)(gs->second.members.size() - skipped),nwid,mg.toString().c_str());

	return added;
}

std::vector<Address> Multicaster::getMembers(uint64_t nwid,const MulticastGroup &mg,unsigned int limit) const
{
	std::vector<Address> ls;
	Mutex::Lock _l(_groups_m);
	std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::const_iterator gs(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
	if (gs == _groups.end())
		return ls;
	for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs->second.members.rbegin());m!=gs->second.members.rend();++m) {
		ls.push_back(m->address);
		if (ls.size() >= limit)
			break;
	}
	return ls;
}
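
// Send a multicast frame to up to 'limit' members of 'mg' on network 'nwid'. If
// enough members are already known the frame goes out immediately via sendOnly();
// otherwise it is queued as an OutboundMulticast and additional members are
// gathered (explicitly via MULTICAST_GATHER to a supernode, or implicitly via the
// gather limit passed to the outbound frame). Addresses in 'alwaysSendTo' are
// always sent the frame.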
void Multicaster::send(
	const CertificateOfMembership *com,
	unsigned int limit,
	uint64_t now,
	uint64_t nwid,
	const std::vector<Address> &alwaysSendTo,
	const MulticastGroup &mg,
	const MAC &src,
	unsigned int etherType,
	const void *data,
	unsigned int len)
{
	Mutex::Lock _l(_groups_m);
	MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];

	if (gs.members.size() >= limit) {
		// If we already have enough members, just send and we're done. We can
		// skip the TX queue and skip the overhead of maintaining a send log by
		// using sendOnly().
		OutboundMulticast out;

		out.init(
			RR,
			now,
			nwid,
			com,
			limit,
			0,
			src,
			mg,
			etherType,
			data,
			len);

		unsigned int count = 0;

		for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
			{ // TODO / LEGACY: don't send new multicast frame to old peers (if we know their version)
				SharedPtr<Peer> p(RR->topology->getPeer(*ast));
				if ((p)&&(p->remoteVersionKnown())&&(p->remoteVersionMajor() < 1))
					continue;
			}

			if (count++ >= limit)
				break;
			out.sendOnly(RR,*ast);
		}

		for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs.members.rbegin());m!=gs.members.rend();++m) {
			{ // TODO / LEGACY: don't send new multicast frame to old peers (if we know their version)
				SharedPtr<Peer> p(RR->topology->getPeer(m->address));
				if ((p)&&(p->remoteVersionKnown())&&(p->remoteVersionMajor() < 1))
					continue;
			}

			if (count++ >= limit)
				break;
			if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),m->address) == alwaysSendTo.end())
				out.sendOnly(RR,m->address);
		}
	} else {
		unsigned int gatherLimit = (limit - (unsigned int)gs.members.size()) + 1;

		if ((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY) {
			gs.lastExplicitGather = now;
			SharedPtr<Peer> sn(RR->topology->getBestSupernode());
			if (sn) {
				TRACE(">>MC GATHER up to %u in %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());

				Packet outp(sn->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
				outp.append(nwid);
				outp.append((uint8_t)0);
				mg.mac().appendTo(outp);
				outp.append((uint32_t)mg.adi());
				outp.append((uint32_t)gatherLimit); // +1 just means we'll have an extra in the queue if available
				outp.armor(sn->key(),true);
				sn->send(RR,outp.data(),outp.size(),now);
			}
			gatherLimit = 0; // implicit not needed
		} else if ((now - gs.lastImplicitGather) > ZT_MULTICAST_IMPLICIT_GATHER_DELAY) {
			gs.lastImplicitGather = now;
		} else {
			gatherLimit = 0;
		}

		gs.txQueue.push_back(OutboundMulticast());
		OutboundMulticast &out = gs.txQueue.back();
		out.init(
			RR,
			now,
			nwid,
			com,
			limit,
			gatherLimit,
			src,
			mg,
			etherType,
			data,
			len);

		for(std::vector<Address>::const_iterator ast(alwaysSendTo.begin());ast!=alwaysSendTo.end();++ast) {
			{ // TODO / LEGACY: don't send new multicast frame to old peers (if we know their version)
				SharedPtr<Peer> p(RR->topology->getPeer(*ast));
				if ((p)&&(p->remoteVersionKnown())&&(p->remoteVersionMajor() < 1))
					continue;
			}
			out.sendAndLog(RR,*ast);
		}

		for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs.members.rbegin());m!=gs.members.rend();++m) {
			{ // TODO / LEGACY: don't send new multicast frame to old peers (if we know their version)
				SharedPtr<Peer> p(RR->topology->getPeer(m->address));
				if ((p)&&(p->remoteVersionKnown())&&(p->remoteVersionMajor() < 1))
					continue;
			}
			if (std::find(alwaysSendTo.begin(),alwaysSendTo.end(),m->address) == alwaysSendTo.end())
				out.sendAndLog(RR,m->address);
		}
	}

	// DEPRECATED / LEGACY / TODO:
	// Currently we also always send a legacy P5_MULTICAST_FRAME packet to our
	// supernode. Our supernode then takes care of relaying it down to <1.0.0
	// nodes. This code can go away (along with support for P5_MULTICAST_FRAME)
	// once there are no more such nodes on the network.
	{
		SharedPtr<Peer> sn(RR->topology->getBestSupernode());
		if (sn) {
			uint32_t rn = RR->prng->next32();
			Packet outp(sn->address(),RR->identity.address(),Packet::VERB_P5_MULTICAST_FRAME);
			outp.append((uint16_t)0xffff); // do not forward
			outp.append((unsigned char)0,320 + 1024); // empty queue and bloom filter
			outp.append((unsigned char)((com) ? ZT_PROTO_VERB_P5_MULTICAST_FRAME_FLAGS_HAS_MEMBERSHIP_CERTIFICATE : 0));
			outp.append((uint64_t)nwid);
			outp.append((uint16_t)0);
			outp.append((unsigned char)0);
			outp.append((unsigned char)0);
			RR->identity.address().appendTo(outp);
			outp.append((const void *)&rn,3); // random multicast ID
			if (src)
				src.appendTo(outp);
			else MAC(RR->identity.address(),nwid).appendTo(outp);
			mg.mac().appendTo(outp);
			outp.append((uint32_t)mg.adi());
			outp.append((uint16_t)etherType);
			outp.append((uint16_t)len);
			outp.append(data,len);

			unsigned int signedPortionLen = outp.size() - ZT_PROTO_VERB_P5_MULTICAST_FRAME_IDX__START_OF_SIGNED_PORTION;

			C25519::Signature sig(RR->identity.sign(outp.field(ZT_PROTO_VERB_P5_MULTICAST_FRAME_IDX__START_OF_SIGNED_PORTION,signedPortionLen),signedPortionLen));

			outp.append((uint16_t)sig.size());
			outp.append(sig.data,(unsigned int)sig.size());

			if (com) com->serialize(outp);
			outp.compress();
			outp.armor(sn->key(),true);
			sn->send(RR,outp.data(),outp.size(),now);
		}
	}
}
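
// Periodic housekeeping: drop expired or completed sends from each group's TX
// queue, expire members that have not refreshed within ZT_MULTICAST_LIKE_EXPIRE,
// re-rank the remaining members, and erase groups left with no members and
// nothing queued.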
void Multicaster::clean(uint64_t now)
{
	Mutex::Lock _l(_groups_m);
	for(std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::iterator mm(_groups.begin());mm!=_groups.end();) {
		// Remove expired outgoing multicasts from multicast TX queue
		for(std::list<OutboundMulticast>::iterator tx(mm->second.txQueue.begin());tx!=mm->second.txQueue.end();) {
			if ((tx->expired(now))||(tx->atLimit()))
				mm->second.txQueue.erase(tx++);
			else ++tx;
		}

		// Remove expired members from membership list, and update rank
		// so that remaining members can be sorted in ascending order of
		// transmit priority.
		std::vector<MulticastGroupMember>::iterator reader(mm->second.members.begin());
		std::vector<MulticastGroupMember>::iterator writer(mm->second.members.begin());
		unsigned int count = 0;
		while (reader != mm->second.members.end()) {
			if ((now - reader->timestamp) < ZT_MULTICAST_LIKE_EXPIRE) {
				*writer = *reader;

				/* We rank in ascending order of most recent relevant activity. For peers we've learned
				 * about by direct LIKEs, we do this in order of their own activity. For indirectly
				 * acquired peers we do this minus a constant to place these categorically below directly
				 * learned peers. For peers with no active Peer record, we use the time we last learned
				 * about them minus one day (a large constant) to put these at the bottom of the list.
				 * List is sorted in ascending order of rank and multicasts are sent last-to-first. */
				if (writer->learnedFrom) {
					SharedPtr<Peer> p(RR->topology->getPeer(writer->learnedFrom));
					if (p)
						writer->rank = p->lastUnicastFrame() - ZT_MULTICAST_LIKE_EXPIRE;
					else writer->rank = writer->timestamp - (86400000 + ZT_MULTICAST_LIKE_EXPIRE);
				} else {
					SharedPtr<Peer> p(RR->topology->getPeer(writer->address));
					if (p)
						writer->rank = p->lastUnicastFrame();
					else writer->rank = writer->timestamp - 86400000;
				}
				++writer;
				++count;
			}
			++reader;
		}

		if (count) {
			// There are remaining members, so re-sort them by rank and resize the vector
			std::sort(mm->second.members.begin(),writer); // sorts in ascending order of rank
			mm->second.members.resize(count); // trim off the ones we cut, after writer
			++mm;
		} else if (mm->second.txQueue.empty()) {
			// There are no remaining members and no pending multicasts, so erase the entry
			_groups.erase(mm++);
		} else ++mm;
	}
}
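
// Add a new member to a group or refresh an existing one, then give any queued
// outbound multicasts that still need recipients a chance to send to it.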
void Multicaster::_add(uint64_t now,uint64_t nwid,const MulticastGroup &mg,MulticastGroupStatus &gs,const Address &learnedFrom,const Address &member)
{
	// assumes _groups_m is locked

	// Do not add self -- even if someone else returns it
	if (member == RR->identity.address())
		return;

	// Update timestamp and learnedFrom if existing
	for(std::vector<MulticastGroupMember>::iterator m(gs.members.begin());m!=gs.members.end();++m) {
		if (m->address == member) {
			// learnedFrom is NULL (zero) if we've learned this directly via MULTICAST_LIKE, at which
			// point this becomes a first-order connection.
			if (m->learnedFrom)
				m->learnedFrom = learnedFrom;
			m->timestamp = now;
			return;
		}
	}

	// If not existing, add to end of list (highest priority) -- these will
	// be resorted on next clean(). In the future we might want to insert
	// this somewhere else but we'll try this for now.
	gs.members.push_back(MulticastGroupMember(member,learnedFrom,now));

	//TRACE("..MC %s joined multicast group %.16llx/%s via %s",member.toString().c_str(),nwid,mg.toString().c_str(),((learnedFrom) ? learnedFrom.toString().c_str() : "(direct)"));

	// Try to send to any outgoing multicasts that are waiting for more recipients
	// TODO / LEGACY: don't send new multicast frame to old peers (if we know their version)
	SharedPtr<Peer> p(RR->topology->getPeer(member));
	if ((!p)||(!p->remoteVersionKnown())||(p->remoteVersionMajor() >= 1)) {
		for(std::list<OutboundMulticast>::iterator tx(gs.txQueue.begin());tx!=gs.txQueue.end();) {
			tx->sendIfNew(RR,member);
			if (tx->atLimit())
				gs.txQueue.erase(tx++);
			else ++tx;
		}
	}
}
} // namespace ZeroTier