/*
 * Copyright (c)2019 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2023-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/

#include <algorithm>

#include "Constants.hpp"
#include "RuntimeEnvironment.hpp"
#include "Multicaster.hpp"
#include "Network.hpp"
#include "Membership.hpp"
#include "Topology.hpp"
#include "Switch.hpp"

namespace ZeroTier {

Multicaster::Multicaster(const RuntimeEnvironment *renv) :
	RR(renv),
	_groups(32) {}

Multicaster::~Multicaster() {}
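
// Send a frame to all known members of a multicast group. When this node is
// relaying on behalf of another sender, existingBloom (and its multiplier)
// carries the set of recipients already covered upstream; both are ignored
// for a locally originated frame. Returns the number of known recipients.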
unsigned int Multicaster::send(
	void *tPtr,
	int64_t now,
	const SharedPtr<Network> &network,
	const MulticastGroup &mg,
	const MAC &src,
	unsigned int etherType,
	const unsigned int existingBloomMultiplier,
	const uint8_t existingBloom[ZT_MULTICAST_BLOOM_FILTER_SIZE_BITS / 8],
	const void *const data,
	unsigned int len)
{
	static const unsigned int PRIMES[16] = { 3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59 }; // 2 is skipped as it's even
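
	// Throughout this function a recipient's position in the bloom filter is
	// bfi = bloomMultiplier * (low 32 bits of its 40-bit address); the byte is
	// (bfi >> 3) % sizeof(bloomFilter) and the bit within it is (bfi & 7).
	// Worked example with a hypothetical address whose low 32 bits are
	// 0x12345678 and multiplier 3: bfi = 0x369d0368, so the byte index is
	// 0x06d3a06d % sizeof(bloomFilter) and the bit mask is 1 << 0.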

	std::vector< std::pair<int64_t,Address> > recipients;

	const NetworkConfig &config = network->config();
	if (config.multicastLimit == 0) return 0; // multicast disabled

	Address specialists[ZT_MAX_NETWORK_SPECIALISTS],multicastReplicators[ZT_MAX_NETWORK_SPECIALISTS];
	unsigned int specialistCount = 0,multicastReplicatorCount = 0,bridgeCount = 0;
	bool amMulticastReplicator = false;
	for(unsigned int i=0;i<config.specialistCount;++i) {
		if (RR->identity.address() == config.specialists[i]) {
			amMulticastReplicator |= ((config.specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_MULTICAST_REPLICATOR) != 0);
		} else {
			specialists[specialistCount++] = config.specialists[i];
			if ((config.specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0) {
				recipients.push_back(std::pair<int64_t,Address>(0,config.specialists[i]));
				++bridgeCount;
			}
			if ((config.specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_MULTICAST_REPLICATOR) != 0) {
				multicastReplicators[multicastReplicatorCount++] = config.specialists[i];
			}
		}
	}
	std::sort(&(specialists[0]),&(specialists[specialistCount])); // for binary search

	int64_t lastGather = 0;
	_K groupKey(network->id(),mg);
	{
		Mutex::Lock l(_groups_l);
		const _G *const g = _groups.get(groupKey);
		if (g) {
			lastGather = g->lastGather;
			recipients.reserve(recipients.size() + g->members.size());
			Hashtable< Address,int64_t >::Iterator mi(const_cast<_G *>(g)->members);
			Address *mik = nullptr;
			int64_t *miv = nullptr;
			while (mi.next(mik,miv)) {
				if (!std::binary_search(&(specialists[0]),&(specialists[specialistCount]),*mik))
					recipients.push_back(std::pair<int64_t,Address>(*miv,*mik));
			}
		}
	}
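
	// At this point recipients holds active bridges (first) followed by known
	// group members tagged with their last-activity timestamps; members that
	// are also specialists were skipped above since they are handled
	// separately.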

	// Sort recipients, maintaining bridges first in list
	std::sort(recipients.begin() + bridgeCount,recipients.end(),std::greater< std::pair<int64_t,Address> >());
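	// (Each pair is (activity timestamp, address), so greater<> orders the
	// most recently active members first.)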

	// Gather new recipients periodically, being more aggressive if we have none.
	if ((now - lastGather) > (recipients.empty() ? 5000 : ZT_MULTICAST_GATHER_PERIOD)) {
		{
			Mutex::Lock l(_groups_l);
			_groups[groupKey].lastGather = now;
		}

		Packet outp(network->controller(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
		outp.append(network->id());
		outp.append((uint8_t)0);
		mg.mac().appendTo(outp);
		outp.append(mg.adi());
		outp.append((uint32_t)0xffffffff);
		RR->sw->send(tPtr,outp,true);

		for(unsigned int i=0;i<specialistCount;++i) {
			outp.newInitializationVector();
			outp.setDestination(specialists[i]);
			RR->sw->send(tPtr,outp,true);
		}

		// LEGACY: roots may know about older versions' multicast subscriptions but
		// the root's role here is being phased out.
		SharedPtr<Peer> root(RR->topology->root(now));
		if (root) {
			outp.newInitializationVector();
			outp.setDestination(root->address());
			outp.armor(root->key(),true);
			root->sendDirect(tPtr,outp.data(),outp.size(),now,true);
		}
	}
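
	// The GATHER payload assembled above, with field widths inferred from the
	// append() calls: [8] network ID, [1] flags (0), [6] group MAC,
	// [4] group ADI, [4] gather limit (0xffffffff == as many as possible).
	// The same packet is re-used for the controller, each specialist, and
	// (legacy) the root, re-keyed via newInitializationVector().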

	if (recipients.empty())
		return 0;

	unsigned int sentCount = 0;

	uint64_t bloomFilter[ZT_MULTICAST_BLOOM_FILTER_SIZE_BITS / 64];
	unsigned int bloomMultiplier;
	if (existingBloom) {
		memcpy(bloomFilter,existingBloom,sizeof(bloomFilter));
		bloomMultiplier = existingBloomMultiplier;
	} else {
		memset(bloomFilter,0,sizeof(bloomFilter));
		bloomMultiplier = 1;

		// Iteratively search for a bloom multiplier that results in no collisions
		// among known recipients. Usually the first iteration is good unless
		// the recipient set is quite large.
		if (recipients.size() > 1) {
			unsigned long bestMultColl = 0xffffffff;
			for(int k=0;k<16;++k) { // 16 == arbitrary limit on iterations for this search, also must be <= size of PRIMES
				const unsigned int mult = PRIMES[k];
				unsigned long coll = 0;
				for(std::vector< std::pair<int64_t,Address> >::const_iterator r(recipients.begin());r!=recipients.end();++r) {
					const unsigned int bfi = mult * (unsigned int)r->second.toInt();
					const unsigned int byte = (bfi >> 3) % sizeof(bloomFilter);
					const uint8_t bit = 1 << (bfi & 7);
					coll += ((((uint8_t *)bloomFilter)[byte] & bit) != 0);
					((uint8_t *)bloomFilter)[byte] |= bit;
				}
				memset(bloomFilter,0,sizeof(bloomFilter)); // the filter was only used to count collisions, so clear it again

				if (coll <= bestMultColl) {
					bloomMultiplier = mult;
					if (coll == 0) // perfect score, no need to continue searching
						break;
					bestMultColl = coll;
				}
			}
		}
	}
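
	// For instance, if two recipients' low address bits land on the same bit
	// under multiplier 3 (coll == 1) but not under 5 (coll == 0), the search
	// settles on 5 and stops early; downstream replicators then re-use this
	// multiplier, which is forwarded along with the filter below.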

	// See if there is a multicast replicator, trying to pick the fastest/best one.
	Address bestReplicator;
	if (multicastReplicatorCount > 0) {
		unsigned int bestReplicatorLatency = 0xffff;
		for(unsigned int i=0;i<multicastReplicatorCount;++i) {
			const unsigned int bfi = bloomMultiplier * (unsigned int)multicastReplicators[i].toInt();
			if ((((uint8_t *)bloomFilter)[(bfi >> 3) % sizeof(bloomFilter)] & (1 << (bfi & 7))) == 0) {
				SharedPtr<Peer> peer(RR->topology->get(multicastReplicators[i]));
				if (peer) {
					const unsigned int lat = peer->latency(now);
					if (lat <= bestReplicatorLatency) {
						bestReplicator = peer->address();
						bestReplicatorLatency = lat;
					}
				} else if (!bestReplicator) {
					bestReplicator = multicastReplicators[i];
				}
			}
		}
	}
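
	// (Replicator addresses that don't resolve to a known peer have no latency
	// estimate and are only chosen as a fallback when nothing measurable has
	// been picked yet.)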

	// If this is a multicast replicator, aggressively replicate. Multicast
	// replicators are not subject to send count limits.
	if (amMulticastReplicator) {
		std::vector< std::pair< int,Address > > byLatency;
		for(std::vector< std::pair<int64_t,Address> >::const_iterator r(recipients.begin());r!=recipients.end();++r) {
			const unsigned int bfi = bloomMultiplier * (unsigned int)r->second.toInt();
			if ((((uint8_t *)bloomFilter)[(bfi >> 3) % sizeof(bloomFilter)] & (1 << (bfi & 7))) == 0) {
				SharedPtr<Peer> peer(RR->topology->get(r->second));
				byLatency.push_back(std::pair< int,Address >((peer) ? (int)peer->latency(now) : 0xffff,r->second));
			}
		}
		std::sort(byLatency.begin(),byLatency.end());

		unsigned long cnt = (unsigned long)byLatency.size();
		if (bestReplicator)
			cnt /= 2; // send to only the best half of the latency-sorted population if there are more replicators
		for(unsigned long i=0;i<cnt;++i) {
			const unsigned int bfi = bloomMultiplier * (unsigned int)byLatency[i].second.toInt();
			((uint8_t *)bloomFilter)[(bfi >> 3) % sizeof(bloomFilter)] |= 1 << (bfi & 7);

			Packet outp(byLatency[i].second,RR->identity.address(),Packet::VERB_MULTICAST_FRAME);
			outp.append(network->id());
			outp.append((uint8_t)0x04);
			src.appendTo(outp);
			mg.mac().appendTo(outp);
			outp.append(mg.adi());
			outp.append((uint16_t)etherType);
			outp.append(data,len);
			outp.compress();
			RR->sw->send(tPtr,outp,true);

			++sentCount;
		}
	}
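
	// Each address is marked in the bloom filter before its copy of the frame
	// goes out, so any replicator receiving the forwarded filter below will
	// skip recipients already covered here.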

	// Forward to the next multicast replicator, if any.
	if (bestReplicator) {
		const unsigned int bfi = bloomMultiplier * (unsigned int)bestReplicator.toInt();
		((uint8_t *)bloomFilter)[(bfi >> 3) % sizeof(bloomFilter)] |= 1 << (bfi & 7);

		Packet outp(bestReplicator,RR->identity.address(),Packet::VERB_MULTICAST_FRAME);
		outp.append(network->id()); // the network ID leads the payload, as in every other VERB_MULTICAST_FRAME built in this function
		outp.append((uint8_t)(0x04 | 0x08));
		RR->identity.address().appendTo(outp);
		outp.append((uint16_t)bloomMultiplier);
		outp.append((uint16_t)sizeof(bloomFilter));
		outp.append(((uint8_t *)bloomFilter),sizeof(bloomFilter));
		src.appendTo(outp);
		mg.mac().appendTo(outp);
		outp.append(mg.adi());
		outp.append((uint16_t)etherType);
		outp.append(data,len);
		outp.compress();
		RR->sw->send(tPtr,outp,true);

		++sentCount;
	}
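
	// Layout of the replicated-frame payload above, inferred from the appends:
	// [8] network ID, [1] flags (0x04 | 0x08), [5] originator address,
	// [2] bloom multiplier, [2] bloom filter length, [n] bloom filter,
	// [6] source MAC, [6] group MAC, [4] group ADI, [2] ethertype, frame data.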

	// If this is a multicast replicator, we've already replicated.
	if (amMulticastReplicator)
		return (unsigned int)recipients.size();

	// Find the two best next hops (that have never seen this multicast)
	// that are newer version nodes.
	SharedPtr<Peer> nextHops[2];
	unsigned int nextHopsBestLatency[2] = { 0xffff,0xffff };
	for(std::vector< std::pair<int64_t,Address> >::iterator r(recipients.begin());r!=recipients.end();++r) {
		if (r->first >= 0) {
			const unsigned int bfi = bloomMultiplier * (unsigned int)r->second.toInt();
			if ((((uint8_t *)bloomFilter)[(bfi >> 3) % sizeof(bloomFilter)] & (1 << (bfi & 7))) == 0) {
				const SharedPtr<Peer> peer(RR->topology->get(r->second));
				if ((peer)&&(peer->remoteVersionProtocol() >= 11)) {
					r->first = -1; // use this field now to flag as non-legacy
					const unsigned int lat = peer->latency(now);
					if (lat <= nextHopsBestLatency[0]) {
						// demote the previous best into the second slot
						nextHopsBestLatency[1] = nextHopsBestLatency[0];
						nextHops[1] = nextHops[0];
						nextHopsBestLatency[0] = lat;
						nextHops[0] = peer;
					} else if (lat <= nextHopsBestLatency[1]) {
						nextHopsBestLatency[1] = lat;
						nextHops[1] = peer;
					}
				}
			}
		}
	}

	// Set bits for next hops in bloom filter
	for(unsigned int nh=0;nh<2;++nh) {
		if (nextHops[nh]) {
			const unsigned int bfi = bloomMultiplier * (unsigned int)nextHops[nh]->address().toInt();
			((uint8_t *)bloomFilter)[(bfi >> 3) % sizeof(bloomFilter)] |= 1 << (bfi & 7);
			++sentCount; // counted now so the hops stay reserved under the legacy send limit below
		}
	}

	// Send to legacy peers and flag these in bloom filter
	const unsigned int limit = config.multicastLimit + bridgeCount;
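	// (Bridges were queued unconditionally at the head of the recipient list,
	// so the configured member limit is widened by bridgeCount rather than
	// letting bridges consume it.)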
	for(std::vector< std::pair<int64_t,Address> >::const_iterator r(recipients.begin());(r!=recipients.end())&&(sentCount<limit);++r) {
		if (r->first >= 0) {
			const unsigned int bfi = bloomMultiplier * (unsigned int)r->second.toInt();
			((uint8_t *)bloomFilter)[(bfi >> 3) % sizeof(bloomFilter)] |= 1 << (bfi & 7);

			Packet outp(r->second,RR->identity.address(),Packet::VERB_MULTICAST_FRAME);
			outp.append(network->id());
			outp.append((uint8_t)0x04);
			src.appendTo(outp);
			mg.mac().appendTo(outp);
			outp.append(mg.adi());
			outp.append((uint16_t)etherType);
			outp.append(data,len);
			outp.compress();
			RR->sw->send(tPtr,outp,true);

			++sentCount;
		}
	}

	// Send to next hops for P2P propagation
	for(unsigned int nh=0;nh<2;++nh) {
		if (nextHops[nh]) {
			Packet outp(nextHops[nh]->address(),RR->identity.address(),Packet::VERB_MULTICAST_FRAME);
			outp.append(network->id()); // the network ID leads the payload, as in the other VERB_MULTICAST_FRAME packets above
			outp.append((uint8_t)(0x04 | 0x08));
			RR->identity.address().appendTo(outp);
			outp.append((uint16_t)bloomMultiplier);
			outp.append((uint16_t)sizeof(bloomFilter));
			outp.append(((uint8_t *)bloomFilter),sizeof(bloomFilter));
			src.appendTo(outp);
			mg.mac().appendTo(outp);
			outp.append(mg.adi());
			outp.append((uint16_t)etherType);
			outp.append(data,len);
			outp.compress();
			RR->sw->send(tPtr,outp,true);
		}
	}

	return (unsigned int)recipients.size();
}

void Multicaster::clean(int64_t now)
{
	// No periodic cleanup is performed yet; stale group/member state is not
	// garbage collected here.
}

} // namespace ZeroTier