Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2024-12-19)
Query both root and network controller for multicast last-resort GATHER.
commit 451b8aa7b2
parent 53731acf51
@@ -55,33 +55,6 @@
 #include "Node.hpp"
 #include "Array.hpp"
 
-/**
- * Chunk size for allocating queue entries
- *
- * Queue entries are allocated in chunks of this many and are added to a pool.
- * ZT_CLUSTER_MAX_QUEUE_GLOBAL must be evenly divisible by this.
- */
-#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32
-
-/**
- * Maximum number of chunks to ever allocate
- *
- * This is a global sanity limit to prevent resource exhaustion attacks. It
- * works out to about 600mb of RAM. You'll never see this on a normal edge
- * node. We're unlikely to see this on a root server unless someone is DOSing
- * us. In that case cluster relaying will be affected but other functions
- * should continue to operate normally.
- */
-#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194
-
-/**
- * Max data per queue entry
- *
- * If we ever support larger transport MTUs this must be increased. The plus
- * 16 is just a small margin and has no special meaning.
- */
-#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16)
-
 namespace ZeroTier {
 
 static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)
@@ -73,6 +73,33 @@
  */
 #define ZT_CLUSTER_QUEUE_EXPIRATION 5000
 
+/**
+ * Chunk size for allocating queue entries
+ *
+ * Queue entries are allocated in chunks of this many and are added to a pool.
+ * ZT_CLUSTER_MAX_QUEUE_GLOBAL must be evenly divisible by this.
+ */
+#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32
+
+/**
+ * Maximum number of chunks to ever allocate
+ *
+ * This is a global sanity limit to prevent resource exhaustion attacks. It
+ * works out to about 600mb of RAM. You'll never see this on a normal edge
+ * node. We're unlikely to see this on a root server unless someone is DOSing
+ * us. In that case cluster relaying will be affected but other functions
+ * should continue to operate normally.
+ */
+#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194
+
+/**
+ * Max data per queue entry
+ *
+ * If we ever support larger transport MTUs this must be increased. The plus
+ * 16 is just a small margin and has no special meaning.
+ */
+#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16)
+
 namespace ZeroTier {
 
 class RuntimeEnvironment;
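The queue-sizing defines removed in the previous hunk reappear here, directly below ZT_CLUSTER_QUEUE_EXPIRATION, so all the cluster relay queue tuning knobs live in one place. The "about 600mb" claim in the comment can be sanity-checked from the defines themselves. Below is a minimal sketch of that arithmetic; it assumes ZT_UDP_DEFAULT_PAYLOAD_MTU is 1444 (its value elsewhere in the tree at the time, an assumption here), and the real queue entry also carries peer addresses, a timestamp, and flags on top of the payload buffer:

#include <cstdio>

// Assumed value of ZT_UDP_DEFAULT_PAYLOAD_MTU (from Constants.hpp of this era).
#define ZT_UDP_DEFAULT_PAYLOAD_MTU 1444
#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16)
#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32
#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194

int main()
{
	const unsigned long long maxEntries =
		(unsigned long long)ZT_CLUSTER_MAX_QUEUE_CHUNKS * ZT_CLUSTER_QUEUE_CHUNK_SIZE;
	const unsigned long long payloadBytes =
		maxEntries * ZT_CLUSTER_SEND_QUEUE_DATA_MAX;
	// Prints: max entries: 262208, payload buffers alone: ~365 MB.
	// The comment's ~600 MB figure evidently also budgets per-entry
	// metadata and allocator overhead beyond the raw payload buffers.
	printf("max entries: %llu, payload buffers alone: ~%llu MB\n",
		maxEntries,payloadBytes / (1024ULL * 1024ULL));
	return 0;
}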
@@ -235,21 +235,26 @@ void Multicaster::send(
 
 		if ((gs.members.empty())||((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY)) {
 			gs.lastExplicitGather = now;
-			SharedPtr<Peer> r(RR->topology->getBestRoot());
-			if (r) {
-				TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
+			SharedPtr<Peer> explicitGatherPeers[2];
+			explicitGatherPeers[0] = RR->topology->getBestRoot();
+			explicitGatherPeers[1] = RR->topology->getPeer(Network::controllerFor(nwid));
+			for(unsigned int k=0;k<2;++k) {
+				const SharedPtr<Peer> &p = explicitGatherPeers[k];
+				if (!p)
+					continue;
+				//TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
 
 				const CertificateOfMembership *com = (CertificateOfMembership *)0;
 				{
 					SharedPtr<Network> nw(RR->node->network(nwid));
 					if (nw) {
 						SharedPtr<NetworkConfig> nconf(nw->config2());
-						if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(r->needsOurNetworkMembershipCertificate(nwid,now,true)))
+						if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(p->needsOurNetworkMembershipCertificate(nwid,now,true)))
 							com = &(nconf->com());
 					}
 				}
 
-				Packet outp(r->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
+				Packet outp(p->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
 				outp.append(nwid);
 				outp.append((uint8_t)(com ? 0x01 : 0x00));
 				mg.mac().appendTo(outp);
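This hunk is the change the commit message describes: instead of sending the last-resort GATHER only to the current best root, the sender now also queries the network's controller. The controller peer is addressed via Network::controllerFor(nwid), which needs no lookup because a ZeroTier network ID embeds its controller's 40-bit address in its upper bits. A minimal sketch of that derivation follows; the name controllerAddressFor and the main() harness are illustrative, not from the tree, and the in-tree implementation may differ in detail:

#include <cstdint>
#include <cstdio>

// Sketch: recover the controller's 40-bit ZeroTier address from a 64-bit
// network ID, mirroring what Network::controllerFor() is understood to do.
static inline uint64_t controllerAddressFor(uint64_t nwid)
{
	// The low 24 bits are the controller-local network number; the
	// remaining high 40 bits are the controller's address.
	return (nwid >> 24) & 0xffffffffffULL;
}

int main()
{
	const uint64_t nwid = 0x8056c2e21c000001ULL; // example network ID
	printf("controller for %.16llx is %.10llx\n",
		(unsigned long long)nwid,
		(unsigned long long)controllerAddressFor(nwid));
	return 0;
}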
@@ -257,8 +262,7 @@ void Multicaster::send(
 				outp.append((uint32_t)gatherLimit);
 				if (com)
 					com->serialize(outp);
-				outp.armor(r->key(),true);
-				r->send(RR,outp.data(),outp.size(),now);
+				RR->sw->send(outp,true,0);
 			}
 			gatherLimit = 0;
 		}
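Note the send-path change that accompanies the new loop. The old code armored the GATHER with the root's key and pushed it directly over that one peer's path, which only works when the destination is known to be r. Since the destination is now whichever candidate peer p refers to, the packet is handed to the switch instead (RR->sw->send(outp,true,0)), which looks up the destination named in the packet and handles encryption and path selection itself (that division of labor inside Switch::send() is inferred from its use here).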
@@ -1265,7 +1265,7 @@ public:
 			_phy.setIp4UdpTtl(_v4UpnpUdpSocket,ttl);
 		const int result = ((_phy.udpSend(_v4UpnpUdpSocket,(const struct sockaddr *)addr,data,len) != 0) ? 0 : -1);
 		if (ttl)
-			_phy.setIp4UdlTtl(_v4UpnpUdpSocket,255);
+			_phy.setIp4UdpTtl(_v4UpnpUdpSocket,255);
 		return result;
 	} else {
 		return -1;
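The final hunk is an unrelated one-character fix: the misspelled setIp4UdlTtl is corrected to setIp4UdpTtl, so a send that temporarily lowered the UPnP socket's TTL actually restores it to 255 afterward. As a rough sketch of what a helper like this typically does on POSIX systems (an assumption about Phy's internals, not a copy of them):

#include <netinet/in.h>
#include <sys/socket.h>

// Sketch of a setIp4UdpTtl-style helper: set the IPv4 time-to-live used
// for packets sent from this UDP socket. Returns true on success.
static bool setIp4UdpTtlSketch(int sock,int ttl)
{
	return (setsockopt(sock,IPPROTO_IP,IP_TTL,(const void *)&ttl,sizeof(ttl)) == 0);
}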