Query both root and network controller for multicast last resort GATHER.

This commit is contained in:
Adam Ierymenko 2015-11-10 17:37:38 -08:00
parent 53731acf51
commit 451b8aa7b2
4 changed files with 39 additions and 35 deletions

View File

@@ -55,33 +55,6 @@
#include "Node.hpp" #include "Node.hpp"
#include "Array.hpp" #include "Array.hpp"
/**
* Chunk size for allocating queue entries
*
* Queue entries are allocated in chunks of this many and are added to a pool.
* ZT_CLUSTER_MAX_QUEUE_GLOBAL must be evenly divisible by this.
*/
#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32
/**
* Maximum number of chunks to ever allocate
*
* This is a global sanity limit to prevent resource exhaustion attacks. It
* works out to about 600mb of RAM. You'll never see this on a normal edge
* node. We're unlikely to see this on a root server unless someone is DOSing
* us. In that case cluster relaying will be affected but other functions
* should continue to operate normally.
*/
#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194
/**
* Max data per queue entry
*
* If we ever support larger transport MTUs this must be increased. The plus
* 16 is just a small margin and has no special meaning.
*/
#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16)
namespace ZeroTier { namespace ZeroTier {
static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2) static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2)

View File

@@ -73,6 +73,33 @@
*/ */
#define ZT_CLUSTER_QUEUE_EXPIRATION 5000 #define ZT_CLUSTER_QUEUE_EXPIRATION 5000
/**
* Chunk size for allocating queue entries
*
* Queue entries are allocated in chunks of this many and are added to a pool.
* ZT_CLUSTER_MAX_QUEUE_GLOBAL must be evenly divisible by this.
*/
#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32
/**
* Maximum number of chunks to ever allocate
*
* This is a global sanity limit to prevent resource exhaustion attacks. It
* works out to about 600mb of RAM. You'll never see this on a normal edge
* node. We're unlikely to see this on a root server unless someone is DOSing
* us. In that case cluster relaying will be affected but other functions
* should continue to operate normally.
*/
#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194
/**
* Max data per queue entry
*
* If we ever support larger transport MTUs this must be increased. The plus
* 16 is just a small margin and has no special meaning.
*/
#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16)
namespace ZeroTier { namespace ZeroTier {
class RuntimeEnvironment; class RuntimeEnvironment;

View File

@@ -235,21 +235,26 @@ void Multicaster::send(
if ((gs.members.empty())||((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY)) { if ((gs.members.empty())||((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY)) {
gs.lastExplicitGather = now; gs.lastExplicitGather = now;
SharedPtr<Peer> r(RR->topology->getBestRoot()); SharedPtr<Peer> explicitGatherPeers[2];
if (r) { explicitGatherPeers[0] = RR->topology->getBestRoot();
TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str()); explicitGatherPeers[1] = RR->topology->getPeer(Network::controllerFor(nwid));
for(unsigned int k=0;k<2;++k) {
const SharedPtr<Peer> &p = explicitGatherPeers[k];
if (!p)
continue;
//TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str());
const CertificateOfMembership *com = (CertificateOfMembership *)0; const CertificateOfMembership *com = (CertificateOfMembership *)0;
{ {
SharedPtr<Network> nw(RR->node->network(nwid)); SharedPtr<Network> nw(RR->node->network(nwid));
if (nw) { if (nw) {
SharedPtr<NetworkConfig> nconf(nw->config2()); SharedPtr<NetworkConfig> nconf(nw->config2());
if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(r->needsOurNetworkMembershipCertificate(nwid,now,true))) if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(p->needsOurNetworkMembershipCertificate(nwid,now,true)))
com = &(nconf->com()); com = &(nconf->com());
} }
} }
Packet outp(r->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER); Packet outp(p->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER);
outp.append(nwid); outp.append(nwid);
outp.append((uint8_t)(com ? 0x01 : 0x00)); outp.append((uint8_t)(com ? 0x01 : 0x00));
mg.mac().appendTo(outp); mg.mac().appendTo(outp);
@@ -257,8 +262,7 @@ void Multicaster::send(
outp.append((uint32_t)gatherLimit); outp.append((uint32_t)gatherLimit);
if (com) if (com)
com->serialize(outp); com->serialize(outp);
outp.armor(r->key(),true); RR->sw->send(outp,true,0);
r->send(RR,outp.data(),outp.size(),now);
} }
gatherLimit = 0; gatherLimit = 0;
} }

View File

@@ -1265,7 +1265,7 @@ public:
_phy.setIp4UdpTtl(_v4UpnpUdpSocket,ttl); _phy.setIp4UdpTtl(_v4UpnpUdpSocket,ttl);
const int result = ((_phy.udpSend(_v4UpnpUdpSocket,(const struct sockaddr *)addr,data,len) != 0) ? 0 : -1); const int result = ((_phy.udpSend(_v4UpnpUdpSocket,(const struct sockaddr *)addr,data,len) != 0) ? 0 : -1);
if (ttl) if (ttl)
_phy.setIp4UdlTtl(_v4UpnpUdpSocket,255); _phy.setIp4UdpTtl(_v4UpnpUdpSocket,255);
return result; return result;
} else { } else {
return -1; return -1;