From 451b8aa7b293be0bdd62a0cc1c292bfe5b3de751 Mon Sep 17 00:00:00 2001 From: Adam Ierymenko Date: Tue, 10 Nov 2015 17:37:38 -0800 Subject: [PATCH] Query both root and network controller for multicast last resort GATHER. --- node/Cluster.cpp | 27 --------------------------- node/Cluster.hpp | 27 +++++++++++++++++++++++++++ node/Multicaster.cpp | 18 +++++++++++------- service/OneService.cpp | 2 +- 4 files changed, 39 insertions(+), 35 deletions(-) diff --git a/node/Cluster.cpp b/node/Cluster.cpp index 0a7de93d4..af9ab8543 100644 --- a/node/Cluster.cpp +++ b/node/Cluster.cpp @@ -55,33 +55,6 @@ #include "Node.hpp" #include "Array.hpp" -/** - * Chunk size for allocating queue entries - * - * Queue entries are allocated in chunks of this many and are added to a pool. - * ZT_CLUSTER_MAX_QUEUE_GLOBAL must be evenly divisible by this. - */ -#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32 - -/** - * Maximum number of chunks to ever allocate - * - * This is a global sanity limit to prevent resource exhaustion attacks. It - * works out to about 600mb of RAM. You'll never see this on a normal edge - * node. We're unlikely to see this on a root server unless someone is DOSing - * us. In that case cluster relaying will be affected but other functions - * should continue to operate normally. - */ -#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194 - -/** - * Max data per queue entry - * - * If we ever support larger transport MTUs this must be increased. The plus - * 16 is just a small margin and has no special meaning.
- */ -#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16) - namespace ZeroTier { static inline double _dist3d(int x1,int y1,int z1,int x2,int y2,int z2) diff --git a/node/Cluster.hpp b/node/Cluster.hpp index 87452b6f3..ccf0c12a4 100644 --- a/node/Cluster.hpp +++ b/node/Cluster.hpp @@ -73,6 +73,33 @@ */ #define ZT_CLUSTER_QUEUE_EXPIRATION 5000 +/** + * Chunk size for allocating queue entries + * + * Queue entries are allocated in chunks of this many and are added to a pool. + * ZT_CLUSTER_MAX_QUEUE_GLOBAL must be evenly divisible by this. + */ +#define ZT_CLUSTER_QUEUE_CHUNK_SIZE 32 + +/** + * Maximum number of chunks to ever allocate + * + * This is a global sanity limit to prevent resource exhaustion attacks. It + * works out to about 600mb of RAM. You'll never see this on a normal edge + * node. We're unlikely to see this on a root server unless someone is DOSing + * us. In that case cluster relaying will be affected but other functions + * should continue to operate normally. + */ +#define ZT_CLUSTER_MAX_QUEUE_CHUNKS 8194 + +/** + * Max data per queue entry + * + * If we ever support larger transport MTUs this must be increased. The plus + * 16 is just a small margin and has no special meaning.
+ */ +#define ZT_CLUSTER_SEND_QUEUE_DATA_MAX (ZT_UDP_DEFAULT_PAYLOAD_MTU + 16) + namespace ZeroTier { class RuntimeEnvironment; diff --git a/node/Multicaster.cpp b/node/Multicaster.cpp index 41838552d..fa9487eff 100644 --- a/node/Multicaster.cpp +++ b/node/Multicaster.cpp @@ -235,21 +235,26 @@ void Multicaster::send( if ((gs.members.empty())||((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY)) { gs.lastExplicitGather = now; - SharedPtr<Peer> r(RR->topology->getBestRoot()); - if (r) { - TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str()); + SharedPtr<Peer> explicitGatherPeers[2]; + explicitGatherPeers[0] = RR->topology->getBestRoot(); + explicitGatherPeers[1] = RR->topology->getPeer(Network::controllerFor(nwid)); + for(unsigned int k=0;k<2;++k) { + const SharedPtr<Peer> &p = explicitGatherPeers[k]; + if (!p) + continue; + //TRACE(">>MC upstream GATHER up to %u for group %.16llx/%s",gatherLimit,nwid,mg.toString().c_str()); const CertificateOfMembership *com = (CertificateOfMembership *)0; { SharedPtr<Network> nw(RR->node->network(nwid)); if (nw) { SharedPtr<NetworkConfig> nconf(nw->config2()); - if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(r->needsOurNetworkMembershipCertificate(nwid,now,true))) + if ((nconf)&&(nconf->com())&&(nconf->isPrivate())&&(p->needsOurNetworkMembershipCertificate(nwid,now,true))) com = &(nconf->com()); } } - Packet outp(r->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER); + Packet outp(p->address(),RR->identity.address(),Packet::VERB_MULTICAST_GATHER); outp.append(nwid); outp.append((uint8_t)(com ?
0x01 : 0x00)); mg.mac().appendTo(outp); @@ -257,8 +262,7 @@ void Multicaster::send( outp.append((uint32_t)gatherLimit); if (com) com->serialize(outp); - outp.armor(r->key(),true); - r->send(RR,outp.data(),outp.size(),now); + RR->sw->send(outp,true,0); } gatherLimit = 0; } diff --git a/service/OneService.cpp b/service/OneService.cpp index 44926a94e..87f4136ca 100644 --- a/service/OneService.cpp +++ b/service/OneService.cpp @@ -1265,7 +1265,7 @@ public: _phy.setIp4UdpTtl(_v4UpnpUdpSocket,ttl); const int result = ((_phy.udpSend(_v4UpnpUdpSocket,(const struct sockaddr *)addr,data,len) != 0) ? 0 : -1); if (ttl) - _phy.setIp4UdlTtl(_v4UpnpUdpSocket,255); + _phy.setIp4UdpTtl(_v4UpnpUdpSocket,255); return result; } else { return -1;