From 62da7e67b64712fc5cfce771ff944057abff705b Mon Sep 17 00:00:00 2001
From: Adam Ierymenko
Date: Fri, 3 Oct 2014 22:30:10 -0700
Subject: [PATCH] Add some rate limiting to implicit gathers, and always send
 multicasts to at least one supernode so they can broadcast to old peers
 (temporary).

---
 node/Constants.hpp   |  7 ++++++-
 node/Multicaster.cpp | 33 +++++++++++++++++++++++++++++++--
 node/Multicaster.hpp |  5 +++--
 3 files changed, 40 insertions(+), 5 deletions(-)

diff --git a/node/Constants.hpp b/node/Constants.hpp
index 55cc4777a..cae9fb0d8 100644
--- a/node/Constants.hpp
+++ b/node/Constants.hpp
@@ -240,7 +240,12 @@
 /**
  * Delay between explicit MULTICAST_GATHER requests for a given multicast channel
  */
-#define ZT_MULTICAST_GATHER_DELAY (ZT_MULTICAST_LIKE_EXPIRE / 10)
+#define ZT_MULTICAST_EXPLICIT_GATHER_DELAY (ZT_MULTICAST_LIKE_EXPIRE / 10)
+
+/**
+ * Minimum delay between implicit gathers via MULTICAST_FRAME
+ */
+#define ZT_MULTICAST_IMPLICIT_GATHER_DELAY 30000
 
 /**
  * Timeout for outgoing multicasts
diff --git a/node/Multicaster.cpp b/node/Multicaster.cpp
index 6f3c43951..16c6304a8 100644
--- a/node/Multicaster.cpp
+++ b/node/Multicaster.cpp
@@ -124,6 +124,14 @@ void Multicaster::send(
 	Mutex::Lock _l(_groups_m);
 	MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];
 
+	// TODO / DEPRECATED:
+	// Right now we also send all multicasts to at least one supernode.
+	// This supernode then relays them via the old multicast message
+	// type to pre 1.0.0 peers. We'll keep doing this until there aren't
+	// any of these on the network. Costs a bit of bandwidth, but maintains
+	// backward compatibility while people upgrade. Then this code can die.
+	bool gotASupernode = false;
+
 	if (gs.members.size() >= limit) {
 		// If we already have enough members, just send and we're done -- no need for TX queue
 		OutboundMulticast out;
@@ -144,13 +152,21 @@
 		unsigned int count = 0;
 		for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs.members.rbegin());m!=gs.members.rend();++m) {
 			out.sendOnly(*(RR->sw),m->address); // sendOnly() avoids overhead of creating sent log since we're going to discard this immediately
+			if (RR->topology->isSupernode(m->address))
+				gotASupernode = true;
 			if (++count >= limit)
 				break;
 		}
+
+		if (!gotASupernode) {
+			SharedPtr<Peer> sn(RR->topology->getBestSupernode());
+			if (sn)
+				out.sendOnly(*(RR->sw),sn->address());
+		}
 	} else {
 		unsigned int gatherLimit = (limit - (unsigned int)gs.members.size()) + 1;
 
-		if ((now - gs.lastExplicitGather) >= ZT_MULTICAST_GATHER_DELAY) {
+		if ((now - gs.lastExplicitGather) >= ZT_MULTICAST_EXPLICIT_GATHER_DELAY) {
 			gs.lastExplicitGather = now;
 
 			// TODO / INPROGRESS: right now supernodes track multicast LIKEs, a relic
@@ -173,6 +189,10 @@
 			gatherLimit = 0; // once we've done this we don't need to do it implicitly
 		}
 
+		if ((gatherLimit > 0)&&((now - gs.lastImplicitGather) > ZT_MULTICAST_IMPLICIT_GATHER_DELAY))
+			gs.lastImplicitGather = now;
+		else gatherLimit = 0;
+
 		gs.txQueue.push_back(OutboundMulticast());
 		OutboundMulticast &out = gs.txQueue.back();
 
@@ -189,8 +209,17 @@
 			data,
 			len);
 
-		for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs.members.rbegin());m!=gs.members.rend();++m)
+		for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs.members.rbegin());m!=gs.members.rend();++m) {
 			out.sendAndLog(*(RR->sw),m->address);
+			if (RR->topology->isSupernode(m->address))
+				gotASupernode = true;
+		}
+
+		if (!gotASupernode) {
+			SharedPtr<Peer> sn(RR->topology->getBestSupernode());
+			if (sn)
+				out.sendAndLog(*(RR->sw),sn->address());
+		}
 	}
 }
 
diff --git a/node/Multicaster.hpp b/node/Multicaster.hpp
index 1fba5f64b..6c117a10e 100644
--- a/node/Multicaster.hpp
+++ b/node/Multicaster.hpp
@@ -72,9 +72,10 @@ private:
 	struct MulticastGroupStatus
 	{
-		MulticastGroupStatus() : lastExplicitGather(0) {}
+		MulticastGroupStatus() : lastExplicitGather(0),lastImplicitGather(0) {}
 
-		uint64_t lastExplicitGather; // time we last gathered members explicitly
+		uint64_t lastExplicitGather;
+		uint64_t lastImplicitGather;
 
 		std::list<OutboundMulticast> txQueue; // pending outbound multicasts
 		std::vector<MulticastGroupMember> members; // members of this group
 	};