Fixes for lock ordering -- GitHub issue #573

Commit: f87326fc21
Parent: 0574a70fac
Author: Adam Ierymenko
Date:   2018-01-11 14:29:57 -08:00
4 changed files with 26 additions and 7 deletions

node/Mutex.hpp

@@ -38,7 +38,8 @@
 
 namespace ZeroTier {
 
-#if defined(__GNUC__) && (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
+//#if defined(__GNUC__) && (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
+#if 0
 
 // Inline ticket lock on x64 systems with GCC and CLANG (Mac, Linux) -- this is really fast as long as locking durations are very short
 class Mutex : NonCopyable
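Editorial note: the construct switched off by "#if 0" above is a spin-style ticket lock; with it disabled, the pthread-based fallback Mutex is used everywhere. A minimal sketch of the technique (illustrative, not ZeroTier's exact code) shows why it is only attractive for very short critical sections, and why a blocking mutex is the safer default while lock-ordering bugs like #573 can stall lock holders:

    #include <cstdint>

    // Minimal ticket-lock sketch (x86/GCC-style, illustrative only).
    // Each locker atomically takes a ticket, then spins until "now
    // serving" reaches it. Fair and very fast when hold times are tiny,
    // but every waiter burns a CPU core for as long as the lock is held.
    class TicketLock {
    public:
        TicketLock() : _nextTicket(0), _nowServing(0) {}

        void lock() {
            // GCC/Clang builtin: fetch my ticket, advance the counter.
            const uint16_t myTicket = __sync_fetch_and_add(&_nextTicket, 1);
            while (_nowServing != myTicket)
                __asm__ __volatile__("rep;nop" ::: "memory"); // x86 PAUSE hint
        }

        void unlock() {
            __sync_fetch_and_add(&_nowServing, 1); // admit the next ticket holder
        }

    private:
        volatile uint16_t _nextTicket;
        volatile uint16_t _nowServing;
    };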

node/Node.hpp

@@ -284,7 +284,7 @@ private:
 	Address _remoteTraceTarget;
 	enum Trace::Level _remoteTraceLevel;
 
-	int64_t _now;
+	volatile int64_t _now;
 	int64_t _lastPingCheck;
 	int64_t _lastHousekeepingRun;
 	int64_t _lastMemoizedTraceSettings;
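Editorial note: _now is written by one thread and read by others, and volatile forces each read to be a real load instead of a value the optimizer cached in a register. Volatile does not, however, add atomicity or cross-thread ordering. A short illustration of the underlying hazard and the stricter modern equivalent (not part of this commit; names hypothetical):

    #include <atomic>
    #include <cstdint>

    // A plain int64_t polled in a loop can be hoisted into a register,
    // so updates from another thread are never observed. std::atomic
    // guarantees a fresh load on every access and also defines the
    // cross-thread memory ordering, which volatile alone does not.
    std::atomic<int64_t> now{0};

    void clockThread(int64_t t) {
        now.store(t, std::memory_order_relaxed); // updater thread
    }

    int64_t pollNow() {
        return now.load(std::memory_order_relaxed); // always a real load
    }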

node/Topology.cpp

@ -162,8 +162,8 @@ SharedPtr<Peer> Topology::getUpstreamPeer()
unsigned int bestq = ~((unsigned int)0);
const SharedPtr<Peer> *best = (const SharedPtr<Peer> *)0;
Mutex::Lock _l1(_peers_m);
Mutex::Lock _l2(_upstreams_m);
Mutex::Lock _l2(_peers_m);
Mutex::Lock _l1(_upstreams_m);
for(std::vector<Address>::const_iterator a(_upstreamAddresses.begin());a!=_upstreamAddresses.end();++a) {
const SharedPtr<Peer> *p = _peers.get(*a);
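Editorial note: this file is the heart of the fix. getUpstreamPeer() already locked _peers_m before _upstreams_m (the hunk above only renames the guards so _l2 is always the peers lock and _l1 the upstreams lock), while addWorld() and removeMoon() locked them in the opposite order; the two hunks below move their _upstreams_m acquisition after _peers_m. Two threads taking the same pair of locks in opposite orders is the classic ABBA deadlock reported in issue #573. A standalone sketch of that hazard (mutex names mirror Topology's; everything else is hypothetical):

    #include <mutex>
    #include <thread>

    std::mutex peers_m, upstreams_m; // stand-ins for _peers_m / _upstreams_m

    void likeGetUpstreamPeer() {
        std::lock_guard<std::mutex> l1(peers_m);     // peers first...
        std::lock_guard<std::mutex> l2(upstreams_m); // ...then upstreams
    }

    void likeOldAddWorld() {
        std::lock_guard<std::mutex> l1(upstreams_m); // old order: upstreams first...
        std::lock_guard<std::mutex> l2(peers_m);     // ...then peers
    }

    int main() {
        std::thread a(likeGetUpstreamPeer);
        std::thread b(likeOldAddWorld);
        a.join();
        b.join(); // can hang forever: a holds peers_m waiting on upstreams_m
                  // while b holds upstreams_m waiting on peers_m
        return 0;
    }

After this commit every Topology method acquires _peers_m first and _upstreams_m second, so the wait cycle can no longer form.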
@@ -252,8 +252,8 @@ bool Topology::addWorld(void *tPtr,const World &newWorld,bool alwaysAcceptNew)
 	if ((newWorld.type() != World::TYPE_PLANET)&&(newWorld.type() != World::TYPE_MOON))
 		return false;
 
-	Mutex::Lock _l1(_upstreams_m);
 	Mutex::Lock _l2(_peers_m);
+	Mutex::Lock _l1(_upstreams_m);
 
 	World *existing = (World *)0;
 	switch(newWorld.type()) {
@@ -341,8 +341,8 @@ void Topology::addMoon(void *tPtr,const uint64_t id,const Address &seed)
 
 void Topology::removeMoon(void *tPtr,const uint64_t id)
 {
-	Mutex::Lock _l1(_upstreams_m);
 	Mutex::Lock _l2(_peers_m);
+	Mutex::Lock _l1(_upstreams_m);
 
 	std::vector<World> nm;
 	for(std::vector<World>::const_iterator m(_moons.begin());m!=_moons.end();++m) {
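Editorial note: the codebase enforces this ordering purely by convention at each call site. For comparison, C++17 code can get the same guarantee mechanically from std::scoped_lock, which acquires multiple mutexes with a deadlock-avoidance algorithm; a sketch only, not applicable here since ZeroTier predates C++17 and uses its own Mutex class:

    #include <mutex>

    std::mutex peers_m, upstreams_m;

    void anyCallSite() {
        // Locks both mutexes as if by std::lock(), so call sites need
        // not agree on a textual acquisition order to avoid deadlock.
        std::scoped_lock lk(upstreams_m, peers_m);
        // ... work with both structures ...
    }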

service/OneService.cpp

@@ -825,7 +825,24 @@ public:
 // Sync multicast group memberships
 if ((now - lastTapMulticastGroupCheck) >= ZT_TAP_CHECK_MULTICAST_INTERVAL) {
     lastTapMulticastGroupCheck = now;
-    Mutex::Lock _l(_nets_m);
+    std::vector< std::pair< uint64_t,std::pair< std::vector<MulticastGroup>,std::vector<MulticastGroup> > > > mgChanges;
+    {
+        Mutex::Lock _l(_nets_m);
+        mgChanges.reserve(_nets.size() + 1);
+        for(std::map<uint64_t,NetworkState>::const_iterator n(_nets.begin());n!=_nets.end();++n) {
+            if (n->second.tap) {
+                mgChanges.push_back(std::pair< uint64_t,std::pair< std::vector<MulticastGroup>,std::vector<MulticastGroup> > >(n->first,std::pair< std::vector<MulticastGroup>,std::vector<MulticastGroup> >()));
+                n->second.tap->scanMulticastGroups(mgChanges.back().second.first,mgChanges.back().second.second);
+            }
+        }
+    }
+    for(std::vector< std::pair< uint64_t,std::pair< std::vector<MulticastGroup>,std::vector<MulticastGroup> > > >::iterator c(mgChanges.begin());c!=mgChanges.end();++c) {
+        for(std::vector<MulticastGroup>::iterator m(c->second.first.begin());m!=c->second.first.end();++m)
+            _node->multicastSubscribe((void *)0,c->first,m->mac().toInt(),m->adi());
+        for(std::vector<MulticastGroup>::iterator m(c->second.second.begin());m!=c->second.second.end();++m)
+            _node->multicastUnsubscribe(c->first,m->mac().toInt(),m->adi());
+    }
+    /*
     for(std::map<uint64_t,NetworkState>::const_iterator n(_nets.begin());n!=_nets.end();++n) {
         if (n->second.tap) {
             std::vector<MulticastGroup> added,removed;
@@ -836,6 +853,7 @@ public:
             _node->multicastUnsubscribe(n->first,m->mac().toInt(),m->adi());
         }
     }
+    */
 }
 
 // Sync information about physical network interfaces
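Editorial note: this hunk applies a standard lock-ordering remedy. The old code called _node->multicastSubscribe()/multicastUnsubscribe(), which take locks inside Node, while still holding _nets_m; the new code snapshots the multicast group changes under _nets_m, releases it, and only then calls into Node, removing that edge from the lock graph. A generic sketch of the pattern (names hypothetical):

    #include <mutex>
    #include <vector>

    std::mutex stateLock;         // stand-in for OneService::_nets_m
    std::vector<int> sharedState; // stand-in for the per-network tap data

    // Stand-in for a call like Node::multicastSubscribe() that may
    // acquire other locks internally.
    void notify(int item) {}

    void syncSafely() {
        std::vector<int> snapshot;
        {
            std::lock_guard<std::mutex> l(stateLock); // hold only while copying
            snapshot = sharedState;
        }
        for (int item : snapshot) // lock released before calling out
            notify(item);         // so no cross-lock cycle can form
    }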