diff --git a/node/Peer.hpp b/node/Peer.hpp
index 482c0a828..0988561a8 100644
--- a/node/Peer.hpp
+++ b/node/Peer.hpp
@@ -53,6 +53,10 @@
 #include "Mutex.hpp"
 #include "NonCopyable.hpp"
 
+// Very rough computed estimate: (8 + 256 + 80 + (16 * 64) + (128 * 256) + (128 * 16))
+// 1048576 provides tons of headroom -- overflow would just cause peer not to be persisted
+#define ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE 1048576
+
 namespace ZeroTier {
 
 /**
@@ -450,7 +454,7 @@ public:
 	{
 		Mutex::Lock _l(_lock);
 
-		const unsigned int lengthAt = b.size();
+		const unsigned int atPos = b.size();
 		b.addSize(4); // space for uint32_t field length
 
 		b.append((uint32_t)1); // version of serialized Peer data
@@ -498,7 +502,7 @@
 			}
 		}
 
-		b.setAt(lengthAt,(uint32_t)((b.size() - 4) - lengthAt)); // set size, not including size field itself
+		b.setAt(atPos,(uint32_t)(b.size() - atPos)); // set size
 	}
 
 	/**
@@ -512,9 +516,10 @@
 	template<unsigned int C>
 	static inline SharedPtr<Peer> deserializeNew(const Identity &myIdentity,const Buffer<C> &b,unsigned int &p)
 	{
-		const uint32_t recSize = b.template at<uint32_t>(p); p += 4;
+		const uint32_t recSize = b.template at<uint32_t>(p);
 		if ((p + recSize) > b.size())
 			return SharedPtr<Peer>(); // size invalid
+		p += 4;
 		if (b.template at<uint32_t>(p) != 1)
 			return SharedPtr<Peer>(); // version mismatch
 		p += 4;
@@ -540,7 +545,7 @@ public:
 		np->_vRevision = b.template at<uint16_t>(p); p += 2;
 		np->_latency = b.template at<uint32_t>(p); p += 4;
 
-		const unsigned int numPaths = b.template at<uint32_t>(p); p += 2;
+		const unsigned int numPaths = b.template at<uint32_t>(p); p += 4;
 		for(unsigned int i=0;i<numPaths;++i) {
 			np->_paths[np->_numPaths++].deserialize(b,p);
 		}
@@ -564,6 +569,8 @@ public:
 			const uint64_t ts = b.template at<uint64_t>(p); p += 8;
 			np->_lastPushedComs.set(nwid,ts);
 		}
+
+		return np;
 	}
 
 private:
diff --git a/node/RemotePath.hpp b/node/RemotePath.hpp
index 9a8a3ff8f..d2f999971 100644
--- a/node/RemotePath.hpp
+++ b/node/RemotePath.hpp
@@ -163,8 +163,8 @@ public:
 		_lastSend = b.template at<uint64_t>(p); p += 8;
 		_lastReceived = b.template at<uint64_t>(p); p += 8;
 		p += _localAddress.deserialize(b,p);
-		_flags = b.template at<uint16_t>(p); p += 4;
-		return (startAt - p);
+		_flags = b.template at<uint16_t>(p); p += 2;
+		return (p - startAt);
 	}
 
 protected:
diff --git a/node/Topology.cpp b/node/Topology.cpp
index b2b4ebbda..65abfc6f0 100644
--- a/node/Topology.cpp
+++ b/node/Topology.cpp
@@ -31,6 +31,7 @@
 #include "Defaults.hpp"
 #include "Dictionary.hpp"
 #include "Node.hpp"
+#include "Buffer.hpp"
 
 namespace ZeroTier {
 
@@ -38,10 +39,68 @@ Topology::Topology(const RuntimeEnvironment *renv) :
 	RR(renv),
 	_amRoot(false)
 {
+	std::string alls(RR->node->dataStoreGet("peers.save"));
+	const uint8_t *all = reinterpret_cast<const uint8_t *>(alls.data());
+	RR->node->dataStoreDelete("peers.save");
+
+	unsigned int ptr = 0;
+	while ((ptr + 4) < alls.size()) {
+		// Each Peer serializes itself prefixed by a record length (not including the size of the length itself)
+		unsigned int reclen = (unsigned int)all[ptr] & 0xff;
+		reclen <<= 8;
+		reclen |= (unsigned int)all[ptr + 1] & 0xff;
+		reclen <<= 8;
+		reclen |= (unsigned int)all[ptr + 2] & 0xff;
+		reclen <<= 8;
+		reclen |= (unsigned int)all[ptr + 3] & 0xff;
+
+		if (((ptr + reclen) > alls.size())||(reclen > ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE))
+			break;
+
+		try {
+			unsigned int pos = 0;
+			SharedPtr<Peer> p(Peer::deserializeNew(RR->identity,Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE>(all + ptr,reclen),pos));
+			if (pos != reclen)
+				break;
+			ptr += pos;
+			if ((p)&&(p->address() != RR->identity.address())) {
+				_peers[p->address()] = p;
+			} else {
+				break; // stop if invalid records
+			}
+		} catch (std::exception &exc) {
+			break;
+		} catch ( ... ) {
+			break; // stop if invalid records
+		}
+	}
+
+	clean(RR->node->now());
 }
 
 Topology::~Topology()
 {
+	Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE> pbuf;
+	std::string all;
+
+	Address *a = (Address *)0;
+	SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
+	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
+	while (i.next(a,p)) {
+		if (std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end()) {
+			pbuf.clear();
+			try {
+				(*p)->serialize(pbuf);
+				try {
+					all.append((const char *)pbuf.data(),pbuf.size());
+				} catch ( ... ) {
+					return; // out of memory? just skip
+				}
+			} catch ( ... ) {} // peer too big? shouldn't happen, but if it does just skip it
+		}
+	}
+
+	RR->node->dataStorePut("peers.save",all,true);
 }
 
 void Topology::setRootServers(const std::map< Identity,std::vector<InetAddress> > &sn)
@@ -58,7 +117,7 @@ void Topology::setRootServers(const std::map< Identity,std::vector<InetAddress>
 
 	for(std::map< Identity,std::vector<InetAddress> >::const_iterator i(sn.begin());i!=sn.end();++i) {
 		if (i->first != RR->identity) { // do not add self as a peer
-			SharedPtr<Peer> &p = _activePeers[i->first.address()];
+			SharedPtr<Peer> &p = _peers[i->first.address()];
 			if (!p)
 				p = SharedPtr<Peer>(new Peer(RR->identity,i->first));
 			for(std::vector<InetAddress>::const_iterator j(i->second.begin());j!=i->second.end();++j)
@@ -103,7 +162,7 @@ SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
 	const uint64_t now = RR->node->now();
 	Mutex::Lock _l(_lock);
 
-	SharedPtr<Peer> &p = _activePeers.set(peer->address(),peer);
+	SharedPtr<Peer> &p = _peers.set(peer->address(),peer);
 	p->use(now);
 	_saveIdentity(p->identity());
@@ -120,7 +179,7 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
 	const uint64_t now = RR->node->now();
 	Mutex::Lock _l(_lock);
 
-	SharedPtr<Peer> &ap = _activePeers[zta];
+	SharedPtr<Peer> &ap = _peers[zta];
 	if (ap) {
 		ap->use(now);
@@ -136,7 +195,7 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
 		} catch ( ... ) {} // invalid identity?
 	}
 
-	_activePeers.erase(zta);
+	_peers.erase(zta);
 
 	return SharedPtr<Peer>();
 }
@@ -160,7 +219,7 @@ SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCou
 			if (++sna == _rootAddresses.end())
 				sna = _rootAddresses.begin(); // wrap around at end
 			if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
-				SharedPtr<Peer> *p = _activePeers.get(*sna);
+				SharedPtr<Peer> *p = _peers.get(*sna);
 				if ((p)&&((*p)->hasActiveDirectPath(now))) {
 					bestRoot = *p;
 					break;
@@ -249,12 +308,12 @@ bool Topology::isRoot(const Identity &id) const
 void Topology::clean(uint64_t now)
 {
 	Mutex::Lock _l(_lock);
-	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_activePeers);
+	Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
 	Address *a = (Address *)0;
 	SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
 	while (i.next(a,p)) {
 		if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
-			_activePeers.erase(*a);
+			_peers.erase(*a);
 		} else {
 			(*p)->clean(RR,now);
 		}
diff --git a/node/Topology.hpp b/node/Topology.hpp
index 3066b50c9..4df545e1f 100644
--- a/node/Topology.hpp
+++ b/node/Topology.hpp
@@ -164,7 +164,7 @@ public:
 	inline void eachPeer(F f)
 	{
 		Mutex::Lock _l(_lock);
-		Hashtable< Address,SharedPtr<Peer> >::Iterator i(_activePeers);
+		Hashtable< Address,SharedPtr<Peer> >::Iterator i(_peers);
 		Address *a = (Address *)0;
 		SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
 		while (i.next(a,p))
@@ -177,7 +177,7 @@ public:
 	inline std::vector< std::pair< Address,SharedPtr<Peer> > > allPeers() const
 	{
 		Mutex::Lock _l(_lock);
-		return _activePeers.entries();
+		return _peers.entries();
 	}
 
 	/**
@@ -194,7 +194,7 @@ private:
 
 	const RuntimeEnvironment *RR;
 
-	Hashtable< Address,SharedPtr<Peer> > _activePeers;
+	Hashtable< Address,SharedPtr<Peer> > _peers;
 	std::map< Identity,std::vector<InetAddress> > _roots;
 	std::vector< Address > _rootAddresses;
 	std::vector< SharedPtr<Peer> > _rootPeers;
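
A minimal standalone sketch of the "peers.save" framing read by Topology::Topology() above: the blob written in Topology::~Topology() is the concatenation of each saved Peer's serialization, and every record begins with a 4-byte big-endian length. As stored by the updated Peer::serialize() (b.size() - atPos), that length appears to count the whole record including the 4-byte field itself. The code below only splits such a blob into per-record byte ranges using the standard library; Record and splitRecords are illustrative names, not ZeroTier APIs.

#include <cstddef>
#include <string>
#include <vector>

// Byte range of one serialized Peer record inside the blob: the offset of its
// 4-byte length prefix and the total length including that prefix.
struct Record { std::size_t offset; std::size_t length; };

std::vector<Record> splitRecords(const std::string &blob,std::size_t maxRecordSize)
{
	std::vector<Record> out;
	const unsigned char *b = reinterpret_cast<const unsigned char *>(blob.data());
	std::size_t ptr = 0;
	while ((ptr + 4) < blob.size()) {
		// 4-byte big-endian record length, read the same way as in Topology::Topology()
		const std::size_t reclen = ((std::size_t)b[ptr] << 24) |
		                           ((std::size_t)b[ptr + 1] << 16) |
		                           ((std::size_t)b[ptr + 2] << 8) |
		                           ((std::size_t)b[ptr + 3]);
		// Stop on anything truncated, implausibly large, or too small to hold its own prefix
		if ((reclen <= 4)||((ptr + reclen) > blob.size())||(reclen > maxRecordSize))
			break;
		out.push_back(Record{ptr,reclen});
		ptr += reclen; // mirrors ptr += pos in the constructor, where pos == reclen after a good parse
	}
	return out;
}

Each returned range corresponds to one record that the constructor hands to Peer::deserializeNew() via Buffer<ZT_PEER_SUGGESTED_SERIALIZATION_BUFFER_SIZE>(all + ptr,reclen); the parse is accepted only if deserializeNew() consumes exactly reclen bytes (pos == reclen).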