/*
 * ZeroTier One - Global Peer to Peer Ethernet
 * Copyright (C) 2012-2013  ZeroTier Networks LLC
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#ifndef _ZT_TOPOLOGY_HPP
#define _ZT_TOPOLOGY_HPP

#include <stdio.h>
#include <stdint.h>

#include <map>
#include <set>
#include <list>
#include <vector>
#include <string>
#include <stdexcept>

#include "Address.hpp"
#include "Peer.hpp"
#include "Mutex.hpp"
#include "Condition.hpp"
#include "InetAddress.hpp"
#include "Constants.hpp"
#include "Thread.hpp"
#include "MulticastGroup.hpp"
#include "Utils.hpp"

#include "../ext/kissdb/kissdb.h"

namespace ZeroTier {

class RuntimeEnvironment;

/**
 * Database of network topology
 */
class Topology : protected Thread
{
public:
	/**
	 * Result of peer add/verify
	 */
	enum PeerVerifyResult
	{
		PEER_VERIFY_ACCEPTED_NEW,                       /* new peer */
		PEER_VERIFY_ACCEPTED_ALREADY_HAVE,              /* we already knew ye */
		PEER_VERIFY_ACCEPTED_DISPLACED_INVALID_ADDRESS, /* you booted out an impostor */
		PEER_VERIFY_REJECTED_INVALID_IDENTITY,          /* identity is invalid or validation failed */
		PEER_VERIFY_REJECTED_DUPLICATE,                 /* someone equally valid already has your address */
		PEER_VERIFY_REJECTED_DUPLICATE_TRIAGED          /* you look duplicate and I'm too busy to deep verify */
	};

	Topology(const RuntimeEnvironment *renv,const char *dbpath)
		throw(std::runtime_error);

	virtual ~Topology();

	/**
	 * Set up supernodes for this network
	 *
	 * @param sn Supernodes for this network
	 */
	void setSupernodes(const std::map< Identity,std::vector<InetAddress> > &sn);

	/**
	 * Add a peer to this network
	 *
	 * Verification and adding actually occur in the background, since in
	 * rare cases this can be somewhat CPU-intensive. The callback will be
	 * called (from the background thread) when the add is complete.
	 *
	 * The peer given to the callback may not be the same object provided
	 * as a candidate if the candidate was an exact duplicate of a peer we
	 * already have.
	 *
	 * @param candidate New candidate peer to be added
	 * @param callback Callback to call when peer verification is complete
	 * @param arg First argument to callback
	 */
	void addPeer(const SharedPtr<Peer> &candidate,void (*callback)(void *,const SharedPtr<Peer> &,PeerVerifyResult),void *arg);
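
	/* A minimal usage sketch for the asynchronous addPeer() path, assuming a
	 * Topology *topology and a SharedPtr<Peer> candidate are in scope; the
	 * names peerAdded and myArg are hypothetical:
	 *
	 *   static void peerAdded(void *arg,const SharedPtr<Peer> &p,Topology::PeerVerifyResult result)
	 *   {
	 *     if (result == Topology::PEER_VERIFY_ACCEPTED_NEW) {
	 *       // p is the verified peer; it may differ from the candidate if an
	 *       // identical peer was already in the database
	 *     }
	 *   }
	 *
	 *   topology->addPeer(candidate,&peerAdded,myArg);
	 */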

	/**
	 * Get a peer from its address
	 *
	 * @param zta ZeroTier address of peer
	 * @return Peer or NULL if not found
	 */
	SharedPtr<Peer> getPeer(const Address &zta);

	/**
	 * @return Current network supernodes
	 */
	inline std::map< Identity,std::vector<InetAddress> > supernodes() const
	{
		Mutex::Lock _l(_supernodes_m);
		return _supernodes;
	}

	/**
	 * @return Vector of peers that are supernodes
	 */
	inline std::vector< SharedPtr<Peer> > supernodePeers() const
	{
		Mutex::Lock _l(_supernodes_m);
		return _supernodePeers;
	}

	/**
	 * Get the current favorite supernode
	 *
	 * @return Supernode with lowest latency or NULL if none
	 */
	inline SharedPtr<Peer> getBestSupernode() const
	{
		return getBestSupernode((const Address *)0,0);
	}

	/**
	 * Get the best supernode, avoiding supernodes listed in an array
	 *
	 * This will get the best supernode (lowest latency, etc.) but will
	 * try to avoid the listed supernodes, only using them if no others
	 * are available.
	 *
	 * @param avoid Nodes to avoid
	 * @param avoidCount Number of nodes to avoid
	 * @return Supernode or NULL if none
	 */
	SharedPtr<Peer> getBestSupernode(const Address *avoid,unsigned int avoidCount) const;

	/**
	 * @param zta ZeroTier address
	 * @return True if this is a designated supernode
	 */
	inline bool isSupernode(const Address &zta) const
		throw()
	{
		Mutex::Lock _l(_supernodes_m);
		return (_supernodeAddresses.count(zta) > 0);
	}

	/**
	 * Clean and flush database now (runs in the background)
	 */
	void clean();

	/**
	 * Apply a function or function object to all peers
	 *
	 * @param f Function to apply
	 * @tparam F Function or function object type
	 */
	template<typename F>
	inline void eachPeer(F f)
	{
		Mutex::Lock _l(_activePeers_m);
		for(std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.begin());p!=_activePeers.end();++p)
			f(*this,p->second);
	}

	/**
	 * Function object to collect peers that need a firewall opener sent
	 */
	class CollectPeersThatNeedFirewallOpener
	{
	public:
		CollectPeersThatNeedFirewallOpener(std::vector< SharedPtr<Peer> > &v) :
			_now(Utils::now()),
			_v(v)
		{
		}

		inline void operator()(Topology &t,const SharedPtr<Peer> &p)
		{
			if ((p->hasDirectPath())&&((_now - p->lastFirewallOpener()) >= ZT_FIREWALL_OPENER_DELAY))
				_v.push_back(p);
		}

	private:
		uint64_t _now;
		std::vector< SharedPtr<Peer> > &_v;
	};

	/**
	 * Function object to collect peers that need a ping sent
	 */
	class CollectPeersThatNeedPing
	{
	public:
		CollectPeersThatNeedPing(std::vector< SharedPtr<Peer> > &v) :
			_now(Utils::now()),
			_v(v)
		{
		}

		inline void operator()(Topology &t,const SharedPtr<Peer> &p)
		{
			if (((p->hasActiveDirectPath(_now))||(t.isSupernode(p->address())))&&((_now - p->lastDirectSend()) >= ZT_PEER_DIRECT_PING_DELAY))
				_v.push_back(p);
		}

	private:
		uint64_t _now;
		std::vector< SharedPtr<Peer> > &_v;
	};

	/**
	 * Function object to collect peers with active links (and supernodes)
	 */
	class CollectPeersWithActiveDirectPath
	{
	public:
		CollectPeersWithActiveDirectPath(std::vector< SharedPtr<Peer> > &v) :
			_now(Utils::now()),
			_v(v)
		{
		}

		inline void operator()(Topology &t,const SharedPtr<Peer> &p)
		{
			if ((p->hasActiveDirectPath(_now))||(t.isSupernode(p->address())))
				_v.push_back(p);
		}

	private:
		uint64_t _now;
		std::vector< SharedPtr<Peer> > &_v;
	};

	/**
	 * Function object to collect peers with any known direct path
	 */
	class CollectPeersWithDirectPath
	{
	public:
		CollectPeersWithDirectPath(std::vector< SharedPtr<Peer> > &v) :
			_v(v)
		{
		}

		inline void operator()(Topology &t,const SharedPtr<Peer> &p)
		{
			if (p->hasDirectPath())
				_v.push_back(p);
		}

	private:
		std::vector< SharedPtr<Peer> > &_v;
	};
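
	/* A minimal sketch of how eachPeer() composes with the collector function
	 * objects above (the names topology and needPing are hypothetical):
	 *
	 *   std::vector< SharedPtr<Peer> > needPing;
	 *   topology->eachPeer(Topology::CollectPeersThatNeedPing(needPing));
	 *   // needPing now holds peers with an active direct path (or that are
	 *   // designated supernodes) whose last direct send was at least
	 *   // ZT_PEER_DIRECT_PING_DELAY ago
	 */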

	/**
	 * Dump peer I/O statistics to an open FILE (for status reporting and debug)
	 */
	class DumpPeerStatistics
	{
	public:
		DumpPeerStatistics(FILE *out) :
			_out(out),
			_now(Utils::now())
		{
			fprintf(_out,"Peer       Direct IPv4           Direct IPv6                                         Latency(ms)"ZT_EOL_S);
		}

		inline void operator()(Topology &t,const SharedPtr<Peer> &p)
		{
			InetAddress v4(p->ipv4ActivePath(_now));
			InetAddress v6(p->ipv6ActivePath(_now));
			fprintf(_out,"%-10s %-21s %-51s %u"ZT_EOL_S,
				p->address().toString().c_str(),
				((v4) ? v4.toString().c_str() : "(none)"),
				((v6) ? v6.toString().c_str() : "(none)"),
				p->latency());
		}

	private:
		FILE *_out;
		uint64_t _now;
	};

protected:
	virtual void main()
		throw();

private:
	void _reallyAddPeer(const SharedPtr<Peer> &p);

	// A job for the background deep verify thread (also does cache cleaning, flushing, etc.)
	struct _PeerDeepVerifyJob
	{
		void (*callback)(void *,const SharedPtr<Peer> &,Topology::PeerVerifyResult);
		void *arg;
		SharedPtr<Peer> candidate;
		enum {
			VERIFY_PEER,
			CLEAN_CACHE,
			EXIT_THREAD
		} type;
	};

	const RuntimeEnvironment *const _r;

	std::map< Address,SharedPtr<Peer> > _activePeers;
	Mutex _activePeers_m;

	std::list< _PeerDeepVerifyJob > _peerDeepVerifyJobs;
	Mutex _peerDeepVerifyJobs_m;
	Condition _peerDeepVerifyJobs_c;

	std::map< Identity,std::vector<InetAddress> > _supernodes;
	std::set< Address > _supernodeAddresses;
	std::vector< SharedPtr<Peer> > _supernodePeers;
	Mutex _supernodes_m;

	KISSDB _dbm;
	Mutex _dbm_m;
};

} // namespace ZeroTier

#endif