/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <map>
#include <set>
#include <vector>

#include "Constants.hpp"
#include "SelfAwareness.hpp"
#include "RuntimeEnvironment.hpp"
#include "Node.hpp"
#include "Topology.hpp"
#include "Packet.hpp"
#include "Peer.hpp"
#include "Switch.hpp"

// Entry timeout -- make it fairly long since this is just to prevent stale buildup
#define ZT_SELFAWARENESS_ENTRY_TIMEOUT 3600000

namespace ZeroTier {
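
// Functor invoked for each peer via Topology::eachPeer(); resets each peer's
// paths within a given IP scope and collects the peers that had a path reset.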
class _ResetWithinScope
{
public:
	_ResetWithinScope(uint64_t now,InetAddress::IpScope scope) :
		_now(now),
		_scope(scope) {}

	inline void operator()(Topology &t,const SharedPtr<Peer> &p)
	{
		if (p->resetWithinScope(_scope,_now))
			peersReset.push_back(p);
	}

	std::vector< SharedPtr<Peer> > peersReset;

private:
	uint64_t _now;
	InetAddress::IpScope _scope;
};

SelfAwareness::SelfAwareness(const RuntimeEnvironment *renv) :
	RR(renv),
	_phy(32)
{
}

SelfAwareness::~SelfAwareness()
{
}
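
// Handle a report from 'reporter' (itself at 'reporterPhysicalAddress') of the
// physical address it sees for us ('myPhysicalAddress'). If a trusted peer
// reports a change in our external surface within an IP scope, all paths in
// that scope are reset and a NOP is sent to affected, active peers so direct
// links can be re-established.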
void SelfAwareness::iam(const Address &reporter,const InetAddress &receivedOnLocalAddress,const InetAddress &reporterPhysicalAddress,const InetAddress &myPhysicalAddress,bool trusted,uint64_t now)
{
	const InetAddress::IpScope scope = myPhysicalAddress.ipScope();

	if ((scope != reporterPhysicalAddress.ipScope())||(scope == InetAddress::IP_SCOPE_NONE)||(scope == InetAddress::IP_SCOPE_LOOPBACK)||(scope == InetAddress::IP_SCOPE_MULTICAST))
		return;

	Mutex::Lock _l(_phy_m);
	PhySurfaceEntry &entry = _phy[PhySurfaceKey(reporter,receivedOnLocalAddress,reporterPhysicalAddress,scope)];

	if ((trusted)&&((now - entry.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)&&(!entry.mySurface.ipsEqual(myPhysicalAddress))) {
		// A change to our external surface reported by a trusted peer causes a path reset in this scope
		TRACE("physical address %s for scope %u as seen from %s(%s) differs from %s, resetting paths in scope",myPhysicalAddress.toString().c_str(),(unsigned int)scope,reporter.toString().c_str(),reporterPhysicalAddress.toString().c_str(),entry.mySurface.toString().c_str());

		entry.mySurface = myPhysicalAddress;
		entry.ts = now;
		entry.trusted = trusted;

		// Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
		// due to multiple reports of endpoint change.
		// Don't use 'entry' after this since the hash table gets modified.
		{
			Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
			PhySurfaceKey *k = (PhySurfaceKey *)0;
			PhySurfaceEntry *e = (PhySurfaceEntry *)0;
			while (i.next(k,e)) {
				if ((k->reporterPhysicalAddress != reporterPhysicalAddress)&&(k->scope == scope))
					_phy.erase(*k);
			}
		}

		// Reset all paths within this scope
		_ResetWithinScope rset(now,(InetAddress::IpScope)scope);
		RR->topology->eachPeer<_ResetWithinScope &>(rset);

		// Send a NOP to all peers for whom we forgot a path. This will cause direct
		// links to be re-established if possible, possibly using a root server or some
		// other relay.
		for(std::vector< SharedPtr<Peer> >::const_iterator p(rset.peersReset.begin());p!=rset.peersReset.end();++p) {
			if ((*p)->activelyTransferringFrames(now)) {
				Packet outp((*p)->address(),RR->identity.address(),Packet::VERB_NOP);
				RR->sw->send(outp,true);
			}
		}
	} else {
		// Otherwise just update the entry we use to determine our external surface
		entry.mySurface = myPhysicalAddress;
		entry.ts = now;
		entry.trusted = trusted;
	}
}
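
// Remove physical surface entries that have not been updated within ZT_SELFAWARENESS_ENTRY_TIMEOUT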
void SelfAwareness::clean(uint64_t now)
{
	Mutex::Lock _l(_phy_m);
	Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
	PhySurfaceKey *k = (PhySurfaceKey *)0;
	PhySurfaceEntry *e = (PhySurfaceEntry *)0;
	while (i.next(k,e)) {
		if ((now - e->ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
			_phy.erase(*k);
	}
}

std::vector<InetAddress> SelfAwareness::getSymmetricNatPredictions()
{
	/* This is based on ideas and strategies found here:
	 * https://tools.ietf.org/html/draft-takeda-symmetric-nat-traversal-00
	 *
	 * In short: a great many symmetric NATs allocate ports sequentially.
	 * This is common on enterprise and carrier grade NATs as well as consumer
	 * devices. This code generates a list of "you might try this" addresses by
	 * extrapolating likely port assignments from currently known external
	 * global IPv4 surfaces. These can then be included in a PUSH_DIRECT_PATHS
	 * message to another peer, causing it to possibly try these addresses and
	 * bust our local symmetric NAT. It works often enough to be worth the
	 * extra bit of code and does no harm in cases where it fails. */

	// Gather unique surfaces indexed by local received-on address and flag
	// us as behind a symmetric NAT if there is more than one.
	std::map< InetAddress,std::set<InetAddress> > surfaces;
	bool symmetric = false;
	{
		Mutex::Lock _l(_phy_m);

		Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
		PhySurfaceKey *k = (PhySurfaceKey *)0;
		PhySurfaceEntry *e = (PhySurfaceEntry *)0;
		InetAddress lastTrustedSurface;
		while (i.next(k,e)) {
			if ((e->mySurface.ss_family == AF_INET)&&(e->mySurface.ipScope() == InetAddress::IP_SCOPE_GLOBAL)) {
				std::set<InetAddress> &s = surfaces[k->receivedOnLocalAddress];

				/* MINOR SECURITY FIX:
				 *
				 * If the surface was not reported by a trusted (upstream) peer, we do
				 * not use its report of our surface IP for symmetric NAT prediction.
				 * Otherwise a peer could poison our external surface cache and then
				 * use this to coax us into suggesting their IP as an endpoint. This
				 * in turn could allow them to relay traffic for us. They could not
				 * decrypt or otherwise mess with it, but they could DOS us or record
				 * meta-data without anything appearing amiss.
				 *
				 * So for surfaces reported by untrusted peers we use the IP reported
				 * by a trusted peer and then just use the port.
				 *
				 * As far as we know this has never been exploited. We discovered it
				 * because certain weird configurations, such as load balancers and
				 * gateways that do not preserve IP information, can coax a node into
				 * reporting back false surface information. */
				if (e->trusted) {
					s.insert(e->mySurface);
					lastTrustedSurface = e->mySurface;
				} else if (lastTrustedSurface) {
					InetAddress tmp(lastTrustedSurface);
					tmp.setPort(e->mySurface.port());
					s.insert(tmp);
				}

				symmetric = symmetric||(s.size() > 1);
			}
		}
	}

	/* If we appear to be symmetrically NATed, generate and return extrapolations
	 * of those surfaces. Since PUSH_DIRECT_PATHS is sent multiple times, we
	 * probabilistically generate extrapolations of anywhere from +1 to +4 to
	 * increase the odds that it will work "eventually". */
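	// Example with illustrative values: a currently known global surface of
	// 203.0.113.10:4000 yields one prediction per call, somewhere in the range
	// 203.0.113.10:4001 through 203.0.113.10:4004.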
	if (symmetric) {
		std::vector<InetAddress> r;
		for(std::map< InetAddress,std::set<InetAddress> >::iterator si(surfaces.begin());si!=surfaces.end();++si) {
			for(std::set<InetAddress>::iterator i(si->second.begin());i!=si->second.end();++i) {
				InetAddress ipp(*i);
				unsigned int p = ipp.port() + 1 + ((unsigned int)RR->node->prng() & 3);
				if (p >= 65535)
					p -= 64510; // NATs seldom use ports <=1024 so wrap to 1025
				ipp.setPort(p);
				if ((si->second.count(ipp) == 0)&&(std::find(r.begin(),r.end(),ipp) == r.end())) {
					r.push_back(ipp);
				}
			}
		}
		return r;
	}
	return std::vector<InetAddress>();
}
2015-04-07 17:57:59 +00:00
} // namespace ZeroTier