/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2017  ZeroTier, Inc.  https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * You can be released from the requirements of the license by purchasing
 * a commercial license. Buying such a license is mandatory as soon as you
 * develop commercial closed-source software that incorporates or links
 * directly against ZeroTier software without disclosing the source code
 * of your own application.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <map>
#include <set>
#include <vector>

#include "Constants.hpp"
#include "SelfAwareness.hpp"
#include "RuntimeEnvironment.hpp"
#include "Node.hpp"
#include "Topology.hpp"
#include "Packet.hpp"
#include "Peer.hpp"
#include "Switch.hpp"

// Entry timeout (ms) -- make it fairly long since this is just to prevent stale buildup
#define ZT_SELFAWARENESS_ENTRY_TIMEOUT 600000

namespace ZeroTier {
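
// Function object passed to Topology::eachPeer() to reset all peer paths
// within a given IP scope and address family.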
class _ResetWithinScope
{
public:
	_ResetWithinScope(void *tPtr,uint64_t now,int inetAddressFamily,InetAddress::IpScope scope) :
		_now(now),
		_tPtr(tPtr),
		_family(inetAddressFamily),
		_scope(scope) {}

	inline void operator()(Topology &t,const SharedPtr<Peer> &p) { p->resetWithinScope(_tPtr,_scope,_family,_now); }

private:
	uint64_t _now;
	void *_tPtr;
	int _family;
	InetAddress::IpScope _scope;
};

SelfAwareness::SelfAwareness(const RuntimeEnvironment *renv) :
	RR(renv),
	_phy(128)
{
}
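
// Record a peer's report of our externally visible physical address (e.g. via OK(HELLO)).
// If a trusted peer reports a changed surface, paths within the affected IP scope are reset.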
void SelfAwareness::iam(void *tPtr,const Address &reporter,const InetAddress &receivedOnLocalAddress,const InetAddress &reporterPhysicalAddress,const InetAddress &myPhysicalAddress,bool trusted,uint64_t now)
{
	const InetAddress::IpScope scope = myPhysicalAddress.ipScope();
	if ((scope != reporterPhysicalAddress.ipScope())||(scope == InetAddress::IP_SCOPE_NONE)||(scope == InetAddress::IP_SCOPE_LOOPBACK)||(scope == InetAddress::IP_SCOPE_MULTICAST))
		return;

	Mutex::Lock _l(_phy_m);
	PhySurfaceEntry &entry = _phy[PhySurfaceKey(reporter,receivedOnLocalAddress,reporterPhysicalAddress,scope)];

	if ((trusted)&&((now - entry.ts) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)&&(!entry.mySurface.ipsEqual(myPhysicalAddress))) {
		// A change to our external surface reported by a trusted peer causes a path reset within this scope
		TRACE("physical address %s for scope %u as seen from %s(%s) differs from %s, resetting paths in scope",myPhysicalAddress.toString().c_str(),(unsigned int)scope,reporter.toString().c_str(),reporterPhysicalAddress.toString().c_str(),entry.mySurface.toString().c_str());

		entry.mySurface = myPhysicalAddress;
		entry.ts = now;
		entry.trusted = trusted;

		// Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
		// due to multiple reports of endpoint change.
		// Don't use 'entry' after this since the hash table gets modified.
		{
			Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
			PhySurfaceKey *k = (PhySurfaceKey *)0;
			PhySurfaceEntry *e = (PhySurfaceEntry *)0;
			while (i.next(k,e)) {
				if ((k->reporterPhysicalAddress != reporterPhysicalAddress)&&(k->scope == scope))
					_phy.erase(*k);
			}
		}

		// Reset all paths within this scope and address family
		_ResetWithinScope rset(tPtr,now,myPhysicalAddress.ss_family,(InetAddress::IpScope)scope);
		RR->topology->eachPeer<_ResetWithinScope &>(rset);
	} else {
		// Otherwise just update the DB used to determine our external surface
		entry.mySurface = myPhysicalAddress;
		entry.ts = now;
		entry.trusted = trusted;
	}
}
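
// Remove entries that have not been updated within ZT_SELFAWARENESS_ENTRY_TIMEOUT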
void SelfAwareness::clean(uint64_t now)
{
	Mutex::Lock _l(_phy_m);
	Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
	PhySurfaceKey *k = (PhySurfaceKey *)0;
	PhySurfaceEntry *e = (PhySurfaceEntry *)0;
	while (i.next(k,e)) {
		if ((now - e->ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
			_phy.erase(*k);
	}
}

std::vector<InetAddress> SelfAwareness::getSymmetricNatPredictions()
{
	/* This is based on ideas and strategies found here:
	 * https://tools.ietf.org/html/draft-takeda-symmetric-nat-traversal-00
	 *
	 * For each IP address reported by a trusted (upstream) peer, we find
	 * the external port most recently reported by ANY peer for that IP.
	 *
	 * We only do any of this for global IPv4 addresses since private IPs
	 * and IPv6 are not going to have symmetric NAT.
	 *
	 * SECURITY NOTE:
	 *
	 * We never use IPs reported by non-trusted peers, since this could lead
	 * to a minor vulnerability whereby a peer could poison our cache with
	 * bad external surface reports via OK(HELLO) and then possibly coax us
	 * into suggesting their IP to other peers via PUSH_DIRECT_PATHS. This
	 * in turn could allow them to MITM flows.
	 *
	 * Since flows are encrypted and authenticated they could not actually
	 * read or modify traffic, but they could gather metadata for forensic
	 * purposes or use this as a DOS attack vector. */

	std::map< uint32_t,std::pair<uint64_t,unsigned int> > maxPortByIp;
	InetAddress theOneTrueSurface;
	bool symmetric = false;
	{
		Mutex::Lock _l(_phy_m);

		{	// First get IPs from only trusted peers, and perform basic NAT type characterization
			Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
			PhySurfaceKey *k = (PhySurfaceKey *)0;
			PhySurfaceEntry *e = (PhySurfaceEntry *)0;
			while (i.next(k,e)) {
				if ((e->trusted)&&(e->mySurface.ss_family == AF_INET)&&(e->mySurface.ipScope() == InetAddress::IP_SCOPE_GLOBAL)) {
					if (!theOneTrueSurface)
						theOneTrueSurface = e->mySurface;
					else if (theOneTrueSurface != e->mySurface)
						symmetric = true; // trusted peers disagree about our surface, so this looks like symmetric NAT
					maxPortByIp[reinterpret_cast<const struct sockaddr_in *>(&(e->mySurface))->sin_addr.s_addr] = std::pair<uint64_t,unsigned int>(e->ts,e->mySurface.port());
				}
			}
		}

		{	// Then, for each of those IPs, take the external port most recently reported by ANY peer
			Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
			PhySurfaceKey *k = (PhySurfaceKey *)0;
			PhySurfaceEntry *e = (PhySurfaceEntry *)0;
			while (i.next(k,e)) {
				if ((e->mySurface.ss_family == AF_INET)&&(e->mySurface.ipScope() == InetAddress::IP_SCOPE_GLOBAL)) {
					std::map< uint32_t,std::pair<uint64_t,unsigned int> >::iterator mp(maxPortByIp.find(reinterpret_cast<const struct sockaddr_in *>(&(e->mySurface))->sin_addr.s_addr));
					if ((mp != maxPortByIp.end())&&(mp->second.first < e->ts)) {
						mp->second.first = e->ts;
						mp->second.second = e->mySurface.port();
					}
				}
			}
		}
	}

	if (symmetric) {
		// Predict the next few sequentially assigned ports for each IP
		std::vector<InetAddress> r;
		for(unsigned int k=1;k<=3;++k) {
			for(std::map< uint32_t,std::pair<uint64_t,unsigned int> >::iterator i(maxPortByIp.begin());i!=maxPortByIp.end();++i) {
				unsigned int p = i->second.second + k;
				if (p > 65535) p -= 64511; // wrap back into the 1025..65535 range
				InetAddress pred(&(i->first),4,p);
				if (std::find(r.begin(),r.end(),pred) == r.end())
					r.push_back(pred);
			}
		}
		return r;
	}

	return std::vector<InetAddress>();
}

} // namespace ZeroTier