Mirror of https://github.com/zerotier/ZeroTierOne.git
Merge branch 'adamierymenko-dev' into android-jni-dev

Also update for changed function calls that now accept a local address.

# Conflicts:
#  include/ZeroTierOne.h
#  java/CMakeLists.txt
#  java/jni/Android.mk
#  java/jni/ZT1_jnicache.cpp
#  java/jni/ZT1_jnilookup.h
#  java/jni/ZT1_jniutils.cpp
#  java/jni/com_zerotierone_sdk_Node.cpp
Commit: e8cdff3eaf
File diff suppressed because it is too large
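The headline change pulled in by this merge: the public C API drops the ZT1_ prefix in favor of ZT_, and the packet I/O entry points now take the local (receiving) address in addition to the remote address. A caller-side sketch of the updated ZT_Node_processWirePacket(), not part of the diff below; it assumes the declaration ends with the nextBackgroundTaskDeadline out-parameter described in its doc comment, and the helper name feed_wire_packet is illustrative:

#include <ZeroTierOne.h>

/* Illustrative only: hand one received UDP datagram to the node. Pass
 * &ZT_SOCKADDR_NULL (ss_family == 0) as localAddress if the receiving
 * interface is not tracked. */
static void feed_wire_packet(ZT_Node *node,
                             uint64_t now_ms,
                             const struct sockaddr_storage *localAddress,
                             const struct sockaddr_storage *remoteAddress,
                             const void *data,
                             unsigned int len)
{
    volatile uint64_t nextBackgroundTaskDeadline = 0;
    enum ZT_ResultCode rc = ZT_Node_processWirePacket(
        node, now_ms, localAddress, remoteAddress, data, len,
        &nextBackgroundTaskDeadline);
    if (ZT_ResultCode_isFatal(rc)) {
        /* inaccessible data store, out of memory, or internal error */
    }
    /* nextBackgroundTaskDeadline now holds the time at which
     * ZT_Node_processBackgroundTasks() should next be called. */
}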
@@ -40,6 +40,9 @@
#include "../node/NetworkController.hpp"
#include "../node/Mutex.hpp"

// Number of in-memory last log entries to maintain per user
#define ZT_SQLITENETWORKCONTROLLER_IN_MEMORY_LOG_SIZE 32

namespace ZeroTier {

class SqliteNetworkController : public NetworkController
@@ -54,7 +57,6 @@ public:
const Identity &identity,
uint64_t nwid,
const Dictionary &metaData,
uint64_t haveRevision,
Dictionary &netconf);

unsigned int handleControlPlaneHttpGET(
@@ -94,11 +96,45 @@ private:
const std::string &body,
std::string &responseBody,
std::string &responseContentType);
NetworkController::ResultCode _doNetworkConfigRequest(
const InetAddress &fromAddr,
const Identity &signingId,
const Identity &identity,
uint64_t nwid,
const Dictionary &metaData,
Dictionary &netconf);

std::string _dbPath;
std::string _instanceId;

std::map< std::pair<Address,uint64_t>,uint64_t > _lastRequestTime;
// A circular buffer last log
struct _LLEntry
{
_LLEntry()
{
for(long i=0;i<ZT_SQLITENETWORKCONTROLLER_IN_MEMORY_LOG_SIZE;++i)
this->l[i].ts = 0;
this->lastRequestTime = 0;
this->totalRequests = 0;
}

// Circular buffer of last log entries
struct {
uint64_t ts; // timestamp or 0 if circular buffer entry unused
char version[64];
InetAddress fromAddr;
bool authorized;
} l[ZT_SQLITENETWORKCONTROLLER_IN_MEMORY_LOG_SIZE];

// Time of last request whether successful or not
uint64_t lastRequestTime;

// Total requests by this address / network ID pair (also serves mod IN_MEMORY_LOG_SIZE as circular buffer ptr)
uint64_t totalRequests;
};

// Last log entries by address and network ID pair
std::map< std::pair<Address,uint64_t>,_LLEntry > _lastLog;

sqlite3 *_db;

@@ -106,7 +142,7 @@ private:
sqlite3_stmt *_sGetMember;
sqlite3_stmt *_sCreateMember;
sqlite3_stmt *_sGetNodeIdentity;
sqlite3_stmt *_sCreateNode;
sqlite3_stmt *_sCreateOrReplaceNode;
sqlite3_stmt *_sUpdateNode;
sqlite3_stmt *_sUpdateNode2;
sqlite3_stmt *_sGetEtherTypesFromRuleTable;
@@ -137,6 +173,7 @@ private:
sqlite3_stmt *_sUpdateMemberAuthorized;
sqlite3_stmt *_sUpdateMemberActiveBridge;
sqlite3_stmt *_sDeleteMember;
sqlite3_stmt *_sDeleteAllNetworkMembers;
sqlite3_stmt *_sDeleteNetwork;
sqlite3_stmt *_sGetGateways;
sqlite3_stmt *_sDeleteGateways;
@@ -144,9 +181,6 @@ private:
sqlite3_stmt *_sIncrementMemberRevisionCounter;
sqlite3_stmt *_sGetConfig;
sqlite3_stmt *_sSetConfig;
sqlite3_stmt *_sPutLog;
sqlite3_stmt *_sGetMemberLog;
sqlite3_stmt *_sGetRecentMemberLog;

Mutex _lock;
};
@@ -77,19 +77,6 @@ CREATE TABLE Member (
CREATE INDEX Member_networkId_activeBridge ON Member(networkId, activeBridge);
CREATE INDEX Member_networkId_memberRevision ON Member(networkId, memberRevision);

CREATE TABLE Log (
networkId char(16) NOT NULL,
nodeId char(10) NOT NULL,
ts integer NOT NULL,
authorized integer NOT NULL,
authTokenId integer,
version varchar(16),
fromAddr varchar(64)
);

CREATE INDEX Log_networkId_nodeId ON Log(networkId, nodeId);
CREATE INDEX Log_ts ON Log(ts);

CREATE TABLE Relay (
networkId char(16) NOT NULL REFERENCES Network(id) ON DELETE CASCADE,
address char(10) NOT NULL,
@@ -78,19 +78,6 @@
"CREATE INDEX Member_networkId_activeBridge ON Member(networkId, activeBridge);\n"\
"CREATE INDEX Member_networkId_memberRevision ON Member(networkId, memberRevision);\n"\
"\n"\
"CREATE TABLE Log (\n"\
" networkId char(16) NOT NULL,\n"\
" nodeId char(10) NOT NULL,\n"\
" ts integer NOT NULL,\n"\
" authorized integer NOT NULL,\n"\
" authTokenId integer,\n"\
" version varchar(16),\n"\
" fromAddr varchar(64)\n"\
");\n"\
"\n"\
"CREATE INDEX Log_networkId_nodeId ON Log(networkId, nodeId);\n"\
"CREATE INDEX Log_ts ON Log(ts);\n"\
"\n"\
"CREATE TABLE Relay (\n"\
" networkId char(16) NOT NULL REFERENCES Network(id) ON DELETE CASCADE,\n"\
" address char(10) NOT NULL,\n"\
@ -26,8 +26,8 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* This defines the external C API for ZeroTier One, the core network
|
||||
* virtualization engine.
|
||||
* This defines the external C API for ZeroTier's core network virtualization
|
||||
* engine.
|
||||
*/
|
||||
|
||||
#ifndef ZT_ZEROTIERONE_H
|
||||
@ -56,9 +56,9 @@ extern "C" {
|
||||
/****************************************************************************/
|
||||
|
||||
/**
|
||||
* Default port for the ZeroTier service
|
||||
* Default UDP port for devices running a ZeroTier endpoint
|
||||
*/
|
||||
#define ZT1_DEFAULT_PORT 9993
|
||||
#define ZT_DEFAULT_PORT 9993
|
||||
|
||||
/**
|
||||
* Maximum MTU for ZeroTier virtual networks
|
||||
@ -83,37 +83,42 @@ extern "C" {
|
||||
* We use 2800, which leaves some room for other payload in other types of
|
||||
* messages such as multicast propagation or future support for bridging.
|
||||
*/
|
||||
#define ZT1_MAX_MTU 2800
|
||||
#define ZT_MAX_MTU 2800
|
||||
|
||||
/**
|
||||
* Maximum length of network short name
|
||||
*/
|
||||
#define ZT1_MAX_NETWORK_SHORT_NAME_LENGTH 255
|
||||
#define ZT_MAX_NETWORK_SHORT_NAME_LENGTH 255
|
||||
|
||||
/**
|
||||
* Maximum number of statically assigned IP addresses per network endpoint using ZT address management (not DHCP)
|
||||
*/
|
||||
#define ZT1_MAX_ZT_ASSIGNED_ADDRESSES 16
|
||||
#define ZT_MAX_ZT_ASSIGNED_ADDRESSES 16
|
||||
|
||||
/**
|
||||
* Maximum number of multicast group subscriptions per network
|
||||
*/
|
||||
#define ZT1_MAX_NETWORK_MULTICAST_SUBSCRIPTIONS 4096
|
||||
#define ZT_MAX_NETWORK_MULTICAST_SUBSCRIPTIONS 4096
|
||||
|
||||
/**
|
||||
* Maximum number of direct network paths to a given peer
|
||||
*/
|
||||
#define ZT1_MAX_PEER_NETWORK_PATHS 4
|
||||
#define ZT_MAX_PEER_NETWORK_PATHS 4
|
||||
|
||||
/**
|
||||
* Feature flag: ZeroTier One was built to be thread-safe -- concurrent processXXX() calls are okay
|
||||
*/
|
||||
#define ZT1_FEATURE_FLAG_THREAD_SAFE 0x00000001
|
||||
#define ZT_FEATURE_FLAG_THREAD_SAFE 0x00000001
|
||||
|
||||
/**
|
||||
* Feature flag: FIPS compliant build (not available yet, but reserved for future use if we ever do this)
|
||||
*/
|
||||
#define ZT1_FEATURE_FLAG_FIPS 0x00000002
|
||||
#define ZT_FEATURE_FLAG_FIPS 0x00000002
|
||||
|
||||
/**
|
||||
* A null/empty sockaddr (all zero) to signify an unspecified socket address
|
||||
*/
|
||||
extern const struct sockaddr_storage ZT_SOCKADDR_NULL;
|
||||
|
||||
/****************************************************************************/
|
||||
/* Structures and other types */
|
||||
@ -122,53 +127,53 @@ extern "C" {
|
||||
/**
|
||||
* Function return code: OK (0) or error results
|
||||
*
|
||||
* Use ZT1_ResultCode_isFatal() to check for a fatal error. If a fatal error
|
||||
* Use ZT_ResultCode_isFatal() to check for a fatal error. If a fatal error
|
||||
* occurs, the node should be considered to not be working correctly. These
|
||||
* indicate serious problems like an inaccessible data store or a compile
|
||||
* problem.
|
||||
*/
|
||||
enum ZT1_ResultCode
|
||||
enum ZT_ResultCode
|
||||
{
|
||||
/**
|
||||
* Operation completed normally
|
||||
*/
|
||||
ZT1_RESULT_OK = 0,
|
||||
ZT_RESULT_OK = 0,
|
||||
|
||||
// Fatal errors (>0, <1000)
|
||||
|
||||
/**
|
||||
* Ran out of memory
|
||||
*/
|
||||
ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY = 1,
|
||||
ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY = 1,
|
||||
|
||||
/**
|
||||
* Data store is not writable or has failed
|
||||
*/
|
||||
ZT1_RESULT_FATAL_ERROR_DATA_STORE_FAILED = 2,
|
||||
ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED = 2,
|
||||
|
||||
/**
|
||||
* Internal error (e.g. unexpected exception indicating bug or build problem)
|
||||
*/
|
||||
ZT1_RESULT_FATAL_ERROR_INTERNAL = 3,
|
||||
ZT_RESULT_FATAL_ERROR_INTERNAL = 3,
|
||||
|
||||
// Non-fatal errors (>1000)
|
||||
|
||||
/**
|
||||
* Network ID not valid
|
||||
*/
|
||||
ZT1_RESULT_ERROR_NETWORK_NOT_FOUND = 1000
|
||||
ZT_RESULT_ERROR_NETWORK_NOT_FOUND = 1000
|
||||
};
|
||||
|
||||
/**
|
||||
* @param x Result code
|
||||
* @return True if result code indicates a fatal error
|
||||
*/
|
||||
#define ZT1_ResultCode_isFatal(x) ((((int)(x)) > 0)&&(((int)(x)) < 1000))
|
||||
#define ZT_ResultCode_isFatal(x) ((((int)(x)) > 0)&&(((int)(x)) < 1000))
|
||||
|
||||
/**
|
||||
* Status codes sent to status update callback when things happen
|
||||
*/
|
||||
enum ZT1_Event
|
||||
enum ZT_Event
|
||||
{
|
||||
/**
|
||||
* Node has been initialized
|
||||
@ -178,21 +183,21 @@ enum ZT1_Event
|
||||
*
|
||||
* Meta-data: none
|
||||
*/
|
||||
ZT1_EVENT_UP = 0,
|
||||
ZT_EVENT_UP = 0,
|
||||
|
||||
/**
|
||||
* Node is offline -- network does not seem to be reachable by any available strategy
|
||||
*
|
||||
* Meta-data: none
|
||||
*/
|
||||
ZT1_EVENT_OFFLINE = 1,
|
||||
ZT_EVENT_OFFLINE = 1,
|
||||
|
||||
/**
|
||||
* Node is online -- at least one upstream node appears reachable
|
||||
*
|
||||
* Meta-data: none
|
||||
*/
|
||||
ZT1_EVENT_ONLINE = 2,
|
||||
ZT_EVENT_ONLINE = 2,
|
||||
|
||||
/**
|
||||
* Node is shutting down
|
||||
@ -203,7 +208,7 @@ enum ZT1_Event
|
||||
*
|
||||
* Meta-data: none
|
||||
*/
|
||||
ZT1_EVENT_DOWN = 3,
|
||||
ZT_EVENT_DOWN = 3,
|
||||
|
||||
/**
|
||||
* Your identity has collided with another node's ZeroTier address
|
||||
@ -235,7 +240,7 @@ enum ZT1_Event
|
||||
*
|
||||
* Meta-data: none
|
||||
*/
|
||||
ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION = 4,
|
||||
ZT_EVENT_FATAL_ERROR_IDENTITY_COLLISION = 4,
|
||||
|
||||
/**
|
||||
* A more recent version was observed on the network
|
||||
@ -246,21 +251,21 @@ enum ZT1_Event
|
||||
*
|
||||
* Meta-data: unsigned int[3], more recent version number
|
||||
*/
|
||||
ZT1_EVENT_SAW_MORE_RECENT_VERSION = 5,
|
||||
ZT_EVENT_SAW_MORE_RECENT_VERSION = 5,
|
||||
|
||||
/**
|
||||
* A packet failed authentication
|
||||
*
|
||||
* Meta-data: struct sockaddr_storage containing origin address of packet
|
||||
*/
|
||||
ZT1_EVENT_AUTHENTICATION_FAILURE = 6,
|
||||
ZT_EVENT_AUTHENTICATION_FAILURE = 6,
|
||||
|
||||
/**
|
||||
* A received packet was not valid
|
||||
*
|
||||
* Meta-data: struct sockaddr_storage containing origin address of packet
|
||||
*/
|
||||
ZT1_EVENT_INVALID_PACKET = 7,
|
||||
ZT_EVENT_INVALID_PACKET = 7,
|
||||
|
||||
/**
|
||||
* Trace (debugging) message
|
||||
@ -269,7 +274,7 @@ enum ZT1_Event
|
||||
*
|
||||
* Meta-data: C string, TRACE message
|
||||
*/
|
||||
ZT1_EVENT_TRACE = 8
|
||||
ZT_EVENT_TRACE = 8
|
||||
};
|
||||
|
||||
/**
|
||||
@ -300,58 +305,58 @@ typedef struct
|
||||
* True if some kind of connectivity appears available
|
||||
*/
|
||||
int online;
|
||||
} ZT1_NodeStatus;
|
||||
} ZT_NodeStatus;
|
||||
|
||||
/**
|
||||
* Virtual network status codes
|
||||
*/
|
||||
enum ZT1_VirtualNetworkStatus
|
||||
enum ZT_VirtualNetworkStatus
|
||||
{
|
||||
/**
|
||||
* Waiting for network configuration (also means revision == 0)
|
||||
*/
|
||||
ZT1_NETWORK_STATUS_REQUESTING_CONFIGURATION = 0,
|
||||
ZT_NETWORK_STATUS_REQUESTING_CONFIGURATION = 0,
|
||||
|
||||
/**
|
||||
* Configuration received and we are authorized
|
||||
*/
|
||||
ZT1_NETWORK_STATUS_OK = 1,
|
||||
ZT_NETWORK_STATUS_OK = 1,
|
||||
|
||||
/**
|
||||
* Netconf master told us 'nope'
|
||||
*/
|
||||
ZT1_NETWORK_STATUS_ACCESS_DENIED = 2,
|
||||
ZT_NETWORK_STATUS_ACCESS_DENIED = 2,
|
||||
|
||||
/**
|
||||
* Netconf master exists, but this virtual network does not
|
||||
*/
|
||||
ZT1_NETWORK_STATUS_NOT_FOUND = 3,
|
||||
ZT_NETWORK_STATUS_NOT_FOUND = 3,
|
||||
|
||||
/**
|
||||
* Initialization of network failed or other internal error
|
||||
*/
|
||||
ZT1_NETWORK_STATUS_PORT_ERROR = 4,
|
||||
ZT_NETWORK_STATUS_PORT_ERROR = 4,
|
||||
|
||||
/**
|
||||
* ZeroTier One version too old
|
||||
*/
|
||||
ZT1_NETWORK_STATUS_CLIENT_TOO_OLD = 5
|
||||
ZT_NETWORK_STATUS_CLIENT_TOO_OLD = 5
|
||||
};
|
||||
|
||||
/**
|
||||
* Virtual network type codes
|
||||
*/
|
||||
enum ZT1_VirtualNetworkType
|
||||
enum ZT_VirtualNetworkType
|
||||
{
|
||||
/**
|
||||
* Private networks are authorized via certificates of membership
|
||||
*/
|
||||
ZT1_NETWORK_TYPE_PRIVATE = 0,
|
||||
ZT_NETWORK_TYPE_PRIVATE = 0,
|
||||
|
||||
/**
|
||||
* Public networks have no access control -- they'll always be AUTHORIZED
|
||||
*/
|
||||
ZT1_NETWORK_TYPE_PUBLIC = 1
|
||||
ZT_NETWORK_TYPE_PUBLIC = 1
|
||||
};
|
||||
|
||||
/**
|
||||
@ -368,32 +373,32 @@ typedef struct
|
||||
* Additional distinguishing information (usually zero)
|
||||
*/
|
||||
unsigned long adi;
|
||||
} ZT1_MulticastGroup;
|
||||
} ZT_MulticastGroup;
|
||||
|
||||
/**
|
||||
* Virtual network configuration update type
|
||||
*/
|
||||
enum ZT1_VirtualNetworkConfigOperation
|
||||
enum ZT_VirtualNetworkConfigOperation
|
||||
{
|
||||
/**
|
||||
* Network is coming up (either for the first time or after service restart)
|
||||
*/
|
||||
ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_UP = 1,
|
||||
ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP = 1,
|
||||
|
||||
/**
|
||||
* Network configuration has been updated
|
||||
*/
|
||||
ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE = 2,
|
||||
ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE = 2,
|
||||
|
||||
/**
|
||||
* Network is going down (not permanently)
|
||||
*/
|
||||
ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN = 3,
|
||||
ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN = 3,
|
||||
|
||||
/**
|
||||
* Network is going down permanently (leave/delete)
|
||||
*/
|
||||
ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY = 4
|
||||
ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY = 4
|
||||
};
|
||||
|
||||
/**
|
||||
@ -414,17 +419,17 @@ typedef struct
|
||||
/**
|
||||
* Network name (from network configuration master)
|
||||
*/
|
||||
char name[ZT1_MAX_NETWORK_SHORT_NAME_LENGTH + 1];
|
||||
char name[ZT_MAX_NETWORK_SHORT_NAME_LENGTH + 1];
|
||||
|
||||
/**
|
||||
* Network configuration request status
|
||||
*/
|
||||
enum ZT1_VirtualNetworkStatus status;
|
||||
enum ZT_VirtualNetworkStatus status;
|
||||
|
||||
/**
|
||||
* Network type
|
||||
*/
|
||||
enum ZT1_VirtualNetworkType type;
|
||||
enum ZT_VirtualNetworkType type;
|
||||
|
||||
/**
|
||||
* Maximum interface MTU
|
||||
@ -478,7 +483,7 @@ typedef struct
|
||||
/**
|
||||
* Multicast group subscriptions
|
||||
*/
|
||||
ZT1_MulticastGroup multicastSubscriptions[ZT1_MAX_NETWORK_MULTICAST_SUBSCRIPTIONS];
|
||||
ZT_MulticastGroup multicastSubscriptions[ZT_MAX_NETWORK_MULTICAST_SUBSCRIPTIONS];
|
||||
|
||||
/**
|
||||
* Number of assigned addresses
|
||||
@ -495,17 +500,17 @@ typedef struct
|
||||
* This is only used for ZeroTier-managed address assignments sent by the
|
||||
* virtual network's configuration master.
|
||||
*/
|
||||
struct sockaddr_storage assignedAddresses[ZT1_MAX_ZT_ASSIGNED_ADDRESSES];
|
||||
} ZT1_VirtualNetworkConfig;
|
||||
struct sockaddr_storage assignedAddresses[ZT_MAX_ZT_ASSIGNED_ADDRESSES];
|
||||
} ZT_VirtualNetworkConfig;
|
||||
|
||||
/**
|
||||
* A list of networks
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
ZT1_VirtualNetworkConfig *networks;
|
||||
ZT_VirtualNetworkConfig *networks;
|
||||
unsigned long networkCount;
|
||||
} ZT1_VirtualNetworkList;
|
||||
} ZT_VirtualNetworkList;
|
||||
|
||||
/**
|
||||
* Physical network path to a peer
|
||||
@ -541,15 +546,15 @@ typedef struct
|
||||
* Is path preferred?
|
||||
*/
|
||||
int preferred;
|
||||
} ZT1_PeerPhysicalPath;
|
||||
} ZT_PeerPhysicalPath;
|
||||
|
||||
/**
|
||||
* What trust hierarchy role does this peer have?
|
||||
*/
|
||||
enum ZT1_PeerRole {
|
||||
ZT1_PEER_ROLE_LEAF = 0, // ordinary node
|
||||
ZT1_PEER_ROLE_RELAY = 1, // relay node
|
||||
ZT1_PEER_ROLE_ROOT = 2 // root server
|
||||
enum ZT_PeerRole {
|
||||
ZT_PEER_ROLE_LEAF = 0, // ordinary node
|
||||
ZT_PEER_ROLE_RELAY = 1, // relay node
|
||||
ZT_PEER_ROLE_ROOT = 2 // root server
|
||||
};
|
||||
|
||||
/**
|
||||
@ -595,7 +600,7 @@ typedef struct
|
||||
/**
|
||||
* What trust hierarchy role does this device have?
|
||||
*/
|
||||
enum ZT1_PeerRole role;
|
||||
enum ZT_PeerRole role;
|
||||
|
||||
/**
|
||||
* Number of paths (size of paths[])
|
||||
@ -605,31 +610,31 @@ typedef struct
|
||||
/**
|
||||
* Known network paths to peer
|
||||
*/
|
||||
ZT1_PeerPhysicalPath paths[ZT1_MAX_PEER_NETWORK_PATHS];
|
||||
} ZT1_Peer;
|
||||
ZT_PeerPhysicalPath paths[ZT_MAX_PEER_NETWORK_PATHS];
|
||||
} ZT_Peer;
|
||||
|
||||
/**
|
||||
* List of peers
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
ZT1_Peer *peers;
|
||||
ZT_Peer *peers;
|
||||
unsigned long peerCount;
|
||||
} ZT1_PeerList;
|
||||
} ZT_PeerList;
|
||||
|
||||
/**
|
||||
* Local interface trust levels
|
||||
*/
|
||||
typedef enum {
|
||||
ZT1_LOCAL_INTERFACE_ADDRESS_TRUST_NORMAL = 0,
|
||||
ZT1_LOCAL_INTERFACE_ADDRESS_TRUST_PRIVACY = 1,
|
||||
ZT1_LOCAL_INTERFACE_ADDRESS_TRUST_ULTIMATE = 2
|
||||
} ZT1_LocalInterfaceAddressTrust;
|
||||
ZT_LOCAL_INTERFACE_ADDRESS_TRUST_NORMAL = 0,
|
||||
ZT_LOCAL_INTERFACE_ADDRESS_TRUST_PRIVACY = 1,
|
||||
ZT_LOCAL_INTERFACE_ADDRESS_TRUST_ULTIMATE = 2
|
||||
} ZT_LocalInterfaceAddressTrust;
|
||||
|
||||
/**
|
||||
* An instance of a ZeroTier One node (opaque)
|
||||
*/
|
||||
typedef void ZT1_Node;
|
||||
typedef void ZT_Node;
|
||||
|
||||
/****************************************************************************/
|
||||
/* Callbacks used by Node API */
|
||||
@ -656,7 +661,12 @@ typedef void ZT1_Node;
|
||||
* on failure, and this results in the network being placed into the
|
||||
* PORT_ERROR state.
|
||||
*/
|
||||
typedef int (*ZT1_VirtualNetworkConfigFunction)(ZT1_Node *,void *,uint64_t,enum ZT1_VirtualNetworkConfigOperation,const ZT1_VirtualNetworkConfig *);
|
||||
typedef int (*ZT_VirtualNetworkConfigFunction)(
|
||||
ZT_Node *,
|
||||
void *,
|
||||
uint64_t,
|
||||
enum ZT_VirtualNetworkConfigOperation,
|
||||
const ZT_VirtualNetworkConfig *);
|
||||
|
||||
/**
|
||||
* Function to send a frame out to a virtual network port
|
||||
@ -665,7 +675,16 @@ typedef int (*ZT1_VirtualNetworkConfigFunction)(ZT1_Node *,void *,uint64_t,enum
|
||||
* (5) destination MAC, (6) ethertype, (7) VLAN ID, (8) frame data,
|
||||
* (9) frame length.
|
||||
*/
|
||||
typedef void (*ZT1_VirtualNetworkFrameFunction)(ZT1_Node *,void *,uint64_t,uint64_t,uint64_t,unsigned int,unsigned int,const void *,unsigned int);
|
||||
typedef void (*ZT_VirtualNetworkFrameFunction)(
|
||||
ZT_Node *,
|
||||
void *,
|
||||
uint64_t,
|
||||
uint64_t,
|
||||
uint64_t,
|
||||
unsigned int,
|
||||
unsigned int,
|
||||
const void *,
|
||||
unsigned int);
|
||||
|
||||
/**
|
||||
* Callback for events
|
||||
@ -674,9 +693,13 @@ typedef void (*ZT1_VirtualNetworkFrameFunction)(ZT1_Node *,void *,uint64_t,uint6
|
||||
* and on certain non-fatal errors and events of interest. The final void
|
||||
* parameter points to event meta-data. The type of event meta-data (and
|
||||
* whether it is present at all) is event type dependent. See the comments
|
||||
* in the definition of ZT1_Event.
|
||||
* in the definition of ZT_Event.
|
||||
*/
|
||||
typedef void (*ZT1_EventCallback)(ZT1_Node *,void *,enum ZT1_Event,const void *);
|
||||
typedef void (*ZT_EventCallback)(
|
||||
ZT_Node *,
|
||||
void *,
|
||||
enum ZT_Event,
|
||||
const void *);
|
||||
|
||||
/**
|
||||
* Function to get an object from the data store
|
||||
@ -698,7 +721,14 @@ typedef void (*ZT1_EventCallback)(ZT1_Node *,void *,enum ZT1_Event,const void *)
|
||||
* read. The caller may call the function multiple times to read the whole
|
||||
* object.
|
||||
*/
|
||||
typedef long (*ZT1_DataStoreGetFunction)(ZT1_Node *,void *,const char *,void *,unsigned long,unsigned long,unsigned long *);
|
||||
typedef long (*ZT_DataStoreGetFunction)(
|
||||
ZT_Node *,
|
||||
void *,
|
||||
const char *,
|
||||
void *,
|
||||
unsigned long,
|
||||
unsigned long,
|
||||
unsigned long *);
|
||||
|
||||
/**
|
||||
* Function to store an object in the data store
|
||||
@@ -716,19 +746,42 @@ typedef long (*ZT1_DataStoreGetFunction)(ZT1_Node *,void *,const char *,void *,u
* If the data pointer is null, this must be interpreted as a delete
* operation.
*/
typedef int (*ZT1_DataStorePutFunction)(ZT1_Node *,void *,const char *,const void *,unsigned long,int);
typedef int (*ZT_DataStorePutFunction)(
ZT_Node *,
void *,
const char *,
const void *,
unsigned long,
int);

/**
* Function to send a ZeroTier packet out over the wire
*
* Parameters: (1) node, (2) user ptr, (3) address, (4) packet data,
* (5) packet data length.
* Parameters:
* (1) Node
* (2) User pointer
* (3) Local interface address
* (4) Remote address
* (5) Packet data
* (6) Packet length
*
* If there is only one local interface it is safe to ignore the local
* interface address. Otherwise if running with multiple interfaces, the
* correct local interface should be chosen by address unless NULL. If
* the ss_family field is zero (NULL address), a random or preferred
* default interface should be used.
*
* The function must return zero on success and may return any error code
* on failure. Note that success does not (of course) guarantee packet
* delivery. It only means that the packet appears to have been sent.
*/
typedef int (*ZT1_WirePacketSendFunction)(ZT1_Node *,void *,const struct sockaddr_storage *,const void *,unsigned int);
typedef int (*ZT_WirePacketSendFunction)(
ZT_Node *, /* Node */
void *, /* User ptr */
const struct sockaddr_storage *, /* Local address */
const struct sockaddr_storage *, /* Remote address */
const void *, /* Packet data */
unsigned int); /* Packet length */
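As the comment above notes, a host with a single bound UDP socket can ignore the local address hint. A minimal sketch of such a sender, not part of this commit; it assumes the user pointer passed to ZT_Node_new() points at the socket descriptor:

#include <ZeroTierOne.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Hypothetical single-socket sender: localAddr is ignored (one interface),
 * remoteAddr becomes the sendto() destination. Returns 0 once the datagram
 * has been handed to the kernel, nonzero otherwise. */
int example_wire_send(ZT_Node *node,
                      void *uptr,
                      const struct sockaddr_storage *localAddr,
                      const struct sockaddr_storage *remoteAddr,
                      const void *data,
                      unsigned int len)
{
    int fd = *(const int *)uptr; /* assumption: uptr holds the UDP socket fd */
    socklen_t slen = (remoteAddr->ss_family == AF_INET6)
        ? (socklen_t)sizeof(struct sockaddr_in6)
        : (socklen_t)sizeof(struct sockaddr_in);
    ssize_t sent = sendto(fd, data, len, 0,
                          (const struct sockaddr *)remoteAddr, slen);
    (void)node;
    (void)localAddr; /* single interface: safe to ignore per the comment above */
    return (sent == (ssize_t)len) ? 0 : -1;
}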

/****************************************************************************/
/* C Node API */
@@ -747,24 +800,20 @@ typedef int (*ZT1_WirePacketSendFunction)(ZT1_Node *,void *,const struct sockadd
* @param dataStorePutFunction Function called to put objects in persistent storage
* @param virtualNetworkConfigFunction Function to be called when virtual LANs are created, deleted, or their config parameters change
* @param eventCallback Function to receive status updates and non-fatal error notices
* @param overrideRootTopology If not NULL, must contain string-serialize root topology (for testing, default: NULL)
* @param overrideRootTopology Alternative root server topology or NULL for default (mostly for test/debug use)
* @return OK (0) or error code if a fatal error condition has occurred
*/
enum ZT1_ResultCode ZT1_Node_new(
ZT1_Node **node,
enum ZT_ResultCode ZT_Node_new(
ZT_Node **node,
void *uptr,
uint64_t now,
ZT1_DataStoreGetFunction dataStoreGetFunction,
ZT1_DataStorePutFunction dataStorePutFunction,
ZT1_WirePacketSendFunction wirePacketSendFunction,
ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT1_EventCallback eventCallback,
const char *overrideRootTopology
#ifdef __cplusplus
= (const char *)0
#endif
);
ZT_DataStoreGetFunction dataStoreGetFunction,
ZT_DataStorePutFunction dataStorePutFunction,
ZT_WirePacketSendFunction wirePacketSendFunction,
ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT_EventCallback eventCallback,
const char *overrideRootTopology);

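For completeness, a sketch (not from this commit) of constructing a node against the renamed API; the example_* callbacks are assumed to be supplied by the host (example_wire_send is sketched earlier), and note that plain C callers now pass overrideRootTopology explicitly because the C++-only default argument was removed:

#include <stddef.h>
#include <ZeroTierOne.h>

/* Host-supplied callbacks, assumed to be defined elsewhere; prototypes only. */
long example_data_store_get(ZT_Node *,void *,const char *,void *,unsigned long,unsigned long,unsigned long *);
int example_data_store_put(ZT_Node *,void *,const char *,const void *,unsigned long,int);
int example_wire_send(ZT_Node *,void *,const struct sockaddr_storage *,const struct sockaddr_storage *,const void *,unsigned int);
void example_frame_handler(ZT_Node *,void *,uint64_t,uint64_t,uint64_t,unsigned int,unsigned int,const void *,unsigned int);
int example_config_handler(ZT_Node *,void *,uint64_t,enum ZT_VirtualNetworkConfigOperation,const ZT_VirtualNetworkConfig *);
void example_event_handler(ZT_Node *,void *,enum ZT_Event,const void *);

ZT_Node *example_create_node(void *uptr, uint64_t now_ms)
{
    ZT_Node *node = (ZT_Node *)0;
    enum ZT_ResultCode rc = ZT_Node_new(
        &node,
        uptr,
        now_ms,
        &example_data_store_get,
        &example_data_store_put,
        &example_wire_send,
        &example_frame_handler,
        &example_config_handler,
        &example_event_handler,
        NULL); /* overrideRootTopology: NULL selects the built-in default roots */
    return (rc == ZT_RESULT_OK) ? node : (ZT_Node *)0;
}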
/**
* Delete a node and free all resources it consumes
@@ -774,22 +823,24 @@ enum ZT1_ResultCode ZT1_Node_new(
*
* @param node Node to delete
*/
void ZT1_Node_delete(ZT1_Node *node);
void ZT_Node_delete(ZT_Node *node);

/**
* Process a packet received from the physical wire
*
* @param node Node instance
* @param now Current clock in milliseconds
* @param localAddress Local address, or point to ZT_SOCKADDR_NULL if unspecified
* @param remoteAddress Origin of packet
* @param packetData Packet data
* @param packetLength Packet length
* @param nextBackgroundTaskDeadline Value/result: set to deadline for next call to processBackgroundTasks()
* @return OK (0) or error code if a fatal error condition has occurred
*/
enum ZT1_ResultCode ZT1_Node_processWirePacket(
ZT1_Node *node,
enum ZT_ResultCode ZT_Node_processWirePacket(
ZT_Node *node,
uint64_t now,
const struct sockaddr_storage *localAddress,
const struct sockaddr_storage *remoteAddress,
const void *packetData,
unsigned int packetLength,
@ -810,8 +861,8 @@ enum ZT1_ResultCode ZT1_Node_processWirePacket(
|
||||
* @param nextBackgroundTaskDeadline Value/result: set to deadline for next call to processBackgroundTasks()
|
||||
* @return OK (0) or error code if a fatal error condition has occurred
|
||||
*/
|
||||
enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
|
||||
ZT1_Node *node,
|
||||
enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
|
||||
ZT_Node *node,
|
||||
uint64_t now,
|
||||
uint64_t nwid,
|
||||
uint64_t sourceMac,
|
||||
@ -830,7 +881,7 @@ enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
|
||||
* @param nextBackgroundTaskDeadline Value/result: set to deadline for next call to processBackgroundTasks()
|
||||
* @return OK (0) or error code if a fatal error condition has occurred
|
||||
*/
|
||||
enum ZT1_ResultCode ZT1_Node_processBackgroundTasks(ZT1_Node *node,uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline);
|
||||
enum ZT_ResultCode ZT_Node_processBackgroundTasks(ZT_Node *node,uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline);
|
||||
|
||||
/**
|
||||
* Join a network
|
||||
@ -845,7 +896,7 @@ enum ZT1_ResultCode ZT1_Node_processBackgroundTasks(ZT1_Node *node,uint64_t now,
|
||||
* @param nwid 64-bit ZeroTier network ID
|
||||
* @return OK (0) or error code if a fatal error condition has occurred
|
||||
*/
|
||||
enum ZT1_ResultCode ZT1_Node_join(ZT1_Node *node,uint64_t nwid);
|
||||
enum ZT_ResultCode ZT_Node_join(ZT_Node *node,uint64_t nwid);
|
||||
|
||||
/**
|
||||
* Leave a network
|
||||
@ -858,7 +909,7 @@ enum ZT1_ResultCode ZT1_Node_join(ZT1_Node *node,uint64_t nwid);
|
||||
* @param nwid 64-bit network ID
|
||||
* @return OK (0) or error code if a fatal error condition has occurred
|
||||
*/
|
||||
enum ZT1_ResultCode ZT1_Node_leave(ZT1_Node *node,uint64_t nwid);
|
||||
enum ZT_ResultCode ZT_Node_leave(ZT_Node *node,uint64_t nwid);
|
||||
|
||||
/**
|
||||
* Subscribe to an Ethernet multicast group
|
||||
@ -882,14 +933,10 @@ enum ZT1_ResultCode ZT1_Node_leave(ZT1_Node *node,uint64_t nwid);
|
||||
* @param node Node instance
|
||||
* @param nwid 64-bit network ID
|
||||
* @param multicastGroup Ethernet multicast or broadcast MAC (least significant 48 bits)
|
||||
* @param multicastAdi Multicast ADI (least significant 32 bits only, default: 0)
|
||||
* @param multicastAdi Multicast ADI (least significant 32 bits only, use 0 if not needed)
|
||||
* @return OK (0) or error code if a fatal error condition has occurred
|
||||
*/
|
||||
enum ZT1_ResultCode ZT1_Node_multicastSubscribe(ZT1_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi
|
||||
#ifdef __cplusplus
|
||||
= 0
|
||||
#endif
|
||||
);
|
||||
enum ZT_ResultCode ZT_Node_multicastSubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
|
||||
|
||||
/**
|
||||
* Unsubscribe from an Ethernet multicast group (or all groups)
|
||||
@ -902,14 +949,10 @@ enum ZT1_ResultCode ZT1_Node_multicastSubscribe(ZT1_Node *node,uint64_t nwid,uin
|
||||
* @param node Node instance
|
||||
* @param nwid 64-bit network ID
|
||||
* @param multicastGroup Ethernet multicast or broadcast MAC (least significant 48 bits)
|
||||
* @param multicastAdi Multicast ADI (least significant 32 bits only, default: 0)
|
||||
* @param multicastAdi Multicast ADI (least significant 32 bits only, use 0 if not needed)
|
||||
* @return OK (0) or error code if a fatal error condition has occurred
|
||||
*/
|
||||
enum ZT1_ResultCode ZT1_Node_multicastUnsubscribe(ZT1_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi
|
||||
#ifdef __cplusplus
|
||||
= 0
|
||||
#endif
|
||||
);
|
||||
enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
|
||||
|
||||
/**
|
||||
* Get this node's 40-bit ZeroTier address
|
||||
@ -917,7 +960,7 @@ enum ZT1_ResultCode ZT1_Node_multicastUnsubscribe(ZT1_Node *node,uint64_t nwid,u
|
||||
* @param node Node instance
|
||||
* @return ZeroTier address (least significant 40 bits of 64-bit int)
|
||||
*/
|
||||
uint64_t ZT1_Node_address(ZT1_Node *node);
|
||||
uint64_t ZT_Node_address(ZT_Node *node);
|
||||
|
||||
/**
|
||||
* Get the status of this node
|
||||
@ -925,7 +968,7 @@ uint64_t ZT1_Node_address(ZT1_Node *node);
|
||||
* @param node Node instance
|
||||
* @param status Buffer to fill with current node status
|
||||
*/
|
||||
void ZT1_Node_status(ZT1_Node *node,ZT1_NodeStatus *status);
|
||||
void ZT_Node_status(ZT_Node *node,ZT_NodeStatus *status);
|
||||
|
||||
/**
|
||||
* Get a list of known peer nodes
|
||||
@ -936,7 +979,7 @@ void ZT1_Node_status(ZT1_Node *node,ZT1_NodeStatus *status);
|
||||
* @param node Node instance
|
||||
* @return List of known peers or NULL on failure
|
||||
*/
|
||||
ZT1_PeerList *ZT1_Node_peers(ZT1_Node *node);
|
||||
ZT_PeerList *ZT_Node_peers(ZT_Node *node);
|
||||
|
||||
/**
|
||||
* Get the status of a virtual network
|
||||
@ -948,7 +991,7 @@ ZT1_PeerList *ZT1_Node_peers(ZT1_Node *node);
|
||||
* @param nwid 64-bit network ID
|
||||
* @return Network configuration or NULL if we are not a member of this network
|
||||
*/
|
||||
ZT1_VirtualNetworkConfig *ZT1_Node_networkConfig(ZT1_Node *node,uint64_t nwid);
|
||||
ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node,uint64_t nwid);
|
||||
|
||||
/**
|
||||
* Enumerate and get status of all networks
|
||||
@ -956,7 +999,7 @@ ZT1_VirtualNetworkConfig *ZT1_Node_networkConfig(ZT1_Node *node,uint64_t nwid);
|
||||
* @param node Node instance
|
||||
* @return List of networks or NULL on failure
|
||||
*/
|
||||
ZT1_VirtualNetworkList *ZT1_Node_networks(ZT1_Node *node);
|
||||
ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node);
|
||||
|
||||
/**
|
||||
* Free a query result buffer
|
||||
@ -966,7 +1009,7 @@ ZT1_VirtualNetworkList *ZT1_Node_networks(ZT1_Node *node);
|
||||
* @param node Node instance
|
||||
* @param qr Query result buffer
|
||||
*/
|
||||
void ZT1_Node_freeQueryResult(ZT1_Node *node,void *qr);
|
||||
void ZT_Node_freeQueryResult(ZT_Node *node,void *qr);
|
||||
|
||||
/**
|
||||
* Add a local interface address
|
||||
@ -994,12 +1037,12 @@ void ZT1_Node_freeQueryResult(ZT1_Node *node,void *qr);
|
||||
* @param trust How much do you trust the local network under this interface?
|
||||
* @return Boolean: non-zero if address was accepted and added
|
||||
*/
|
||||
int ZT1_Node_addLocalInterfaceAddress(ZT1_Node *node,const struct sockaddr_storage *addr,int metric,ZT1_LocalInterfaceAddressTrust trust);
|
||||
int ZT_Node_addLocalInterfaceAddress(ZT_Node *node,const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust);
|
||||
|
||||
/**
|
||||
* Clear local interface addresses
|
||||
*/
|
||||
void ZT1_Node_clearLocalInterfaceAddresses(ZT1_Node *node);
|
||||
void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node);
|
||||
|
||||
/**
|
||||
* Set a network configuration master instance for this node
|
||||
@ -1016,7 +1059,7 @@ void ZT1_Node_clearLocalInterfaceAddresses(ZT1_Node *node);
|
||||
* @param networkConfigMasterInstance Instance of NetworkConfigMaster C++ class or NULL to disable
|
||||
* @return OK (0) or error code if a fatal error condition has occurred
|
||||
*/
|
||||
void ZT1_Node_setNetconfMaster(ZT1_Node *node,void *networkConfigMasterInstance);
|
||||
void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkConfigMasterInstance);
|
||||
|
||||
/**
|
||||
* Get ZeroTier One version
|
||||
@ -1026,7 +1069,7 @@ void ZT1_Node_setNetconfMaster(ZT1_Node *node,void *networkConfigMasterInstance)
|
||||
* @param revision Result: revision
|
||||
* @param featureFlags: Result: feature flag bitmap
|
||||
*/
|
||||
void ZT1_version(int *major,int *minor,int *revision,unsigned long *featureFlags);
|
||||
void ZT_version(int *major,int *minor,int *revision,unsigned long *featureFlags);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
@@ -53,8 +53,8 @@ set(src_files
../osdep/Http.cpp
../osdep/OSUtils.cpp
jni/com_zerotierone_sdk_Node.cpp
jni/ZT1_jniutils.cpp
jni/ZT1_jnilookup.cpp
jni/ZT_jniutils.cpp
jni/ZT_jnicache.cpp
)

set(include_dirs
@@ -40,7 +40,7 @@ LOCAL_SRC_FILES := \
# JNI Files
LOCAL_SRC_FILES += \
com_zerotierone_sdk_Node.cpp \
ZT1_jniutils.cpp \
ZT1_jnilookup.cpp
ZT_jniutils.cpp \
ZT_jnilookup.cpp

include $(BUILD_SHARED_LIBRARY)
@@ -25,8 +25,8 @@
* LLC. Start here: http://www.zerotier.com/
*/

#include "ZT1_jnilookup.h"
#include "ZT1_jniutils.h"
#include "ZT_jnilookup.h"
#include "ZT_jniutils.h"

JniLookup::JniLookup()
: m_jvm(NULL)
@@ -25,8 +25,8 @@
* LLC. Start here: http://www.zerotier.com/
*/

#ifndef ZT1_JNILOOKUP_H_
#define ZT1_JNILOOKUP_H_
#ifndef ZT_JNILOOKUP_H_
#define ZT_JNILOOKUP_H_

#include <jni.h>
#include <map>
@ -1,5 +1,5 @@
|
||||
#include "ZT1_jniutils.h"
|
||||
#include "ZT1_jnilookup.h"
|
||||
#include "ZT_jniutils.h"
|
||||
#include "ZT_jnilookup.h"
|
||||
#include <string>
|
||||
#include <assert.h>
|
||||
|
||||
@ -9,7 +9,7 @@ extern JniLookup lookup;
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
jobject createResultObject(JNIEnv *env, ZT1_ResultCode code)
|
||||
jobject createResultObject(JNIEnv *env, ZT_ResultCode code)
|
||||
{
|
||||
jclass resultClass = NULL;
|
||||
|
||||
@ -25,23 +25,23 @@ jobject createResultObject(JNIEnv *env, ZT1_ResultCode code)
|
||||
std::string fieldName;
|
||||
switch(code)
|
||||
{
|
||||
case ZT1_RESULT_OK:
|
||||
LOGV("ZT1_RESULT_OK");
|
||||
case ZT_RESULT_OK:
|
||||
LOGV("ZT_RESULT_OK");
|
||||
fieldName = "RESULT_OK";
|
||||
break;
|
||||
case ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY:
|
||||
LOGV("ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY");
|
||||
case ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY:
|
||||
LOGV("ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY");
|
||||
fieldName = "RESULT_FATAL_ERROR_OUT_OF_MEMORY";
|
||||
break;
|
||||
case ZT1_RESULT_FATAL_ERROR_DATA_STORE_FAILED:
|
||||
case ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED:
|
||||
LOGV("RESULT_FATAL_ERROR_DATA_STORE_FAILED");
|
||||
fieldName = "RESULT_FATAL_ERROR_DATA_STORE_FAILED";
|
||||
break;
|
||||
case ZT1_RESULT_ERROR_NETWORK_NOT_FOUND:
|
||||
case ZT_RESULT_ERROR_NETWORK_NOT_FOUND:
|
||||
LOGV("RESULT_FATAL_ERROR_DATA_STORE_FAILED");
|
||||
fieldName = "RESULT_ERROR_NETWORK_NOT_FOUND";
|
||||
break;
|
||||
case ZT1_RESULT_FATAL_ERROR_INTERNAL:
|
||||
case ZT_RESULT_FATAL_ERROR_INTERNAL:
|
||||
default:
|
||||
LOGV("RESULT_FATAL_ERROR_DATA_STORE_FAILED");
|
||||
fieldName = "RESULT_FATAL_ERROR_INTERNAL";
|
||||
@ -64,7 +64,7 @@ jobject createResultObject(JNIEnv *env, ZT1_ResultCode code)
|
||||
}
|
||||
|
||||
|
||||
jobject createVirtualNetworkStatus(JNIEnv *env, ZT1_VirtualNetworkStatus status)
|
||||
jobject createVirtualNetworkStatus(JNIEnv *env, ZT_VirtualNetworkStatus status)
|
||||
{
|
||||
jobject statusObject = NULL;
|
||||
|
||||
@ -77,22 +77,22 @@ jobject createVirtualNetworkStatus(JNIEnv *env, ZT1_VirtualNetworkStatus status)
|
||||
std::string fieldName;
|
||||
switch(status)
|
||||
{
|
||||
case ZT1_NETWORK_STATUS_REQUESTING_CONFIGURATION:
|
||||
case ZT_NETWORK_STATUS_REQUESTING_CONFIGURATION:
|
||||
fieldName = "NETWORK_STATUS_REQUESTING_CONFIGURATION";
|
||||
break;
|
||||
case ZT1_NETWORK_STATUS_OK:
|
||||
case ZT_NETWORK_STATUS_OK:
|
||||
fieldName = "NETWORK_STATUS_OK";
|
||||
break;
|
||||
case ZT1_NETWORK_STATUS_ACCESS_DENIED:
|
||||
case ZT_NETWORK_STATUS_ACCESS_DENIED:
|
||||
fieldName = "NETWORK_STATUS_ACCESS_DENIED";
|
||||
break;
|
||||
case ZT1_NETWORK_STATUS_NOT_FOUND:
|
||||
case ZT_NETWORK_STATUS_NOT_FOUND:
|
||||
fieldName = "NETWORK_STATUS_NOT_FOUND";
|
||||
break;
|
||||
case ZT1_NETWORK_STATUS_PORT_ERROR:
|
||||
case ZT_NETWORK_STATUS_PORT_ERROR:
|
||||
fieldName = "NETWORK_STATUS_PORT_ERROR";
|
||||
break;
|
||||
case ZT1_NETWORK_STATUS_CLIENT_TOO_OLD:
|
||||
case ZT_NETWORK_STATUS_CLIENT_TOO_OLD:
|
||||
fieldName = "NETWORK_STATUS_CLIENT_TOO_OLD";
|
||||
break;
|
||||
}
|
||||
@ -104,7 +104,7 @@ jobject createVirtualNetworkStatus(JNIEnv *env, ZT1_VirtualNetworkStatus status)
|
||||
return statusObject;
|
||||
}
|
||||
|
||||
jobject createEvent(JNIEnv *env, ZT1_Event event)
|
||||
jobject createEvent(JNIEnv *env, ZT_Event event)
|
||||
{
|
||||
jclass eventClass = NULL;
|
||||
jobject eventObject = NULL;
|
||||
@ -118,31 +118,31 @@ jobject createEvent(JNIEnv *env, ZT1_Event event)
|
||||
std::string fieldName;
|
||||
switch(event)
|
||||
{
|
||||
case ZT1_EVENT_UP:
|
||||
case ZT_EVENT_UP:
|
||||
fieldName = "EVENT_UP";
|
||||
break;
|
||||
case ZT1_EVENT_OFFLINE:
|
||||
case ZT_EVENT_OFFLINE:
|
||||
fieldName = "EVENT_OFFLINE";
|
||||
break;
|
||||
case ZT1_EVENT_ONLINE:
|
||||
case ZT_EVENT_ONLINE:
|
||||
fieldName = "EVENT_ONLINE";
|
||||
break;
|
||||
case ZT1_EVENT_DOWN:
|
||||
case ZT_EVENT_DOWN:
|
||||
fieldName = "EVENT_DOWN";
|
||||
break;
|
||||
case ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION:
|
||||
case ZT_EVENT_FATAL_ERROR_IDENTITY_COLLISION:
|
||||
fieldName = "EVENT_FATAL_ERROR_IDENTITY_COLLISION";
|
||||
break;
|
||||
case ZT1_EVENT_SAW_MORE_RECENT_VERSION:
|
||||
case ZT_EVENT_SAW_MORE_RECENT_VERSION:
|
||||
fieldName = "EVENT_SAW_MORE_RECENT_VERSION";
|
||||
break;
|
||||
case ZT1_EVENT_AUTHENTICATION_FAILURE:
|
||||
case ZT_EVENT_AUTHENTICATION_FAILURE:
|
||||
fieldName = "EVENT_AUTHENTICATION_FAILURE";
|
||||
break;
|
||||
case ZT1_EVENT_INVALID_PACKET:
|
||||
case ZT_EVENT_INVALID_PACKET:
|
||||
fieldName = "EVENT_INVALID_PACKET";
|
||||
break;
|
||||
case ZT1_EVENT_TRACE:
|
||||
case ZT_EVENT_TRACE:
|
||||
fieldName = "EVENT_TRACE";
|
||||
break;
|
||||
}
|
||||
@ -154,7 +154,7 @@ jobject createEvent(JNIEnv *env, ZT1_Event event)
|
||||
return eventObject;
|
||||
}
|
||||
|
||||
jobject createPeerRole(JNIEnv *env, ZT1_PeerRole role)
|
||||
jobject createPeerRole(JNIEnv *env, ZT_PeerRole role)
|
||||
{
|
||||
jclass peerRoleClass = NULL;
|
||||
jobject peerRoleObject = NULL;
|
||||
@ -168,14 +168,14 @@ jobject createPeerRole(JNIEnv *env, ZT1_PeerRole role)
|
||||
std::string fieldName;
|
||||
switch(role)
|
||||
{
|
||||
case ZT1_PEER_ROLE_LEAF:
|
||||
case ZT_PEER_ROLE_LEAF:
|
||||
fieldName = "PEER_ROLE_LEAF";
|
||||
break;
|
||||
case ZT1_PEER_ROLE_RELAY:
|
||||
case ZT_PEER_ROLE_RELAY:
|
||||
fieldName = "PEER_ROLE_RELAY";
|
||||
break;
|
||||
case ZT1_PEER_ROLE_ROOT:
|
||||
fieldName = "PEER_ROLE_ROOT";
|
||||
case ZT_PEER_ROLE_ROOT:
|
||||
fieldName = "PEER_ROLE_ROOTS";
|
||||
break;
|
||||
}
|
||||
|
||||
@ -186,7 +186,7 @@ jobject createPeerRole(JNIEnv *env, ZT1_PeerRole role)
|
||||
return peerRoleObject;
|
||||
}
|
||||
|
||||
jobject createVirtualNetworkType(JNIEnv *env, ZT1_VirtualNetworkType type)
|
||||
jobject createVirtualNetworkType(JNIEnv *env, ZT_VirtualNetworkType type)
|
||||
{
|
||||
jclass vntypeClass = NULL;
|
||||
jobject vntypeObject = NULL;
|
||||
@ -200,10 +200,10 @@ jobject createVirtualNetworkType(JNIEnv *env, ZT1_VirtualNetworkType type)
|
||||
std::string fieldName;
|
||||
switch(type)
|
||||
{
|
||||
case ZT1_NETWORK_TYPE_PRIVATE:
|
||||
case ZT_NETWORK_TYPE_PRIVATE:
|
||||
fieldName = "NETWORK_TYPE_PRIVATE";
|
||||
break;
|
||||
case ZT1_NETWORK_TYPE_PUBLIC:
|
||||
case ZT_NETWORK_TYPE_PUBLIC:
|
||||
fieldName = "NETWORK_TYPE_PUBLIC";
|
||||
break;
|
||||
}
|
||||
@ -213,7 +213,7 @@ jobject createVirtualNetworkType(JNIEnv *env, ZT1_VirtualNetworkType type)
|
||||
return vntypeObject;
|
||||
}
|
||||
|
||||
jobject createVirtualNetworkConfigOperation(JNIEnv *env, ZT1_VirtualNetworkConfigOperation op)
|
||||
jobject createVirtualNetworkConfigOperation(JNIEnv *env, ZT_VirtualNetworkConfigOperation op)
|
||||
{
|
||||
jclass vnetConfigOpClass = NULL;
|
||||
jobject vnetConfigOpObject = NULL;
|
||||
@ -227,16 +227,16 @@ jobject createVirtualNetworkConfigOperation(JNIEnv *env, ZT1_VirtualNetworkConfi
|
||||
std::string fieldName;
|
||||
switch(op)
|
||||
{
|
||||
case ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_UP:
|
||||
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP:
|
||||
fieldName = "VIRTUAL_NETWORK_CONFIG_OPERATION_UP";
|
||||
break;
|
||||
case ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE:
|
||||
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE:
|
||||
fieldName = "VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE";
|
||||
break;
|
||||
case ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN:
|
||||
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN:
|
||||
fieldName = "VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN";
|
||||
break;
|
||||
case ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY:
|
||||
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY:
|
||||
fieldName = "VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY";
|
||||
break;
|
||||
}
|
||||
@ -372,7 +372,7 @@ jobject newInetSocketAddress(JNIEnv *env, const sockaddr_storage &addr)
|
||||
return inetSocketAddressObject;
|
||||
}
|
||||
|
||||
jobject newMulticastGroup(JNIEnv *env, const ZT1_MulticastGroup &mc)
|
||||
jobject newMulticastGroup(JNIEnv *env, const ZT_MulticastGroup &mc)
|
||||
{
|
||||
jclass multicastGroupClass = NULL;
|
||||
jmethodID multicastGroup_constructor = NULL;
|
||||
@ -417,7 +417,7 @@ jobject newMulticastGroup(JNIEnv *env, const ZT1_MulticastGroup &mc)
|
||||
return multicastGroupObj;
|
||||
}
|
||||
|
||||
jobject newPeerPhysicalPath(JNIEnv *env, const ZT1_PeerPhysicalPath &ppp)
|
||||
jobject newPeerPhysicalPath(JNIEnv *env, const ZT_PeerPhysicalPath &ppp)
|
||||
{
|
||||
LOGV("newPeerPhysicalPath Called");
|
||||
jclass pppClass = NULL;
|
||||
@ -514,7 +514,7 @@ jobject newPeerPhysicalPath(JNIEnv *env, const ZT1_PeerPhysicalPath &ppp)
|
||||
return pppObject;
|
||||
}
|
||||
|
||||
jobject newPeer(JNIEnv *env, const ZT1_Peer &peer)
|
||||
jobject newPeer(JNIEnv *env, const ZT_Peer &peer)
|
||||
{
|
||||
LOGV("newPeer called");
|
||||
|
||||
@ -656,7 +656,7 @@ jobject newPeer(JNIEnv *env, const ZT1_Peer &peer)
|
||||
return peerObject;
|
||||
}
|
||||
|
||||
jobject newNetworkConfig(JNIEnv *env, const ZT1_VirtualNetworkConfig &vnetConfig)
|
||||
jobject newNetworkConfig(JNIEnv *env, const ZT_VirtualNetworkConfig &vnetConfig)
|
||||
{
|
||||
jclass vnetConfigClass = NULL;
|
||||
jmethodID vnetConfig_constructor = NULL;
|
@ -1,5 +1,5 @@
|
||||
#ifndef ZT1_jniutils_h_
|
||||
#define ZT1_jniutils_h_
|
||||
#ifndef ZT_jniutils_h_
|
||||
#define ZT_jniutils_h_
|
||||
#include <stdio.h>
|
||||
#include <jni.h>
|
||||
#include <ZeroTierOne.h>
|
||||
@ -23,22 +23,22 @@ extern "C" {
|
||||
#define LOGE(...) fprintf(stdout, __VA_ARGS__)
|
||||
#endif
|
||||
|
||||
jobject createResultObject(JNIEnv *env, ZT1_ResultCode code);
|
||||
jobject createVirtualNetworkStatus(JNIEnv *env, ZT1_VirtualNetworkStatus status);
|
||||
jobject createVirtualNetworkType(JNIEnv *env, ZT1_VirtualNetworkType type);
|
||||
jobject createEvent(JNIEnv *env, ZT1_Event event);
|
||||
jobject createPeerRole(JNIEnv *env, ZT1_PeerRole role);
|
||||
jobject createVirtualNetworkConfigOperation(JNIEnv *env, ZT1_VirtualNetworkConfigOperation op);
|
||||
jobject createResultObject(JNIEnv *env, ZT_ResultCode code);
|
||||
jobject createVirtualNetworkStatus(JNIEnv *env, ZT_VirtualNetworkStatus status);
|
||||
jobject createVirtualNetworkType(JNIEnv *env, ZT_VirtualNetworkType type);
|
||||
jobject createEvent(JNIEnv *env, ZT_Event event);
|
||||
jobject createPeerRole(JNIEnv *env, ZT_PeerRole role);
|
||||
jobject createVirtualNetworkConfigOperation(JNIEnv *env, ZT_VirtualNetworkConfigOperation op);
|
||||
|
||||
jobject newInetSocketAddress(JNIEnv *env, const sockaddr_storage &addr);
|
||||
jobject newInetAddress(JNIEnv *env, const sockaddr_storage &addr);
|
||||
|
||||
jobject newMulticastGroup(JNIEnv *env, const ZT1_MulticastGroup &mc);
|
||||
jobject newMulticastGroup(JNIEnv *env, const ZT_MulticastGroup &mc);
|
||||
|
||||
jobject newPeer(JNIEnv *env, const ZT1_Peer &peer);
|
||||
jobject newPeerPhysicalPath(JNIEnv *env, const ZT1_PeerPhysicalPath &ppp);
|
||||
jobject newPeer(JNIEnv *env, const ZT_Peer &peer);
|
||||
jobject newPeerPhysicalPath(JNIEnv *env, const ZT_PeerPhysicalPath &ppp);
|
||||
|
||||
jobject newNetworkConfig(JNIEnv *env, const ZT1_VirtualNetworkConfig &config);
|
||||
jobject newNetworkConfig(JNIEnv *env, const ZT_VirtualNetworkConfig &config);
|
||||
|
||||
jobject newVersion(JNIEnv *env, int major, int minor, int rev, long featureFlags);
|
||||
|
@ -26,8 +26,8 @@
|
||||
*/
|
||||
|
||||
#include "com_zerotierone_sdk_Node.h"
|
||||
#include "ZT1_jniutils.h"
|
||||
#include "ZT1_jnilookup.h"
|
||||
#include "ZT_jniutils.h"
|
||||
#include "ZT_jnilookup.h"
|
||||
|
||||
#include <ZeroTierOne.h>
|
||||
#include "Mutex.hpp"
|
||||
@ -75,7 +75,7 @@ namespace {
|
||||
|
||||
JavaVM *jvm;
|
||||
|
||||
ZT1_Node *node;
|
||||
ZT_Node *node;
|
||||
|
||||
jobject dataStoreGetListener;
|
||||
jobject dataStorePutListener;
|
||||
@ -87,11 +87,11 @@ namespace {
|
||||
|
||||
|
||||
int VirtualNetworkConfigFunctionCallback(
|
||||
ZT1_Node *node,
|
||||
ZT_Node *node,
|
||||
void *userData,
|
||||
uint64_t nwid,
|
||||
enum ZT1_VirtualNetworkConfigOperation operation,
|
||||
const ZT1_VirtualNetworkConfig *config)
|
||||
enum ZT_VirtualNetworkConfigOperation operation,
|
||||
const ZT_VirtualNetworkConfig *config)
|
||||
{
|
||||
LOGV("VritualNetworkConfigFunctionCallback");
|
||||
JniRef *ref = (JniRef*)userData;
|
||||
@ -134,7 +134,7 @@ namespace {
|
||||
(jlong)nwid, operationObject, networkConfigObject);
|
||||
}
|
||||
|
||||
void VirtualNetworkFrameFunctionCallback(ZT1_Node *node,void *userData,
|
||||
void VirtualNetworkFrameFunctionCallback(ZT_Node *node,void *userData,
|
||||
uint64_t nwid,
|
||||
uint64_t sourceMac,
|
||||
uint64_t destMac,
|
||||
@ -189,11 +189,11 @@ namespace {
|
||||
}
|
||||
|
||||
|
||||
void EventCallback(ZT1_Node *node,void *userData,enum ZT1_Event event, const void *data)
|
||||
void EventCallback(ZT_Node *node,void *userData,enum ZT_Event event, const void *data)
|
||||
{
|
||||
LOGV("EventCallback");
|
||||
JniRef *ref = (JniRef*)userData;
|
||||
if(ref->node != node && event != ZT1_EVENT_UP)
|
||||
if(ref->node != node && event != ZT_EVENT_UP)
|
||||
{
|
||||
LOGE("Nodes not equal. ref->node %p, node %p. Event: %d", ref->node, node, event);
|
||||
return;
|
||||
@ -252,18 +252,18 @@ namespace {
|
||||
|
||||
switch(event)
|
||||
{
|
||||
case ZT1_EVENT_UP:
|
||||
case ZT1_EVENT_OFFLINE:
|
||||
case ZT1_EVENT_ONLINE:
|
||||
case ZT1_EVENT_DOWN:
|
||||
case ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION:
|
||||
case ZT_EVENT_UP:
|
||||
case ZT_EVENT_OFFLINE:
|
||||
case ZT_EVENT_ONLINE:
|
||||
case ZT_EVENT_DOWN:
|
||||
case ZT_EVENT_FATAL_ERROR_IDENTITY_COLLISION:
|
||||
{
|
||||
LOGV("Regular Event");
|
||||
// call onEvent()
|
||||
env->CallVoidMethod(ref->eventListener, onEventMethod, eventObject);
|
||||
}
|
||||
break;
|
||||
case ZT1_EVENT_SAW_MORE_RECENT_VERSION:
|
||||
case ZT_EVENT_SAW_MORE_RECENT_VERSION:
|
||||
{
|
||||
LOGV("Version Event");
|
||||
// call onOutOfDate()
|
||||
@ -275,8 +275,8 @@ namespace {
|
||||
}
|
||||
}
|
||||
break;
|
||||
case ZT1_EVENT_AUTHENTICATION_FAILURE:
|
||||
case ZT1_EVENT_INVALID_PACKET:
|
||||
case ZT_EVENT_AUTHENTICATION_FAILURE:
|
||||
case ZT_EVENT_INVALID_PACKET:
|
||||
{
|
||||
LOGV("Network Error Event");
|
||||
// call onNetworkError()
|
||||
@ -288,7 +288,7 @@ namespace {
|
||||
}
|
||||
}
|
||||
break;
|
||||
case ZT1_EVENT_TRACE:
|
||||
case ZT_EVENT_TRACE:
|
||||
{
|
||||
LOGV("Trace Event");
|
||||
// call onTrace()
|
||||
@ -303,7 +303,7 @@ namespace {
|
||||
}
|
||||
}
|
||||
|
||||
long DataStoreGetFunction(ZT1_Node *node,void *userData,
|
||||
long DataStoreGetFunction(ZT_Node *node,void *userData,
|
||||
const char *objectName,
|
||||
void *buffer,
|
||||
unsigned long bufferSize,
|
||||
@ -375,7 +375,7 @@ namespace {
|
||||
return retval;
|
||||
}
|
||||
|
||||
int DataStorePutFunction(ZT1_Node *node,void *userData,
|
||||
int DataStorePutFunction(ZT_Node *node,void *userData,
|
||||
const char *objectName,
|
||||
const void *buffer,
|
||||
unsigned long bufferSize,
|
||||
@@ -440,12 +440,13 @@ namespace {
}
}

int WirePacketSendFunction(ZT1_Node *node,void *userData,\
const struct sockaddr_storage *address,
int WirePacketSendFunction(ZT_Node *node,void *userData,\
const struct sockaddr_storage *localAddress,
const struct sockaddr_storage *remoteAddress,
const void *buffer,
unsigned int bufferSize)
{
LOGV("WirePacketSendFunction(%p, %p, %d)", address, buffer, bufferSize);
LOGV("WirePacketSendFunction(%p, %p, %p, %d)", localAddress, remoteAddress, buffer, bufferSize);
JniRef *ref = (JniRef*)userData;
assert(ref->node == node);

@@ -468,10 +469,11 @@ namespace {
return -2;
}

jobject addressObj = newInetSocketAddress(env, *address);
jobject localAddressObj = newInetSocketAddress(env, *localAddress);
jobject remoteAddressObj = newInetSocketAddress(env, *remoteAddress);
jbyteArray bufferObj = env->NewByteArray(bufferSize);
env->SetByteArrayRegion(bufferObj, 0, bufferSize, (jbyte*)buffer);
int retval = env->CallIntMethod(ref->packetSender, packetSenderCallbackMethod, addressObj, bufferObj);
int retval = env->CallIntMethod(ref->packetSender, packetSenderCallbackMethod, localAddressObj, remoteAddressObj, bufferObj);

LOGV("JNI Packet Sender returned: %d", retval);
return retval;
@ -481,7 +483,7 @@ namespace {
|
||||
static NodeMap nodeMap;
|
||||
ZeroTier::Mutex nodeMapMutex;
|
||||
|
||||
ZT1_Node* findNode(uint64_t nodeId)
|
||||
ZT_Node* findNode(uint64_t nodeId)
|
||||
{
|
||||
ZeroTier::Mutex::Lock lock(nodeMapMutex);
|
||||
NodeMap::iterator found = nodeMap.find(nodeId);
|
||||
@ -514,10 +516,10 @@ JNIEXPORT void JNICALL JNI_OnUnload(JavaVM *vm, void *reserved)
|
||||
JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_node_1init(
|
||||
JNIEnv *env, jobject obj, jlong now)
|
||||
{
|
||||
LOGV("Creating ZT1_Node struct");
|
||||
jobject resultObject = createResultObject(env, ZT1_RESULT_OK);
|
||||
LOGV("Creating ZT_Node struct");
|
||||
jobject resultObject = createResultObject(env, ZT_RESULT_OK);
|
||||
|
||||
ZT1_Node *node;
|
||||
ZT_Node *node;
|
||||
JniRef *ref = new JniRef;
|
||||
ref->id = (uint64_t)now;
|
||||
env->GetJavaVM(&ref->jvm);
|
||||
@ -609,7 +611,7 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_node_1init(
|
||||
}
|
||||
ref->eventListener = env->NewGlobalRef(tmp);
|
||||
|
||||
ZT1_ResultCode rc = ZT1_Node_new(
|
||||
ZT_ResultCode rc = ZT_Node_new(
|
||||
&node,
|
||||
ref,
|
||||
(uint64_t)now,
|
||||
@ -618,17 +620,16 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_node_1init(
|
||||
&WirePacketSendFunction,
|
||||
&VirtualNetworkFrameFunctionCallback,
|
||||
&VirtualNetworkConfigFunctionCallback,
|
||||
&EventCallback);
|
||||
&EventCallback,
|
||||
NULL);
|
||||
|
||||
LOGI("Node Created.");
|
||||
|
||||
if(rc != ZT1_RESULT_OK)
|
||||
if(rc != ZT_RESULT_OK)
|
||||
{
|
||||
LOGE("Error creating Node: %d", rc);
|
||||
resultObject = createResultObject(env, rc);
|
||||
if(node)
|
||||
{
|
||||
ZT1_Node_delete(node);
|
||||
ZT_Node_delete(node);
|
||||
node = NULL;
|
||||
}
|
||||
delete ref;
|
||||
@ -652,7 +653,7 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_node_1init(
|
||||
JNIEXPORT void JNICALL Java_com_zerotier_sdk_Node_node_1delete(
|
||||
JNIEnv *env, jobject obj, jlong id)
|
||||
{
|
||||
LOGV("Destroying ZT1_Node struct");
|
||||
LOGV("Destroying ZT_Node struct");
|
||||
uint64_t nodeId = (uint64_t)id;
|
||||
|
||||
NodeMap::iterator found;
|
||||
@ -666,7 +667,7 @@ JNIEXPORT void JNICALL Java_com_zerotier_sdk_Node_node_1delete(
|
||||
JniRef *ref = found->second;
|
||||
nodeMap.erase(found);
|
||||
|
||||
ZT1_Node_delete(ref->node);
|
||||
ZT_Node_delete(ref->node);
|
||||
|
||||
delete ref;
|
||||
ref = NULL;
|
||||
@ -696,18 +697,18 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processVirtualNetworkFrame(
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
unsigned int nbtd_len = env->GetArrayLength(out_nextBackgroundTaskDeadline);
|
||||
if(nbtd_len < 1)
|
||||
{
|
||||
// array for next background task length has 0 elements!
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
uint64_t now = (uint64_t)in_now;
|
||||
@ -725,7 +726,7 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processVirtualNetworkFrame(
|
||||
|
||||
uint64_t nextBackgroundTaskDeadline = 0;
|
||||
|
||||
ZT1_ResultCode rc = ZT1_Node_processVirtualNetworkFrame(
|
||||
ZT_ResultCode rc = ZT_Node_processVirtualNetworkFrame(
|
||||
node,
|
||||
now,
|
||||
nwid,
|
||||
@ -753,24 +754,25 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket(
|
||||
JNIEnv *env, jobject obj,
|
||||
jlong id,
|
||||
jlong in_now,
|
||||
jobject in_localAddress,
|
||||
jobject in_remoteAddress,
|
||||
jbyteArray in_packetData,
|
||||
jlongArray out_nextBackgroundTaskDeadline)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
LOGE("Couldn't find a valid node!");
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
unsigned int nbtd_len = env->GetArrayLength(out_nextBackgroundTaskDeadline);
|
||||
if(nbtd_len < 1)
|
||||
{
|
||||
LOGE("nbtd_len < 1");
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
uint64_t now = (uint64_t)in_now;
|
||||
@ -781,7 +783,7 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket(
|
||||
{
|
||||
LOGE("Can't find InetAddress class");
|
||||
// can't find java.net.InetAddress
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
jmethodID getAddressMethod = lookup.findMethod(
|
||||
@ -789,23 +791,29 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket(
|
||||
if(getAddressMethod == NULL)
|
||||
{
|
||||
// can't find InetAddress.getAddress()
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
jclass InetSocketAddressClass = lookup.findClass("java/net/InetSocketAddress");
|
||||
if(InetSocketAddressClass == NULL)
|
||||
{
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
jmethodID inetSockGetAddressMethod = lookup.findMethod(
|
||||
InetSocketAddressClass, "getAddress", "()Ljava/net/InetAddress;");
|
||||
|
||||
jobject addrObject = env->CallObjectMethod(in_remoteAddress, inetSockGetAddressMethod);
|
||||
|
||||
if(addrObject == NULL)
|
||||
jobject localAddrObj = NULL;
|
||||
if(in_localAddress != NULL)
|
||||
{
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
localAddrObj = env->CallObjectMethod(in_localAddress, inetSockGetAddressMethod);
|
||||
}
|
||||
|
||||
jobject remoteAddrObject = env->CallObjectMethod(in_remoteAddress, inetSockGetAddressMethod);
|
||||
|
||||
if(remoteAddrObject == NULL)
|
||||
{
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
jmethodID inetSock_getPort = lookup.findMethod(
|
||||
@ -814,30 +822,72 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket(
|
||||
if(env->ExceptionCheck() || inetSock_getPort == NULL)
|
||||
{
|
||||
LOGE("Couldn't find getPort method on InetSocketAddress");
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
// call InetSocketAddress.getPort()
|
||||
int port = env->CallIntMethod(in_remoteAddress, inetSock_getPort);
|
||||
int remotePort = env->CallIntMethod(in_remoteAddress, inetSock_getPort);
|
||||
if(env->ExceptionCheck())
|
||||
{
|
||||
LOGE("Exception calling InetSocketAddress.getPort()");
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
// Call InetAddress.getAddress()
|
||||
jbyteArray addressArray = (jbyteArray)env->CallObjectMethod(addrObject, getAddressMethod);
|
||||
if(addressArray == NULL)
|
||||
jbyteArray remoteAddressArray = (jbyteArray)env->CallObjectMethod(remoteAddrObject, getAddressMethod);
|
||||
if(remoteAddressArray == NULL)
|
||||
{
|
||||
LOGE("Unable to call getAddress()");
|
||||
// unable to call getAddress()
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
unsigned int addrSize = env->GetArrayLength(addressArray);
|
||||
// get the address bytes
|
||||
jbyte *addr = (jbyte*)env->GetPrimitiveArrayCritical(addressArray, NULL);
|
||||
unsigned int addrSize = env->GetArrayLength(remoteAddressArray);
|
||||
|
||||
|
||||
sockaddr_storage localAddress = {};
|
||||
|
||||
if(localAddrObj == NULL)
|
||||
{
|
||||
localAddress = ZT_SOCKADDR_NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
int localPort = env->CallIntMethod(in_localAddress, inetSock_getPort);
|
||||
jbyteArray localAddressArray = (jbyteArray)env->CallObjectMethod(localAddrObj, getAddressMethod);
|
||||
if(localAddressArray != NULL)
|
||||
{
|
||||
|
||||
unsigned int localAddrSize = env->GetArrayLength(localAddressArray);
|
||||
jbyte *addr = (jbyte*)env->GetPrimitiveArrayCritical(localAddressArray, NULL);
|
||||
|
||||
if(localAddrSize == 16)
|
||||
{
|
||||
sockaddr_in6 ipv6 = {};
|
||||
ipv6.sin6_family = AF_INET6;
|
||||
ipv6.sin6_port = htons(localPort);
|
||||
memcpy(ipv6.sin6_addr.s6_addr, addr, 16);
|
||||
memcpy(&localAddress, &ipv6, sizeof(sockaddr_in6));
|
||||
}
|
||||
else if(localAddrSize)
|
||||
{
|
||||
// IPV4 address
|
||||
sockaddr_in ipv4 = {};
|
||||
ipv4.sin_family = AF_INET;
|
||||
ipv4.sin_port = htons(localPort);
|
||||
memcpy(&ipv4.sin_addr, addr, 4);
|
||||
memcpy(&localAddress, &ipv4, sizeof(sockaddr_in));
|
||||
}
|
||||
else
|
||||
{
|
||||
localAddress = ZT_SOCKADDR_NULL;
|
||||
}
|
||||
env->ReleasePrimitiveArrayCritical(localAddressArray, addr, 0);
|
||||
}
|
||||
}
|
||||
|
||||
// get the address bytes
|
||||
jbyte *addr = (jbyte*)env->GetPrimitiveArrayCritical(remoteAddressArray, NULL);
|
||||
sockaddr_storage remoteAddress = {};
|
||||
|
||||
if(addrSize == 16)
|
||||
@ -845,7 +895,7 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket(
|
||||
// IPV6 address
|
||||
sockaddr_in6 ipv6 = {};
|
||||
ipv6.sin6_family = AF_INET6;
|
||||
ipv6.sin6_port = htons(port);
|
||||
ipv6.sin6_port = htons(remotePort);
|
||||
memcpy(ipv6.sin6_addr.s6_addr, addr, 16);
|
||||
memcpy(&remoteAddress, &ipv6, sizeof(sockaddr_in6));
|
||||
}
|
||||
@ -854,7 +904,7 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket(
|
||||
// IPV4 address
|
||||
sockaddr_in ipv4 = {};
|
||||
ipv4.sin_family = AF_INET;
|
||||
ipv4.sin_port = htons(port);
|
||||
ipv4.sin_port = htons(remotePort);
|
||||
memcpy(&ipv4.sin_addr, addr, 4);
|
||||
memcpy(&remoteAddress, &ipv4, sizeof(sockaddr_in));
|
||||
}
|
||||
@ -862,16 +912,16 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket(
|
||||
{
|
||||
LOGE("Unknown IP version");
|
||||
// unknown address type
|
||||
env->ReleasePrimitiveArrayCritical(addressArray, addr, 0);
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
env->ReleasePrimitiveArrayCritical(remoteAddressArray, addr, 0);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
env->ReleasePrimitiveArrayCritical(addressArray, addr, 0);
|
||||
env->ReleasePrimitiveArrayCritical(remoteAddressArray, addr, 0);
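The local- and remote-address branches above repeat the same byte-array-to-sockaddr conversion. A small helper along these lines could fold the two paths together; this is purely an illustrative sketch, not part of this diff, and makeSockaddr is a hypothetical name:

// Hypothetical helper: build a sockaddr_storage from raw InetAddress bytes plus a port.
// Falls back to ZT_SOCKADDR_NULL when the byte count is neither 4 (IPv4) nor 16 (IPv6).
static sockaddr_storage makeSockaddr(const jbyte *addr, unsigned int addrSize, int port)
{
    sockaddr_storage ss = {};
    if(addrSize == 16)
    {
        sockaddr_in6 ipv6 = {};
        ipv6.sin6_family = AF_INET6;
        ipv6.sin6_port = htons(port);
        memcpy(ipv6.sin6_addr.s6_addr, addr, 16);
        memcpy(&ss, &ipv6, sizeof(sockaddr_in6));
    }
    else if(addrSize == 4)
    {
        sockaddr_in ipv4 = {};
        ipv4.sin_family = AF_INET;
        ipv4.sin_port = htons(port);
        memcpy(&ipv4.sin_addr, addr, 4);
        memcpy(&ss, &ipv4, sizeof(sockaddr_in));
    }
    else
    {
        ss = ZT_SOCKADDR_NULL;
    }
    return ss;
}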
|
||||
|
||||
unsigned int packetLength = env->GetArrayLength(in_packetData);
|
||||
if(packetLength == 0)
|
||||
{
|
||||
LOGE("Empty packet?!?");
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
void *packetData = env->GetPrimitiveArrayCritical(in_packetData, NULL);
|
||||
void *localData = malloc(packetLength);
|
||||
@ -880,16 +930,17 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket(
|
||||
|
||||
uint64_t nextBackgroundTaskDeadline = 0;
|
||||
|
||||
ZT1_ResultCode rc = ZT1_Node_processWirePacket(
|
||||
ZT_ResultCode rc = ZT_Node_processWirePacket(
|
||||
node,
|
||||
now,
|
||||
&localAddress,
|
||||
&remoteAddress,
|
||||
localData,
|
||||
packetLength,
|
||||
&nextBackgroundTaskDeadline);
|
||||
if(rc != ZT1_RESULT_OK)
|
||||
if(rc != ZT_RESULT_OK)
|
||||
{
|
||||
LOGE("ZT1_Node_processWirePacket returned: %d", rc);
|
||||
LOGE("ZT_Node_processWirePacket returned: %d", rc);
|
||||
}
|
||||
|
||||
free(localData);
|
||||
@ -913,23 +964,23 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processBackgroundTasks(
|
||||
jlongArray out_nextBackgroundTaskDeadline)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
unsigned int nbtd_len = env->GetArrayLength(out_nextBackgroundTaskDeadline);
|
||||
if(nbtd_len < 1)
|
||||
{
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
uint64_t now = (uint64_t)in_now;
|
||||
uint64_t nextBackgroundTaskDeadline = 0;
|
||||
|
||||
ZT1_ResultCode rc = ZT1_Node_processBackgroundTasks(node, now, &nextBackgroundTaskDeadline);
|
||||
ZT_ResultCode rc = ZT_Node_processBackgroundTasks(node, now, &nextBackgroundTaskDeadline);
|
||||
|
||||
jlong *outDeadline = (jlong*)env->GetPrimitiveArrayCritical(out_nextBackgroundTaskDeadline, NULL);
|
||||
outDeadline[0] = (jlong)nextBackgroundTaskDeadline;
|
||||
@ -947,16 +998,16 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_join(
|
||||
JNIEnv *env, jobject obj, jlong id, jlong in_nwid)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
uint64_t nwid = (uint64_t)in_nwid;
|
||||
|
||||
ZT1_ResultCode rc = ZT1_Node_join(node, nwid);
|
||||
ZT_ResultCode rc = ZT_Node_join(node, nwid);
|
||||
|
||||
return createResultObject(env, rc);
|
||||
}
|
||||
@ -970,16 +1021,16 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_leave(
|
||||
JNIEnv *env, jobject obj, jlong id, jlong in_nwid)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
uint64_t nwid = (uint64_t)in_nwid;
|
||||
|
||||
ZT1_ResultCode rc = ZT1_Node_leave(node, nwid);
|
||||
ZT_ResultCode rc = ZT_Node_leave(node, nwid);
|
||||
|
||||
return createResultObject(env, rc);
|
||||
}
|
||||
@ -997,18 +1048,18 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_multicastSubscribe(
|
||||
jlong in_multicastAdi)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
uint64_t nwid = (uint64_t)in_nwid;
|
||||
uint64_t multicastGroup = (uint64_t)in_multicastGroup;
|
||||
unsigned long multicastAdi = (unsigned long)in_multicastAdi;
|
||||
|
||||
ZT1_ResultCode rc = ZT1_Node_multicastSubscribe(
|
||||
ZT_ResultCode rc = ZT_Node_multicastSubscribe(
|
||||
node, nwid, multicastGroup, multicastAdi);
|
||||
|
||||
return createResultObject(env, rc);
|
||||
@ -1027,18 +1078,18 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_multicastUnsubscribe(
|
||||
jlong in_multicastAdi)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return createResultObject(env, ZT1_RESULT_FATAL_ERROR_INTERNAL);
|
||||
return createResultObject(env, ZT_RESULT_FATAL_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
uint64_t nwid = (uint64_t)in_nwid;
|
||||
uint64_t multicastGroup = (uint64_t)in_multicastGroup;
|
||||
unsigned long multicastAdi = (unsigned long)in_multicastAdi;
|
||||
|
||||
ZT1_ResultCode rc = ZT1_Node_multicastUnsubscribe(
|
||||
ZT_ResultCode rc = ZT_Node_multicastUnsubscribe(
|
||||
node, nwid, multicastGroup, multicastAdi);
|
||||
|
||||
return createResultObject(env, rc);
|
||||
@ -1053,14 +1104,14 @@ JNIEXPORT jlong JNICALL Java_com_zerotier_sdk_Node_address(
|
||||
JNIEnv *env , jobject obj, jlong id)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t address = ZT1_Node_address(node);
|
||||
uint64_t address = ZT_Node_address(node);
|
||||
return (jlong)address;
|
||||
}
|
||||
|
||||
@ -1073,7 +1124,7 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_status
|
||||
(JNIEnv *env, jobject obj, jlong id)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
@ -1103,8 +1154,8 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_status
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ZT1_NodeStatus nodeStatus;
|
||||
ZT1_Node_status(node, &nodeStatus);
|
||||
ZT_NodeStatus nodeStatus;
|
||||
ZT_Node_status(node, &nodeStatus);
|
||||
|
||||
jfieldID addressField = NULL;
|
||||
jfieldID publicIdentityField = NULL;
|
||||
@ -1165,18 +1216,18 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_networkConfig(
|
||||
JNIEnv *env, jobject obj, jlong id, jlong nwid)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZT1_VirtualNetworkConfig *vnetConfig = ZT1_Node_networkConfig(node, nwid);
|
||||
ZT_VirtualNetworkConfig *vnetConfig = ZT_Node_networkConfig(node, nwid);
|
||||
|
||||
jobject vnetConfigObject = newNetworkConfig(env, *vnetConfig);
|
||||
|
||||
ZT1_Node_freeQueryResult(node, vnetConfig);
|
||||
ZT_Node_freeQueryResult(node, vnetConfig);
|
||||
|
||||
return vnetConfigObject;
|
||||
}
|
||||
@ -1194,7 +1245,7 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_version(
|
||||
int revision = 0;
|
||||
unsigned long featureFlags = 0;
|
||||
|
||||
ZT1_version(&major, &minor, &revision, &featureFlags);
|
||||
ZT_version(&major, &minor, &revision, &featureFlags);
|
||||
|
||||
return newVersion(env, major, minor, revision, featureFlags);
|
||||
}
|
||||
@ -1208,18 +1259,18 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_peers(
|
||||
JNIEnv *env, jobject obj, jlong id)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZT1_PeerList *peerList = ZT1_Node_peers(node);
|
||||
ZT_PeerList *peerList = ZT_Node_peers(node);
|
||||
|
||||
if(peerList == NULL)
|
||||
{
|
||||
LOGE("ZT1_Node_peers returned NULL");
|
||||
LOGE("ZT_Node_peers returned NULL");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1228,7 +1279,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_peers(
|
||||
if(env->EnsureLocalCapacity(peerCount))
|
||||
{
|
||||
LOGE("EnsureLocalCapacity failed!!");
|
||||
ZT1_Node_freeQueryResult(node, peerList);
|
||||
ZT_Node_freeQueryResult(node, peerList);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1236,7 +1287,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_peers(
|
||||
if(env->ExceptionCheck() || peerClass == NULL)
|
||||
{
|
||||
LOGE("Error finding Peer class");
|
||||
ZT1_Node_freeQueryResult(node, peerList);
|
||||
ZT_Node_freeQueryResult(node, peerList);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1246,7 +1297,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_peers(
|
||||
if(env->ExceptionCheck() || peerArrayObj == NULL)
|
||||
{
|
||||
LOGE("Error creating Peer[] array");
|
||||
ZT1_Node_freeQueryResult(node, peerList);
|
||||
ZT_Node_freeQueryResult(node, peerList);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1262,7 +1313,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_peers(
|
||||
}
|
||||
}
|
||||
|
||||
ZT1_Node_freeQueryResult(node, peerList);
|
||||
ZT_Node_freeQueryResult(node, peerList);
|
||||
peerList = NULL;
|
||||
|
||||
return peerArrayObj;
|
||||
@ -1277,14 +1328,14 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_networks(
|
||||
JNIEnv *env, jobject obj, jlong id)
|
||||
{
|
||||
uint64_t nodeId = (uint64_t) id;
|
||||
ZT1_Node *node = findNode(nodeId);
|
||||
ZT_Node *node = findNode(nodeId);
|
||||
if(node == NULL)
|
||||
{
|
||||
// cannot find valid node. We should never get here.
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZT1_VirtualNetworkList *networkList = ZT1_Node_networks(node);
|
||||
ZT_VirtualNetworkList *networkList = ZT_Node_networks(node);
|
||||
if(networkList == NULL)
|
||||
{
|
||||
return NULL;
|
||||
@ -1294,7 +1345,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_networks(
|
||||
if(env->ExceptionCheck() || vnetConfigClass == NULL)
|
||||
{
|
||||
LOGE("Error finding VirtualNetworkConfig class");
|
||||
ZT1_Node_freeQueryResult(node, networkList);
|
||||
ZT_Node_freeQueryResult(node, networkList);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1303,7 +1354,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_networks(
|
||||
if(env->ExceptionCheck() || networkListObject == NULL)
|
||||
{
|
||||
LOGE("Error creating VirtualNetworkConfig[] array");
|
||||
ZT1_Node_freeQueryResult(node, networkList);
|
||||
ZT_Node_freeQueryResult(node, networkList);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1318,7 +1369,7 @@ JNIEXPORT jobjectArray JNICALL Java_com_zerotier_sdk_Node_networks(
|
||||
}
|
||||
}
|
||||
|
||||
ZT1_Node_freeQueryResult(node, networkList);
|
||||
ZT_Node_freeQueryResult(node, networkList);
|
||||
|
||||
return networkListObject;
|
||||
}
|
||||
|
@ -34,10 +34,10 @@ JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processVirtualNetworkFrame
|
||||
/*
|
||||
* Class: com_zerotier_sdk_Node
|
||||
* Method: processWirePacket
|
||||
* Signature: (JJLjava/net/InetSockAddress;[B[J)Lcom/zerotier/sdk/ResultCode;
|
||||
* Signature: (JJLjava/net/InetSockAddress;Ljava/net/InetSockAddress;[B[J)Lcom/zerotier/sdk/ResultCode;
|
||||
*/
|
||||
JNIEXPORT jobject JNICALL Java_com_zerotier_sdk_Node_processWirePacket
|
||||
(JNIEnv *, jobject, jlong, jlong, jobject, jbyteArray, jlongArray);
|
||||
(JNIEnv *, jobject, jlong, jlong, jobject, jobject, jbyteArray, jlongArray);
|
||||
|
||||
/*
|
||||
* Class: com_zerotier_sdk_Node
|
||||
|
@ -169,11 +169,12 @@ public class Node {
|
||||
*/
|
||||
public ResultCode processWirePacket(
|
||||
long now,
|
||||
InetSocketAddress localAddress,
|
||||
InetSocketAddress remoteAddress,
|
||||
byte[] packetData,
|
||||
long[] nextBackgroundTaskDeadline) {
|
||||
return processWirePacket(
|
||||
nodeId, now, remoteAddress, packetData,
|
||||
nodeId, now, localAddress, remoteAddress, packetData,
|
||||
nextBackgroundTaskDeadline);
|
||||
}
|
||||
|
||||
@ -393,6 +394,7 @@ public class Node {
|
||||
private native ResultCode processWirePacket(
|
||||
long nodeId,
|
||||
long now,
|
||||
InetSocketAddress localAddress,
|
||||
InetSocketAddress remoteAddress,
|
||||
byte[] packetData,
|
||||
long[] nextBackgroundTaskDeadline);
|
||||
|
@ -35,7 +35,7 @@ import java.net.InetSocketAddress;
|
||||
|
||||
public final class VirtualNetworkConfig implements Comparable<VirtualNetworkConfig> {
|
||||
public static final int MAX_MULTICAST_SUBSCRIPTIONS = 4096;
|
||||
public static final int ZT1_MAX_ZT_ASSIGNED_ADDRESSES = 16;
|
||||
public static final int ZT_MAX_ZT_ASSIGNED_ADDRESSES = 16;
|
||||
|
||||
private long nwid;
|
||||
private long mac;
|
||||
|
@ -166,6 +166,15 @@ public:
|
||||
return _a;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Hash code for use with Hashtable
|
||||
*/
|
||||
inline unsigned long hashCode() const
|
||||
throw()
|
||||
{
|
||||
return (unsigned long)_a;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Hexadecimal string
|
||||
*/
|
||||
@ -197,11 +206,11 @@ public:
|
||||
|
||||
/**
|
||||
* Check if this address is reserved
|
||||
*
|
||||
*
|
||||
* The all-zero null address and any address beginning with 0xff are
|
||||
* reserved. (0xff is reserved for future use to designate possibly
|
||||
* longer addresses, addresses based on IPv6 innards, etc.)
|
||||
*
|
||||
*
|
||||
* @return True if address is reserved and may not be used
|
||||
*/
|
||||
inline bool isReserved() const
|
||||
@ -230,4 +239,3 @@ private:
|
||||
} // namespace ZeroTier
|
||||
|
||||
#endif
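The hashCode() method added above is what lets Address serve as a key in the Hashtable container introduced in this commit (node/Hashtable.hpp), which calls hashCode() on non-integer keys. A minimal sketch, assuming both headers are on the include path:

#include "Address.hpp"
#include "Hashtable.hpp"

// Count requests per peer address; operator[] value-initializes missing entries to 0.
static void countRequest(ZeroTier::Hashtable<ZeroTier::Address,int> &counts,const ZeroTier::Address &from)
{
    counts[from] += 1;
}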
|
||||
|
||||
|
@ -161,7 +161,7 @@
|
||||
/**
|
||||
* Default MTU used for Ethernet tap device
|
||||
*/
|
||||
#define ZT_IF_MTU ZT1_MAX_MTU
|
||||
#define ZT_IF_MTU ZT_MAX_MTU
|
||||
|
||||
/**
|
||||
* Maximum number of packet fragments we'll support
|
||||
|
@ -75,7 +75,7 @@ static inline std::map< Address,Identity > _mkRootTopologyAuth()
|
||||
Defaults::Defaults() :
|
||||
defaultRootTopology((const char *)ZT_DEFAULT_ROOT_TOPOLOGY,ZT_DEFAULT_ROOT_TOPOLOGY_LEN),
|
||||
rootTopologyAuthorities(_mkRootTopologyAuth()),
|
||||
v4Broadcast(((uint32_t)0xffffffff),ZT1_DEFAULT_PORT)
|
||||
v4Broadcast(((uint32_t)0xffffffff),ZT_DEFAULT_PORT)
|
||||
{
|
||||
}
|
||||
|
||||
|
node/Hashtable.hpp (new file, 419 lines)
@ -0,0 +1,419 @@
|
||||
/*
|
||||
* ZeroTier One - Network Virtualization Everywhere
|
||||
* Copyright (C) 2011-2015 ZeroTier, Inc.
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
* --
|
||||
*
|
||||
* ZeroTier may be used and distributed under the terms of the GPLv3, which
|
||||
* are available at: http://www.gnu.org/licenses/gpl-3.0.html
|
||||
*/
|
||||
|
||||
#ifndef ZT_HASHTABLE_HPP
|
||||
#define ZT_HASHTABLE_HPP
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <stdexcept>
|
||||
#include <vector>
|
||||
#include <utility>
|
||||
#include <algorithm>
|
||||
|
||||
namespace ZeroTier {
|
||||
|
||||
/**
|
||||
* A minimal hash table implementation for the ZeroTier core
|
||||
*
|
||||
* This is not a drop-in replacement for STL containers, and has several
|
||||
* limitations. Keys can be uint64_t or an object, and if the latter they
|
||||
* must implement a method called hashCode() that returns an unsigned long
|
||||
* value that is evenly distributed.
|
||||
*/
|
||||
template<typename K,typename V>
|
||||
class Hashtable
|
||||
{
|
||||
private:
|
||||
struct _Bucket
|
||||
{
|
||||
_Bucket(const K &k,const V &v) : k(k),v(v) {}
|
||||
_Bucket(const K &k) : k(k),v() {}
|
||||
_Bucket(const _Bucket &b) : k(b.k),v(b.v) {}
|
||||
inline _Bucket &operator=(const _Bucket &b) { k = b.k; v = b.v; return *this; }
|
||||
K k;
|
||||
V v;
|
||||
_Bucket *next; // must be set manually for each _Bucket
|
||||
};
|
||||
|
||||
public:
|
||||
/**
|
||||
* A simple forward iterator (different from STL)
|
||||
*
|
||||
* It's safe to erase the last key, but not others. Don't use set() since that
|
||||
* may rehash and invalidate the iterator. Note that erasing a key will destroy
|
||||
* the targets of the pointers returned by next().
|
||||
*/
|
||||
class Iterator
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @param ht Hash table to iterate over
|
||||
*/
|
||||
Iterator(Hashtable &ht) :
|
||||
_idx(0),
|
||||
_ht(&ht),
|
||||
_b(ht._t[0])
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @param kptr Pointer to set to point to next key
|
||||
* @param vptr Pointer to set to point to next value
|
||||
* @return True if kptr and vptr are set, false if no more entries
|
||||
*/
|
||||
inline bool next(K *&kptr,V *&vptr)
|
||||
{
|
||||
for(;;) {
|
||||
if (_b) {
|
||||
kptr = &(_b->k);
|
||||
vptr = &(_b->v);
|
||||
_b = _b->next;
|
||||
return true;
|
||||
}
|
||||
++_idx;
|
||||
if (_idx >= _ht->_bc)
|
||||
return false;
|
||||
_b = _ht->_t[_idx];
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
unsigned long _idx;
|
||||
Hashtable *_ht;
|
||||
Hashtable::_Bucket *_b;
|
||||
};
|
||||
friend class Hashtable::Iterator;
|
||||
|
||||
/**
|
||||
* @param bc Initial capacity in buckets (default: 128, must be nonzero)
|
||||
*/
|
||||
Hashtable(unsigned long bc = 128) :
|
||||
_t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * bc))),
|
||||
_bc(bc),
|
||||
_s(0)
|
||||
{
|
||||
if (!_t)
|
||||
throw std::bad_alloc();
|
||||
for(unsigned long i=0;i<bc;++i)
|
||||
_t[i] = (_Bucket *)0;
|
||||
}
|
||||
|
||||
Hashtable(const Hashtable<K,V> &ht) :
|
||||
_t(reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * ht._bc))),
|
||||
_bc(ht._bc),
|
||||
_s(ht._s)
|
||||
{
|
||||
if (!_t)
|
||||
throw std::bad_alloc();
|
||||
for(unsigned long i=0;i<_bc;++i)
|
||||
_t[i] = (_Bucket *)0;
|
||||
for(unsigned long i=0;i<_bc;++i) {
|
||||
const _Bucket *b = ht._t[i];
|
||||
while (b) {
|
||||
_Bucket *nb = new _Bucket(*b);
|
||||
nb->next = _t[i];
|
||||
_t[i] = nb;
|
||||
b = b->next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
~Hashtable()
|
||||
{
|
||||
this->clear();
|
||||
::free(_t);
|
||||
}
|
||||
|
||||
inline Hashtable &operator=(const Hashtable<K,V> &ht)
|
||||
{
|
||||
this->clear();
|
||||
if (ht._s) {
|
||||
for(unsigned long i=0;i<ht._bc;++i) {
|
||||
const _Bucket *b = ht._t[i];
|
||||
while (b) {
|
||||
this->set(b->k,b->v);
|
||||
b = b->next;
|
||||
}
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Erase all entries
|
||||
*/
|
||||
inline void clear()
|
||||
{
|
||||
if (_s) {
|
||||
for(unsigned long i=0;i<_bc;++i) {
|
||||
_Bucket *b = _t[i];
|
||||
while (b) {
|
||||
_Bucket *const nb = b->next;
|
||||
delete b;
|
||||
b = nb;
|
||||
}
|
||||
_t[i] = (_Bucket *)0;
|
||||
}
|
||||
_s = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Vector of all keys
|
||||
*/
|
||||
inline typename std::vector<K> keys() const
|
||||
{
|
||||
typename std::vector<K> k;
|
||||
if (_s) {
|
||||
k.reserve(_s);
|
||||
for(unsigned long i=0;i<_bc;++i) {
|
||||
_Bucket *b = _t[i];
|
||||
while (b) {
|
||||
k.push_back(b->k);
|
||||
b = b->next;
|
||||
}
|
||||
}
|
||||
}
|
||||
return k;
|
||||
}
|
||||
|
||||
/**
|
||||
* Append all keys (in unspecified order) to the supplied vector or list
|
||||
*
|
||||
* @param v Vector, list, or other compliant container
|
||||
* @tparam C Type of container (generally inferred)
|
||||
*/
|
||||
template<typename C>
|
||||
inline void appendKeys(C &v) const
|
||||
{
|
||||
if (_s) {
|
||||
for(unsigned long i=0;i<_bc;++i) {
|
||||
_Bucket *b = _t[i];
|
||||
while (b) {
|
||||
v.push_back(b->k);
|
||||
b = b->next;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Vector of all entries (pairs of K,V)
|
||||
*/
|
||||
inline typename std::vector< std::pair<K,V> > entries() const
|
||||
{
|
||||
typename std::vector< std::pair<K,V> > k;
|
||||
if (_s) {
|
||||
k.reserve(_s);
|
||||
for(unsigned long i=0;i<_bc;++i) {
|
||||
_Bucket *b = _t[i];
|
||||
while (b) {
|
||||
k.push_back(std::pair<K,V>(b->k,b->v));
|
||||
b = b->next;
|
||||
}
|
||||
}
|
||||
}
|
||||
return k;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param k Key
|
||||
* @return Pointer to value or NULL if not found
|
||||
*/
|
||||
inline V *get(const K &k)
|
||||
{
|
||||
_Bucket *b = _t[_hc(k) % _bc];
|
||||
while (b) {
|
||||
if (b->k == k)
|
||||
return &(b->v);
|
||||
b = b->next;
|
||||
}
|
||||
return (V *)0;
|
||||
}
|
||||
inline const V *get(const K &k) const { return const_cast<Hashtable *>(this)->get(k); }
|
||||
|
||||
/**
|
||||
* @param k Key to check
|
||||
* @return True if key is present
|
||||
*/
|
||||
inline bool contains(const K &k) const
|
||||
{
|
||||
_Bucket *b = _t[_hc(k) % _bc];
|
||||
while (b) {
|
||||
if (b->k == k)
|
||||
return true;
|
||||
b = b->next;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param k Key
|
||||
* @return True if value was present
|
||||
*/
|
||||
inline bool erase(const K &k)
|
||||
{
|
||||
const unsigned long bidx = _hc(k) % _bc;
|
||||
_Bucket *lastb = (_Bucket *)0;
|
||||
_Bucket *b = _t[bidx];
|
||||
while (b) {
|
||||
if (b->k == k) {
|
||||
if (lastb)
|
||||
lastb->next = b->next;
|
||||
else _t[bidx] = b->next;
|
||||
delete b;
|
||||
--_s;
|
||||
return true;
|
||||
}
|
||||
lastb = b;
|
||||
b = b->next;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param k Key
|
||||
* @param v Value
|
||||
* @return Reference to value in table
|
||||
*/
|
||||
inline V &set(const K &k,const V &v)
|
||||
{
|
||||
const unsigned long h = _hc(k);
|
||||
unsigned long bidx = h % _bc;
|
||||
|
||||
_Bucket *b = _t[bidx];
|
||||
while (b) {
|
||||
if (b->k == k) {
|
||||
b->v = v;
|
||||
return b->v;
|
||||
}
|
||||
b = b->next;
|
||||
}
|
||||
|
||||
if (_s >= _bc) {
|
||||
_grow();
|
||||
bidx = h % _bc;
|
||||
}
|
||||
|
||||
b = new _Bucket(k,v);
|
||||
b->next = _t[bidx];
|
||||
_t[bidx] = b;
|
||||
++_s;
|
||||
|
||||
return b->v;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param k Key
|
||||
* @return Value, possibly newly created
|
||||
*/
|
||||
inline V &operator[](const K &k)
|
||||
{
|
||||
const unsigned long h = _hc(k);
|
||||
unsigned long bidx = h % _bc;
|
||||
|
||||
_Bucket *b = _t[bidx];
|
||||
while (b) {
|
||||
if (b->k == k)
|
||||
return b->v;
|
||||
b = b->next;
|
||||
}
|
||||
|
||||
if (_s >= _bc) {
|
||||
_grow();
|
||||
bidx = h % _bc;
|
||||
}
|
||||
|
||||
b = new _Bucket(k);
|
||||
b->next = _t[bidx];
|
||||
_t[bidx] = b;
|
||||
++_s;
|
||||
|
||||
return b->v;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Number of entries
|
||||
*/
|
||||
inline unsigned long size() const throw() { return _s; }
|
||||
|
||||
/**
|
||||
* @return True if table is empty
|
||||
*/
|
||||
inline bool empty() const throw() { return (_s == 0); }
|
||||
|
||||
private:
|
||||
template<typename O>
|
||||
static inline unsigned long _hc(const O &obj)
|
||||
{
|
||||
return obj.hashCode();
|
||||
}
|
||||
static inline unsigned long _hc(const uint64_t i)
|
||||
{
|
||||
/* NOTE: this assumes that 'i' is evenly distributed, which is the case for
|
||||
* packet IDs and network IDs -- the two use cases in ZT for uint64_t keys.
|
||||
* These values are also greater than 0xffffffff so they'll map onto a full
|
||||
* bucket count just fine no matter what happens. Normally you'd want to
* hash an integer key before using it as a table index. */
|
||||
return (unsigned long)i;
|
||||
}
|
||||
static inline unsigned long _hc(const uint32_t i)
|
||||
{
|
||||
// In the uint32_t case we use a simple multiplier for hashing to ensure coverage
|
||||
return ((unsigned long)i * (unsigned long)0x9e3779b1);
|
||||
}
|
||||
|
||||
inline void _grow()
|
||||
{
|
||||
const unsigned long nc = _bc * 2;
|
||||
_Bucket **nt = reinterpret_cast<_Bucket **>(::malloc(sizeof(_Bucket *) * nc));
|
||||
if (nt) {
|
||||
for(unsigned long i=0;i<nc;++i)
|
||||
nt[i] = (_Bucket *)0;
|
||||
for(unsigned long i=0;i<_bc;++i) {
|
||||
_Bucket *b = _t[i];
|
||||
while (b) {
|
||||
_Bucket *const nb = b->next;
|
||||
const unsigned long nidx = _hc(b->k) % nc;
|
||||
b->next = nt[nidx];
|
||||
nt[nidx] = b;
|
||||
b = nb;
|
||||
}
|
||||
}
|
||||
::free(_t);
|
||||
_t = nt;
|
||||
_bc = nc;
|
||||
}
|
||||
}
|
||||
|
||||
_Bucket **_t;
|
||||
unsigned long _bc;
|
||||
unsigned long _s;
|
||||
};
|
||||
|
||||
} // namespace ZeroTier
|
||||
|
||||
#endif
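A short usage sketch of the container defined above (illustrative only; the include path and values are arbitrary):

#include <stdio.h>
#include "node/Hashtable.hpp"

int main()
{
    ZeroTier::Hashtable<uint64_t,int> ht(16);

    ht.set(0x1122334455667788ULL,1);
    ht[0x99aabbccddeeff00ULL] = 2;                 // operator[] creates the entry if it's missing

    const int *v = ht.get(0x1122334455667788ULL);  // returns NULL when the key is absent
    if (v)
        printf("found %d\n",*v);

    ZeroTier::Hashtable<uint64_t,int>::Iterator i(ht);
    uint64_t *k = (uint64_t *)0;
    int *val = (int *)0;
    while (i.next(k,val))                          // forward iteration, unspecified order
        printf("%.16llx -> %d\n",(unsigned long long)*k,*val);

    return (ht.erase(0x99aabbccddeeff00ULL)) ? 0 : 1; // erase() reports whether the key existed
}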
|
@ -69,7 +69,7 @@ bool IncomingPacket::tryDecode(const RuntimeEnvironment *RR)
|
||||
switch(verb()) {
|
||||
//case Packet::VERB_NOP:
|
||||
default: // ignore unknown verbs, but if they pass auth check they are "received"
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),verb(),0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),verb(),0,Packet::VERB_NOP);
|
||||
return true;
|
||||
case Packet::VERB_HELLO: return _doHELLO(RR);
|
||||
case Packet::VERB_ERROR: return _doERROR(RR,peer);
|
||||
@ -130,7 +130,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
|
||||
|
||||
case Packet::ERROR_IDENTITY_COLLISION:
|
||||
if (RR->topology->isRoot(peer->identity()))
|
||||
RR->node->postEvent(ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION);
|
||||
RR->node->postEvent(ZT_EVENT_FATAL_ERROR_IDENTITY_COLLISION);
|
||||
break;
|
||||
|
||||
case Packet::ERROR_NEED_MEMBERSHIP_CERTIFICATE: {
|
||||
@ -144,7 +144,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
|
||||
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE);
|
||||
nconf->com().serialize(outp);
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
}
|
||||
}
|
||||
} break;
|
||||
@ -165,7 +165,7 @@ bool IncomingPacket::_doERROR(const RuntimeEnvironment *RR,const SharedPtr<Peer>
|
||||
default: break;
|
||||
}
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_ERROR,inRePacketId,inReVerb);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_ERROR,inRePacketId,inReVerb);
|
||||
} catch (std::exception &ex) {
|
||||
TRACE("dropped ERROR from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
|
||||
} catch ( ... ) {
|
||||
@ -224,20 +224,20 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
|
||||
unsigned char key[ZT_PEER_SECRET_KEY_LENGTH];
|
||||
if (RR->identity.agree(id,key,ZT_PEER_SECRET_KEY_LENGTH)) {
|
||||
if (dearmor(key)) { // ensure packet is authentic, otherwise drop
|
||||
RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
TRACE("rejected HELLO from %s(%s): address already claimed",id.address().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
Packet outp(id.address(),RR->identity.address(),Packet::VERB_ERROR);
|
||||
outp.append((unsigned char)Packet::VERB_HELLO);
|
||||
outp.append(packetId());
|
||||
outp.append((unsigned char)Packet::ERROR_IDENTITY_COLLISION);
|
||||
outp.armor(key,true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
} else {
|
||||
RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
}
|
||||
} else {
|
||||
RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
TRACE("rejected HELLO from %s(%s): key agreement failed",id.address().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
}
|
||||
|
||||
@ -246,7 +246,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
|
||||
// Identity is the same as the one we already have -- check packet integrity
|
||||
|
||||
if (!dearmor(peer->key())) {
|
||||
RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
return true;
|
||||
}
|
||||
@ -258,7 +258,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
|
||||
|
||||
// Check identity proof of work
|
||||
if (!id.locallyValidate()) {
|
||||
RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
TRACE("dropped HELLO from %s(%s): identity invalid",id.address().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
return true;
|
||||
}
|
||||
@ -266,7 +266,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
|
||||
// Check packet integrity and authentication
|
||||
SharedPtr<Peer> newPeer(new Peer(RR->identity,id));
|
||||
if (!dearmor(newPeer->key())) {
|
||||
RR->node->postEvent(ZT1_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
RR->node->postEvent(ZT_EVENT_AUTHENTICATION_FAILURE,(const void *)&_remoteAddress);
|
||||
TRACE("rejected HELLO from %s(%s): packet failed authentication",id.address().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
return true;
|
||||
}
|
||||
@ -278,7 +278,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
|
||||
|
||||
// VALID -- continues here
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_HELLO,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_HELLO,0,Packet::VERB_NOP);
|
||||
peer->setRemoteVersion(protoVersion,vMajor,vMinor,vRevision);
|
||||
|
||||
bool trusted = false;
|
||||
@ -316,7 +316,7 @@ bool IncomingPacket::_doHELLO(const RuntimeEnvironment *RR)
|
||||
}
|
||||
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
} catch (std::exception &ex) {
|
||||
TRACE("dropped HELLO from %s(%s): %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
|
||||
} catch ( ... ) {
|
||||
@ -436,7 +436,7 @@ bool IncomingPacket::_doOK(const RuntimeEnvironment *RR,const SharedPtr<Peer> &p
|
||||
default: break;
|
||||
}
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_OK,inRePacketId,inReVerb);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_OK,inRePacketId,inReVerb);
|
||||
} catch (std::exception &ex) {
|
||||
TRACE("dropped OK from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
|
||||
} catch ( ... ) {
|
||||
@ -456,7 +456,7 @@ bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,const SharedPtr<Peer>
|
||||
outp.append(packetId());
|
||||
queried->identity().serialize(outp,false);
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
} else {
|
||||
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_ERROR);
|
||||
outp.append((unsigned char)Packet::VERB_WHOIS);
|
||||
@ -464,12 +464,12 @@ bool IncomingPacket::_doWHOIS(const RuntimeEnvironment *RR,const SharedPtr<Peer>
|
||||
outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
|
||||
outp.append(payload(),ZT_ADDRESS_LENGTH);
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
}
|
||||
} else {
|
||||
TRACE("dropped WHOIS from %s(%s): missing or invalid address",source().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
}
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_WHOIS,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_WHOIS,0,Packet::VERB_NOP);
|
||||
} catch ( ... ) {
|
||||
TRACE("dropped WHOIS from %s(%s): unexpected exception",source().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
}
|
||||
@ -487,8 +487,8 @@ bool IncomingPacket::_doRENDEZVOUS(const RuntimeEnvironment *RR,const SharedPtr<
|
||||
if ((port > 0)&&((addrlen == 4)||(addrlen == 16))) {
|
||||
InetAddress atAddr(field(ZT_PROTO_VERB_RENDEZVOUS_IDX_ADDRESS,addrlen),addrlen,port);
|
||||
TRACE("RENDEZVOUS from %s says %s might be at %s, starting NAT-t",peer->address().toString().c_str(),with.toString().c_str(),atAddr.toString().c_str());
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_RENDEZVOUS,0,Packet::VERB_NOP);
|
||||
RR->sw->rendezvous(withPeer,atAddr);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_RENDEZVOUS,0,Packet::VERB_NOP);
|
||||
RR->sw->rendezvous(withPeer,_localAddress,atAddr);
|
||||
} else {
|
||||
TRACE("dropped corrupt RENDEZVOUS from %s(%s) (bad address or port)",peer->address().toString().c_str(),_remoteAddress.toString().c_str());
|
||||
}
|
||||
@ -525,7 +525,7 @@ bool IncomingPacket::_doFRAME(const RuntimeEnvironment *RR,const SharedPtr<Peer>
|
||||
RR->node->putFrame(network->id(),MAC(peer->address(),network->id()),network->mac(),etherType,0,field(ZT_PROTO_VERB_FRAME_IDX_PAYLOAD,payloadLen),payloadLen);
|
||||
}
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_FRAME,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_FRAME,0,Packet::VERB_NOP);
|
||||
} else {
|
||||
TRACE("dropped FRAME from %s(%s): we are not connected to network %.16llx",source().toString().c_str(),_remoteAddress.toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
|
||||
}
|
||||
@ -602,7 +602,7 @@ bool IncomingPacket::_doEXT_FRAME(const RuntimeEnvironment *RR,const SharedPtr<P
|
||||
RR->node->putFrame(network->id(),from,to,etherType,0,field(comLen + ZT_PROTO_VERB_EXT_FRAME_IDX_PAYLOAD,payloadLen),payloadLen);
|
||||
}
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_EXT_FRAME,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_EXT_FRAME,0,Packet::VERB_NOP);
|
||||
} else {
|
||||
TRACE("dropped EXT_FRAME from %s(%s): we are not connected to network %.16llx",source().toString().c_str(),_remoteAddress.toString().c_str(),at<uint64_t>(ZT_PROTO_VERB_FRAME_IDX_NETWORK_ID));
|
||||
}
|
||||
@ -623,7 +623,7 @@ bool IncomingPacket::_doMULTICAST_LIKE(const RuntimeEnvironment *RR,const Shared
|
||||
for(unsigned int ptr=ZT_PACKET_IDX_PAYLOAD;ptr<size();ptr+=18)
|
||||
RR->mc->add(now,at<uint64_t>(ptr),MulticastGroup(MAC(field(ptr + 8,6),6),at<uint32_t>(ptr + 14)),peer->address());
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_LIKE,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_LIKE,0,Packet::VERB_NOP);
|
||||
} catch (std::exception &ex) {
|
||||
TRACE("dropped MULTICAST_LIKE from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
|
||||
} catch ( ... ) {
|
||||
@ -647,7 +647,7 @@ bool IncomingPacket::_doNETWORK_MEMBERSHIP_CERTIFICATE(const RuntimeEnvironment
|
||||
}
|
||||
}
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_MEMBERSHIP_CERTIFICATE,0,Packet::VERB_NOP);
|
||||
} catch (std::exception &ex) {
|
||||
TRACE("dropped NETWORK_MEMBERSHIP_CERTIFICATE from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),ex.what());
|
||||
} catch ( ... ) {
|
||||
@ -662,15 +662,15 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
|
||||
const uint64_t nwid = at<uint64_t>(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_NETWORK_ID);
|
||||
const unsigned int metaDataLength = at<uint16_t>(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT_LEN);
|
||||
const Dictionary metaData((const char *)field(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT,metaDataLength),metaDataLength);
|
||||
const uint64_t haveRevision = ((ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength + 8) <= size()) ? at<uint64_t>(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength) : 0ULL;
|
||||
//const uint64_t haveRevision = ((ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength + 8) <= size()) ? at<uint64_t>(ZT_PROTO_VERB_NETWORK_CONFIG_REQUEST_IDX_DICT + metaDataLength) : 0ULL;
|
||||
|
||||
const unsigned int h = hops();
|
||||
const uint64_t pid = packetId();
|
||||
peer->received(RR,_remoteAddress,h,pid,Packet::VERB_NETWORK_CONFIG_REQUEST,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,h,pid,Packet::VERB_NETWORK_CONFIG_REQUEST,0,Packet::VERB_NOP);
|
||||
|
||||
if (RR->localNetworkController) {
|
||||
Dictionary netconf;
|
||||
switch(RR->localNetworkController->doNetworkConfigRequest((h > 0) ? InetAddress() : _remoteAddress,RR->identity,peer->identity(),nwid,metaData,haveRevision,netconf)) {
|
||||
switch(RR->localNetworkController->doNetworkConfigRequest((h > 0) ? InetAddress() : _remoteAddress,RR->identity,peer->identity(),nwid,metaData,netconf)) {
|
||||
|
||||
case NetworkController::NETCONF_QUERY_OK: {
|
||||
const std::string netconfStr(netconf.toString());
|
||||
@ -688,7 +688,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
|
||||
if (outp.size() > ZT_PROTO_MAX_PACKET_LENGTH) {
|
||||
TRACE("NETWORK_CONFIG_REQUEST failed: internal error: netconf size %u is too large",(unsigned int)netconfStr.length());
|
||||
} else {
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
}
|
||||
}
|
||||
} break;
|
||||
@ -700,7 +700,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
|
||||
outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
|
||||
outp.append(nwid);
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
} break;
|
||||
|
||||
case NetworkController::NETCONF_QUERY_ACCESS_DENIED: {
|
||||
@ -710,7 +710,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
|
||||
outp.append((unsigned char)Packet::ERROR_NETWORK_ACCESS_DENIED_);
|
||||
outp.append(nwid);
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
} break;
|
||||
|
||||
case NetworkController::NETCONF_QUERY_INTERNAL_SERVER_ERROR:
|
||||
@ -732,7 +732,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REQUEST(const RuntimeEnvironment *RR,cons
|
||||
outp.append((unsigned char)Packet::ERROR_UNSUPPORTED_OPERATION);
|
||||
outp.append(nwid);
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
}
|
||||
} catch (std::exception &exc) {
|
||||
TRACE("dropped NETWORK_CONFIG_REQUEST from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
|
||||
@ -753,7 +753,7 @@ bool IncomingPacket::_doNETWORK_CONFIG_REFRESH(const RuntimeEnvironment *RR,cons
|
||||
nw->requestConfiguration();
|
||||
ptr += 8;
|
||||
}
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_CONFIG_REFRESH,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_NETWORK_CONFIG_REFRESH,0,Packet::VERB_NOP);
|
||||
} catch (std::exception &exc) {
|
||||
TRACE("dropped NETWORK_CONFIG_REFRESH from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
|
||||
} catch ( ... ) {
|
||||
@ -780,11 +780,11 @@ bool IncomingPacket::_doMULTICAST_GATHER(const RuntimeEnvironment *RR,const Shar
|
||||
outp.append((uint32_t)mg.adi());
|
||||
if (RR->mc->gather(peer->address(),nwid,mg,outp,gatherLimit)) {
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
}
|
||||
}
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_GATHER,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_GATHER,0,Packet::VERB_NOP);
|
||||
} catch (std::exception &exc) {
|
||||
TRACE("dropped MULTICAST_GATHER from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
|
||||
} catch ( ... ) {
|
||||
@ -871,12 +871,12 @@ bool IncomingPacket::_doMULTICAST_FRAME(const RuntimeEnvironment *RR,const Share
|
||||
outp.append((unsigned char)0x02); // flag 0x02 = contains gather results
|
||||
if (RR->mc->gather(peer->address(),nwid,to,outp,gatherLimit)) {
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
}
|
||||
}
|
||||
} // else ignore -- not a member of this network
|
||||
|
||||
peer->received(RR,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_FRAME,0,Packet::VERB_NOP);
|
||||
peer->received(RR,_localAddress,_remoteAddress,hops(),packetId(),Packet::VERB_MULTICAST_FRAME,0,Packet::VERB_NOP);
|
||||
} catch (std::exception &exc) {
|
||||
TRACE("dropped MULTICAST_FRAME from %s(%s): unexpected exception: %s",source().toString().c_str(),_remoteAddress.toString().c_str(),exc.what());
|
||||
} catch ( ... ) {
|
||||
@ -905,14 +905,14 @@ bool IncomingPacket::_doPUSH_DIRECT_PATHS(const RuntimeEnvironment *RR,const Sha
|
||||
InetAddress a(field(ptr,4),4,at<uint16_t>(ptr + 4));
|
||||
if ( ((flags & (0x01 | 0x02)) == 0) && (Path::isAddressValidForPath(a)) ) {
|
||||
TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
|
||||
peer->attemptToContactAt(RR,a,RR->node->now());
|
||||
peer->attemptToContactAt(RR,_localAddress,a,RR->node->now());
|
||||
}
|
||||
} break;
|
||||
case 6: {
|
||||
InetAddress a(field(ptr,16),16,at<uint16_t>(ptr + 16));
|
||||
if ( ((flags & (0x01 | 0x02)) == 0) && (Path::isAddressValidForPath(a)) ) {
|
||||
TRACE("attempting to contact %s at pushed direct path %s",peer->address().toString().c_str(),a.toString().c_str());
|
||||
peer->attemptToContactAt(RR,a,RR->node->now());
|
||||
peer->attemptToContactAt(RR,_localAddress,a,RR->node->now());
|
||||
}
|
||||
} break;
|
||||
}
|
||||
@ -934,7 +934,7 @@ void IncomingPacket::_sendErrorNeedCertificate(const RuntimeEnvironment *RR,cons
|
||||
outp.append((unsigned char)Packet::ERROR_NEED_MEMBERSHIP_CERTIFICATE);
|
||||
outp.append(nwid);
|
||||
outp.armor(peer->key(),true);
|
||||
RR->node->putPacket(_remoteAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(_localAddress,_remoteAddress,outp.data(),outp.size());
|
||||
}
|
||||
|
||||
} // namespace ZeroTier
|
||||
|
@ -72,13 +72,15 @@ public:
|
||||
*
|
||||
* @param data Packet data
|
||||
* @param len Packet length
|
||||
* @param localAddress Local interface address
|
||||
* @param remoteAddress Address from which packet came
|
||||
* @param now Current time
|
||||
* @throws std::out_of_range Range error processing packet
|
||||
*/
|
||||
IncomingPacket(const void *data,unsigned int len,const InetAddress &remoteAddress,uint64_t now) :
|
||||
IncomingPacket(const void *data,unsigned int len,const InetAddress &localAddress,const InetAddress &remoteAddress,uint64_t now) :
|
||||
Packet(data,len),
|
||||
_receiveTime(now),
|
||||
_localAddress(localAddress),
|
||||
_remoteAddress(remoteAddress),
|
||||
__refCount()
|
||||
{
|
||||
@ -127,6 +129,7 @@ private:
|
||||
void _sendErrorNeedCertificate(const RuntimeEnvironment *RR,const SharedPtr<Peer> &peer,uint64_t nwid);
|
||||
|
||||
uint64_t _receiveTime;
|
||||
InetAddress _localAddress;
|
||||
InetAddress _remoteAddress;
|
||||
AtomicCounter __refCount;
|
||||
};
|
||||
|
@ -399,4 +399,30 @@ InetAddress InetAddress::makeIpv6LinkLocal(const MAC &mac)
|
||||
return InetAddress(sin6);
|
||||
}
|
||||
|
||||
InetAddress InetAddress::makeIpv6rfc4193(uint64_t nwid,uint64_t zeroTierAddress)
|
||||
throw()
|
||||
{
|
||||
InetAddress r;
|
||||
struct sockaddr_in6 *const sin6 = reinterpret_cast<struct sockaddr_in6 *>(&r);
|
||||
sin6->sin6_family = AF_INET6;
|
||||
sin6->sin6_addr.s6_addr[0] = 0xfd;
|
||||
sin6->sin6_addr.s6_addr[1] = (uint8_t)(nwid >> 56);
|
||||
sin6->sin6_addr.s6_addr[2] = (uint8_t)(nwid >> 48);
|
||||
sin6->sin6_addr.s6_addr[3] = (uint8_t)(nwid >> 40);
|
||||
sin6->sin6_addr.s6_addr[4] = (uint8_t)(nwid >> 32);
|
||||
sin6->sin6_addr.s6_addr[5] = (uint8_t)(nwid >> 24);
|
||||
sin6->sin6_addr.s6_addr[6] = (uint8_t)(nwid >> 16);
|
||||
sin6->sin6_addr.s6_addr[7] = (uint8_t)(nwid >> 8);
|
||||
sin6->sin6_addr.s6_addr[8] = (uint8_t)nwid;
|
||||
sin6->sin6_addr.s6_addr[9] = 0x99;
|
||||
sin6->sin6_addr.s6_addr[10] = 0x93;
|
||||
sin6->sin6_addr.s6_addr[11] = (uint8_t)(zeroTierAddress >> 32);
|
||||
sin6->sin6_addr.s6_addr[12] = (uint8_t)(zeroTierAddress >> 24);
|
||||
sin6->sin6_addr.s6_addr[13] = (uint8_t)(zeroTierAddress >> 16);
|
||||
sin6->sin6_addr.s6_addr[14] = (uint8_t)(zeroTierAddress >> 8);
|
||||
sin6->sin6_addr.s6_addr[15] = (uint8_t)zeroTierAddress;
|
||||
sin6->sin6_port = Utils::hton((uint16_t)88); // /88 includes 0xfd + network ID, discriminating by device ID below that
|
||||
return r;
|
||||
}
|
||||
|
||||
} // namespace ZeroTier
|
||||
|
@ -375,6 +375,50 @@ struct InetAddress : public sockaddr_storage
|
||||
*/
|
||||
static InetAddress makeIpv6LinkLocal(const MAC &mac)
|
||||
throw();
|
||||
|
||||
/**
|
||||
* Compute private IPv6 unicast address from network ID and ZeroTier address
|
||||
*
|
||||
* This generates a private unicast IPv6 address that is mostly compliant
|
||||
* with the letter of RFC4193 and certainly compliant in spirit.
|
||||
*
|
||||
* RFC4193 specifies a format of:
|
||||
*
|
||||
* | 7 bits |1| 40 bits | 16 bits | 64 bits |
|
||||
* | Prefix |L| Global ID | Subnet ID | Interface ID |
|
||||
*
|
||||
* The 'L' bit is set to 1, yielding an address beginning with 0xfd. Then
|
||||
* the network ID is filled into the global ID, subnet ID, and first byte
|
||||
* of the "interface ID" field. Since the first 40 bits of the network ID
|
||||
* is the unique ZeroTier address of its controller, this makes a very
|
||||
* good random global ID. Since network IDs have 24 more bits, we let it
|
||||
* overflow into the interface ID.
|
||||
*
|
||||
* After that we pad with two bytes, 0x99 and 0x93, which read as the digits
* of 9993: ZeroTier's default port.
|
||||
*
|
||||
* Finally we fill the remaining 40 bits of the interface ID field with
|
||||
* the 40-bit unique ZeroTier device ID of the network member.
|
||||
*
|
||||
* This yields a valid RFC4193 address with a random global ID, a
|
||||
* meaningful subnet ID, and a unique interface ID, all mappable back onto
|
||||
* ZeroTier space.
|
||||
*
|
||||
* This in turn could allow us, on networks numbered this way, to emulate
|
||||
* IPv6 NDP and eliminate all multicast. This could be beneficial for
|
||||
* small devices and huge networks, e.g. IoT applications.
|
||||
*
|
||||
* The returned address is given an odd prefix length of /88, since within
|
||||
* a given network only the last 40 bits (device ID) are variable. This
|
||||
* is a bit unusual but as far as we know should not cause any problems with
|
||||
* any non-braindead IPv6 stack.
|
||||
*
|
||||
* @param nwid 64-bit network ID
|
||||
* @param zeroTierAddress 40-bit device address (in least significant 40 bits, highest 24 bits ignored)
|
||||
* @return IPv6 private unicast address with /88 netmask
|
||||
*/
|
||||
static InetAddress makeIpv6rfc4193(uint64_t nwid,uint64_t zeroTierAddress)
|
||||
throw();
|
||||
};
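A worked example of the layout described above, using a network ID and device address chosen purely for illustration:

// Hypothetical inputs: nwid = 0x8056c2e21c000001, device address = 0x1234567890
static InetAddress exampleRfc4193Address()
{
    // Bytes: 0xfd, the 8 network ID bytes, 0x99 0x93, then the low 5 device ID bytes,
    // giving fd80:56c2:e21c:0000:0199:9312:3456:7890 with a /88 prefix length.
    return InetAddress::makeIpv6rfc4193(0x8056c2e21c000001ULL,0x1234567890ULL);
}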
|
||||
|
||||
} // namespace ZeroTier
@@ -242,12 +242,20 @@ public:
*/
inline unsigned int size() const throw() { return 6; }

inline unsigned long hashCode() const throw() { return (unsigned long)_m; }

inline MAC &operator=(const MAC &m)
throw()
{
_m = m._m;
return *this;
}
inline MAC &operator=(const uint64_t m)
throw()
{
_m = m;
return *this;
}

inline bool operator==(const MAC &m) const throw() { return (_m == m._m); }
inline bool operator!=(const MAC &m) const throw() { return (_m != m._m); }

@@ -141,6 +141,8 @@ public:
*/
inline uint32_t adi() const throw() { return _adi; }

inline unsigned long hashCode() const throw() { return (_mac.hashCode() ^ (unsigned long)_adi); }

inline bool operator==(const MulticastGroup &g) const throw() { return ((_mac == g._mac)&&(_adi == g._adi)); }
inline bool operator!=(const MulticastGroup &g) const throw() { return ((_mac != g._mac)||(_adi != g._adi)); }
inline bool operator<(const MulticastGroup &g) const throw()

@@ -41,7 +41,9 @@
namespace ZeroTier {

Multicaster::Multicaster(const RuntimeEnvironment *renv) :
RR(renv)
RR(renv),
_groups(1024),
_groups_m()
{
}

@@ -54,7 +56,7 @@ void Multicaster::addMultiple(uint64_t now,uint64_t nwid,const MulticastGroup &m
const unsigned char *p = (const unsigned char *)addresses;
const unsigned char *e = p + (5 * count);
Mutex::Lock _l(_groups_m);
MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];
MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];
while (p != e) {
_add(now,nwid,mg,gs,Address(p,5));
p += 5;

@@ -64,11 +66,11 @@ void Multicaster::addMultiple(uint64_t now,uint64_t nwid,const MulticastGroup &m
void Multicaster::remove(uint64_t nwid,const MulticastGroup &mg,const Address &member)
{
Mutex::Lock _l(_groups_m);
std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::iterator g(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
if (g != _groups.end()) {
for(std::vector<MulticastGroupMember>::iterator m(g->second.members.begin());m!=g->second.members.end();++m) {
MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
if (s) {
for(std::vector<MulticastGroupMember>::iterator m(s->members.begin());m!=s->members.end();++m) {
if (m->address == member) {
g->second.members.erase(m);
s->members.erase(m);
break;
}
}

@@ -102,18 +104,18 @@ unsigned int Multicaster::gather(const Address &queryingPeer,uint64_t nwid,const
Mutex::Lock _l(_groups_m);

std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::const_iterator gs(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
if ((gs != _groups.end())&&(!gs->second.members.empty())) {
totalKnown += (unsigned int)gs->second.members.size();
const MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
if ((s)&&(!s->members.empty())) {
totalKnown += (unsigned int)s->members.size();

// Members are returned in random order so that repeated gather queries
// will return different subsets of a large multicast group.
k = 0;
while ((added < limit)&&(k < gs->second.members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_UDP_DEFAULT_PAYLOAD_MTU)) {
while ((added < limit)&&(k < s->members.size())&&((appendTo.size() + ZT_ADDRESS_LENGTH) <= ZT_UDP_DEFAULT_PAYLOAD_MTU)) {
rptr = (unsigned int)RR->node->prng();

restart_member_scan:
a = gs->second.members[rptr % (unsigned int)gs->second.members.size()].address.toInt();
a = s->members[rptr % (unsigned int)s->members.size()].address.toInt();
for(i=0;i<k;++i) {
if (picked[i] == a) {
++rptr;

@@ -146,10 +148,10 @@ std::vector<Address> Multicaster::getMembers(uint64_t nwid,const MulticastGroup
{
std::vector<Address> ls;
Mutex::Lock _l(_groups_m);
std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::const_iterator gs(_groups.find(std::pair<uint64_t,MulticastGroup>(nwid,mg)));
if (gs == _groups.end())
const MulticastGroupStatus *s = _groups.get(Multicaster::Key(nwid,mg));
if (!s)
return ls;
for(std::vector<MulticastGroupMember>::const_reverse_iterator m(gs->second.members.rbegin());m!=gs->second.members.rend();++m) {
for(std::vector<MulticastGroupMember>::const_reverse_iterator m(s->members.rbegin());m!=s->members.rend();++m) {
ls.push_back(m->address);
if (ls.size() >= limit)
break;

@@ -173,7 +175,7 @@ void Multicaster::send(
unsigned long *indexes = idxbuf;

Mutex::Lock _l(_groups_m);
MulticastGroupStatus &gs = _groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)];
MulticastGroupStatus &gs = _groups[Multicaster::Key(nwid,mg)];

if (!gs.members.empty()) {
// Allocate a memory buffer if group is monstrous

@@ -291,18 +293,22 @@ void Multicaster::send(
void Multicaster::clean(uint64_t now)
{
Mutex::Lock _l(_groups_m);
for(std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus >::iterator mm(_groups.begin());mm!=_groups.end();) {
for(std::list<OutboundMulticast>::iterator tx(mm->second.txQueue.begin());tx!=mm->second.txQueue.end();) {

Multicaster::Key *k = (Multicaster::Key *)0;
MulticastGroupStatus *s = (MulticastGroupStatus *)0;
Hashtable<Multicaster::Key,MulticastGroupStatus>::Iterator mm(_groups);
while (mm.next(k,s)) {
for(std::list<OutboundMulticast>::iterator tx(s->txQueue.begin());tx!=s->txQueue.end();) {
if ((tx->expired(now))||(tx->atLimit()))
mm->second.txQueue.erase(tx++);
s->txQueue.erase(tx++);
else ++tx;
}

unsigned long count = 0;
{
std::vector<MulticastGroupMember>::iterator reader(mm->second.members.begin());
std::vector<MulticastGroupMember>::iterator reader(s->members.begin());
std::vector<MulticastGroupMember>::iterator writer(reader);
while (reader != mm->second.members.end()) {
while (reader != s->members.end()) {
if ((now - reader->timestamp) < ZT_MULTICAST_LIKE_EXPIRE) {
*writer = *reader;
++writer;

@@ -313,13 +319,11 @@ void Multicaster::clean(uint64_t now)
}

if (count) {
mm->second.members.resize(count);
++mm;
} else if (mm->second.txQueue.empty()) {
_groups.erase(mm++);
s->members.resize(count);
} else if (s->txQueue.empty()) {
_groups.erase(*k);
} else {
mm->second.members.clear();
++mm;
s->members.clear();
}
}
}

@@ -36,6 +36,7 @@
#include <list>

#include "Constants.hpp"
#include "Hashtable.hpp"
#include "Address.hpp"
#include "MAC.hpp"
#include "MulticastGroup.hpp"

@@ -56,6 +57,18 @@ class Packet;
class Multicaster : NonCopyable
{
private:
struct Key
{
Key() : nwid(0),mg() {}
Key(uint64_t n,const MulticastGroup &g) : nwid(n),mg(g) {}

uint64_t nwid;
MulticastGroup mg;

inline bool operator==(const Key &k) const throw() { return ((nwid == k.nwid)&&(mg == k.mg)); }
inline unsigned long hashCode() const throw() { return (mg.hashCode() ^ (unsigned long)(nwid ^ (nwid >> 32))); }
};
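For readers who have not seen the Hashtable that replaces std::map in this change: the key type essentially only needs default construction, an operator==, and a hashCode() method, and Key above supplies exactly that, folding the high and low halves of the 64-bit network ID into the multicast group's own hash. The following self-contained sketch shows that contract with plain separate chaining rather than ZeroTier's actual Hashtable; ToyKey, the bucket count, and the stored value are illustrative only.

#include <cstdint>
#include <cstdio>
#include <list>
#include <utility>
#include <vector>

// Toy key mirroring the contract Multicaster::Key satisfies above:
// equality plus hashCode(). Names and values here are illustrative only.
struct ToyKey
{
	uint64_t nwid;
	unsigned long mgHash; // stand-in for MulticastGroup::hashCode()
	bool operator==(const ToyKey &k) const { return ((nwid == k.nwid)&&(mgHash == k.mgHash)); }
	unsigned long hashCode() const { return (mgHash ^ (unsigned long)(nwid ^ (nwid >> 32))); }
};

int main()
{
	const unsigned long buckets = 1024; // same order of magnitude as _groups(1024) above
	std::vector< std::list< std::pair<ToyKey,int> > > table(buckets);

	ToyKey k; k.nwid = 0x8056c2e21c000001ULL; k.mgHash = 0x1234;  // example values
	table[k.hashCode() % buckets].push_back(std::make_pair(k,42)); // bucket = hash mod bucket count

	// Lookup walks only that one bucket and uses operator== to confirm the key.
	const std::list< std::pair<ToyKey,int> > &b = table[k.hashCode() % buckets];
	for(std::list< std::pair<ToyKey,int> >::const_iterator i(b.begin());i!=b.end();++i) {
		if (i->first == k)
			printf("found value %d\n",i->second);
	}
	return 0;
}

Lookups stay cheap as long as hashCode() spreads keys across buckets, which is why MAC and MulticastGroup gain hashCode() methods in this same commit.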
struct MulticastGroupMember
{
MulticastGroupMember() {}

@@ -89,7 +102,7 @@ public:
inline void add(uint64_t now,uint64_t nwid,const MulticastGroup &mg,const Address &member)
{
Mutex::Lock _l(_groups_m);
_add(now,nwid,mg,_groups[std::pair<uint64_t,MulticastGroup>(nwid,mg)],member);
_add(now,nwid,mg,_groups[Multicaster::Key(nwid,mg)],member);
}

/**

@@ -181,7 +194,7 @@ private:
void _add(uint64_t now,uint64_t nwid,const MulticastGroup &mg,MulticastGroupStatus &gs,const Address &member);

const RuntimeEnvironment *RR;
std::map< std::pair<uint64_t,MulticastGroup>,MulticastGroupStatus > _groups;
Hashtable<Multicaster::Key,MulticastGroupStatus> _groups;
Mutex _groups_m;
};

173 node/Network.cpp
@@ -92,7 +92,7 @@ Network::Network(const RuntimeEnvironment *renv,uint64_t nwid) :
com.deserialize2(p,e);
if (!com)
break;
_membershipCertificates.insert(std::pair< Address,CertificateOfMembership >(com.issuedTo(),com));
_certInfo[com.issuedTo()].com = com;
}
}
}

@@ -100,44 +100,47 @@ Network::Network(const RuntimeEnvironment *renv,uint64_t nwid) :
}

if (!_portInitialized) {
ZT1_VirtualNetworkConfig ctmp;
ZT_VirtualNetworkConfig ctmp;
_externalConfig(&ctmp);
_portError = RR->node->configureVirtualNetworkPort(_id,ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_UP,&ctmp);
_portError = RR->node->configureVirtualNetworkPort(_id,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP,&ctmp);
_portInitialized = true;
}
}

Network::~Network()
{
ZT1_VirtualNetworkConfig ctmp;
ZT_VirtualNetworkConfig ctmp;
_externalConfig(&ctmp);

char n[128];
if (_destroyed) {
RR->node->configureVirtualNetworkPort(_id,ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY,&ctmp);
RR->node->configureVirtualNetworkPort(_id,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY,&ctmp);

Utils::snprintf(n,sizeof(n),"networks.d/%.16llx.conf",_id);
RR->node->dataStoreDelete(n);
Utils::snprintf(n,sizeof(n),"networks.d/%.16llx.mcerts",_id);
RR->node->dataStoreDelete(n);
} else {
RR->node->configureVirtualNetworkPort(_id,ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN,&ctmp);
RR->node->configureVirtualNetworkPort(_id,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN,&ctmp);

clean();

std::string buf("ZTMCD0");
Utils::snprintf(n,sizeof(n),"networks.d/%.16llx.mcerts",_id);

Mutex::Lock _l(_lock);

if ((!_config)||(_config->isPublic())||(_membershipCertificates.size() == 0)) {
if ((!_config)||(_config->isPublic())||(_certInfo.empty())) {
RR->node->dataStoreDelete(n);
return;
} else {
std::string buf("ZTMCD0");
Hashtable< Address,_RemoteMemberCertificateInfo >::Iterator i(_certInfo);
Address *a = (Address *)0;
_RemoteMemberCertificateInfo *ci = (_RemoteMemberCertificateInfo *)0;
while (i.next(a,ci)) {
if (ci->com)
ci->com.serialize2(buf);
}
RR->node->dataStorePut(n,buf,true);
}

for(std::map<Address,CertificateOfMembership>::iterator c(_membershipCertificates.begin());c!=_membershipCertificates.end();++c)
c->second.serialize2(buf);

RR->node->dataStorePut(n,buf,true);
}
}

@@ -147,7 +150,7 @@ bool Network::subscribedToMulticastGroup(const MulticastGroup &mg,bool includeBr
if (std::binary_search(_myMulticastGroups.begin(),_myMulticastGroups.end(),mg))
return true;
else if (includeBridgedGroups)
return (_multicastGroupsBehindMe.find(mg) != _multicastGroupsBehindMe.end());
return _multicastGroupsBehindMe.contains(mg);
else return false;
}

@@ -181,7 +184,7 @@ bool Network::applyConfiguration(const SharedPtr<NetworkConfig> &conf)
return false;
try {
if ((conf->networkId() == _id)&&(conf->issuedTo() == RR->identity.address())) {
ZT1_VirtualNetworkConfig ctmp;
ZT_VirtualNetworkConfig ctmp;
bool portInitialized;
{
Mutex::Lock _l(_lock);

@@ -192,7 +195,7 @@ bool Network::applyConfiguration(const SharedPtr<NetworkConfig> &conf)
portInitialized = _portInitialized;
_portInitialized = true;
}
_portError = RR->node->configureVirtualNetworkPort(_id,(portInitialized) ? ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE : ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_UP,&ctmp);
_portError = RR->node->configureVirtualNetworkPort(_id,(portInitialized) ? ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE : ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP,&ctmp);
return true;
} else {
TRACE("ignored invalid configuration for network %.16llx (configuration contains mismatched network ID or issued-to address)",(unsigned long long)_id);

@@ -237,7 +240,7 @@ void Network::requestConfiguration()
if (RR->localNetworkController) {
SharedPtr<NetworkConfig> nconf(config2());
Dictionary newconf;
switch(RR->localNetworkController->doNetworkConfigRequest(InetAddress(),RR->identity,RR->identity,_id,Dictionary(),(nconf) ? nconf->revision() : (uint64_t)0,newconf)) {
switch(RR->localNetworkController->doNetworkConfigRequest(InetAddress(),RR->identity,RR->identity,_id,Dictionary(),newconf)) {
case NetworkController::NETCONF_QUERY_OK:
this->setConfiguration(newconf,true);
return;

@@ -284,11 +287,12 @@ bool Network::validateAndAddMembershipCertificate(const CertificateOfMembership
return false;

Mutex::Lock _l(_lock);
CertificateOfMembership &old = _membershipCertificates[cert.issuedTo()];

// Nothing to do if the cert hasn't changed -- we get duplicates due to zealous cert pushing
if (old == cert)
return true; // but if it's a duplicate of one we already accepted, return is 'true'
{
const _RemoteMemberCertificateInfo *ci = _certInfo.get(cert.issuedTo());
if ((ci)&&(ci->com == cert))
return true; // we already have it
}

// Check signature, log and return if cert is invalid
if (cert.signedBy() != controller()) {

@@ -322,9 +326,8 @@ bool Network::validateAndAddMembershipCertificate(const CertificateOfMembership
}
}

// If we made it past authentication, update cert
if (cert.revision() != old.revision())
old = cert;
// If we made it past authentication, add or update cert in our cert info store
_certInfo[cert.issuedTo()].com = cert;

return true;
}

@@ -333,9 +336,9 @@ bool Network::peerNeedsOurMembershipCertificate(const Address &to,uint64_t now)
{
Mutex::Lock _l(_lock);
if ((_config)&&(!_config->isPublic())&&(_config->com())) {
uint64_t &lastPushed = _lastPushedMembershipCertificate[to];
if ((now - lastPushed) > (ZT_NETWORK_AUTOCONF_DELAY / 2)) {
lastPushed = now;
_RemoteMemberCertificateInfo &ci = _certInfo[to];
if ((now - ci.lastPushed) > (ZT_NETWORK_AUTOCONF_DELAY / 2)) {
ci.lastPushed = now;
return true;
}
}

@@ -352,31 +355,28 @@ void Network::clean()

if ((_config)&&(_config->isPublic())) {
// Open (public) networks do not track certs or cert pushes at all.
_membershipCertificates.clear();
_lastPushedMembershipCertificate.clear();
_certInfo.clear();
} else if (_config) {
// Clean certificates that are no longer valid from the cache.
for(std::map<Address,CertificateOfMembership>::iterator c=(_membershipCertificates.begin());c!=_membershipCertificates.end();) {
if (_config->com().agreesWith(c->second))
++c;
else _membershipCertificates.erase(c++);
}

// Clean entries from the last pushed tracking map if they're so old as
// to be no longer relevant.
uint64_t forgetIfBefore = now - (ZT_PEER_ACTIVITY_TIMEOUT * 16); // arbitrary reasonable cutoff
for(std::map<Address,uint64_t>::iterator lp(_lastPushedMembershipCertificate.begin());lp!=_lastPushedMembershipCertificate.end();) {
if (lp->second < forgetIfBefore)
_lastPushedMembershipCertificate.erase(lp++);
else ++lp;
// Clean obsolete entries from private network cert info table
Hashtable< Address,_RemoteMemberCertificateInfo >::Iterator i(_certInfo);
Address *a = (Address *)0;
_RemoteMemberCertificateInfo *ci = (_RemoteMemberCertificateInfo *)0;
const uint64_t forgetIfBefore = now - (ZT_PEER_ACTIVITY_TIMEOUT * 16); // arbitrary reasonable cutoff
while (i.next(a,ci)) {
if ((ci->lastPushed < forgetIfBefore)&&(!ci->com.agreesWith(_config->com())))
_certInfo.erase(*a);
}
}

// Clean learned multicast groups if we haven't heard from them in a while
for(std::map<MulticastGroup,uint64_t>::iterator mg(_multicastGroupsBehindMe.begin());mg!=_multicastGroupsBehindMe.end();) {
if ((now - mg->second) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
_multicastGroupsBehindMe.erase(mg++);
else ++mg;
{
Hashtable< MulticastGroup,uint64_t >::Iterator i(_multicastGroupsBehindMe);
MulticastGroup *mg = (MulticastGroup *)0;
uint64_t *ts = (uint64_t *)0;
while (i.next(mg,ts)) {
if ((now - *ts) > (ZT_MULTICAST_LIKE_EXPIRE * 2))
_multicastGroupsBehindMe.erase(*mg);
}
}
}

@@ -385,22 +385,34 @@ void Network::learnBridgeRoute(const MAC &mac,const Address &addr)
Mutex::Lock _l(_lock);
_remoteBridgeRoutes[mac] = addr;

// If _remoteBridgeRoutes exceeds sanity limit, trim worst offenders until below -- denial of service circuit breaker
// Anti-DOS circuit breaker to prevent nodes from spamming us with absurd numbers of bridge routes
while (_remoteBridgeRoutes.size() > ZT_MAX_BRIDGE_ROUTES) {
std::map<Address,unsigned long> counts;
Hashtable< Address,unsigned long > counts;
Address maxAddr;
unsigned long maxCount = 0;
for(std::map<MAC,Address>::iterator br(_remoteBridgeRoutes.begin());br!=_remoteBridgeRoutes.end();++br) {
unsigned long c = ++counts[br->second];
if (c > maxCount) {
maxCount = c;
maxAddr = br->second;

MAC *m = (MAC *)0;
Address *a = (Address *)0;

// Find the address responsible for the most entries
{
Hashtable<MAC,Address>::Iterator i(_remoteBridgeRoutes);
while (i.next(m,a)) {
const unsigned long c = ++counts[*a];
if (c > maxCount) {
maxCount = c;
maxAddr = *a;
}
}
}
for(std::map<MAC,Address>::iterator br(_remoteBridgeRoutes.begin());br!=_remoteBridgeRoutes.end();) {
if (br->second == maxAddr)
_remoteBridgeRoutes.erase(br++);
else ++br;

// Kill this address from our table, since it's most likely spamming us
{
Hashtable<MAC,Address>::Iterator i(_remoteBridgeRoutes);
while (i.next(m,a)) {
if (*a == maxAddr)
_remoteBridgeRoutes.erase(*m);
}
}
}
}

@@ -408,8 +420,8 @@ void Network::learnBridgeRoute(const MAC &mac,const Address &addr)
void Network::learnBridgedMulticastGroup(const MulticastGroup &mg,uint64_t now)
{
Mutex::Lock _l(_lock);
unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size();
_multicastGroupsBehindMe[mg] = now;
const unsigned long tmp = (unsigned long)_multicastGroupsBehindMe.size();
_multicastGroupsBehindMe.set(mg,now);
if (tmp != _multicastGroupsBehindMe.size())
_announceMulticastGroups();
}

@@ -419,9 +431,9 @@ void Network::setEnabled(bool enabled)
Mutex::Lock _l(_lock);
if (_enabled != enabled) {
_enabled = enabled;
ZT1_VirtualNetworkConfig ctmp;
ZT_VirtualNetworkConfig ctmp;
_externalConfig(&ctmp);
_portError = RR->node->configureVirtualNetworkPort(_id,ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE,&ctmp);
_portError = RR->node->configureVirtualNetworkPort(_id,ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE,&ctmp);
}
}

@@ -432,24 +444,24 @@ void Network::destroy()
_destroyed = true;
}

ZT1_VirtualNetworkStatus Network::_status() const
ZT_VirtualNetworkStatus Network::_status() const
{
// assumes _lock is locked
if (_portError)
return ZT1_NETWORK_STATUS_PORT_ERROR;
return ZT_NETWORK_STATUS_PORT_ERROR;
switch(_netconfFailure) {
case NETCONF_FAILURE_ACCESS_DENIED:
return ZT1_NETWORK_STATUS_ACCESS_DENIED;
return ZT_NETWORK_STATUS_ACCESS_DENIED;
case NETCONF_FAILURE_NOT_FOUND:
return ZT1_NETWORK_STATUS_NOT_FOUND;
return ZT_NETWORK_STATUS_NOT_FOUND;
case NETCONF_FAILURE_NONE:
return ((_config) ? ZT1_NETWORK_STATUS_OK : ZT1_NETWORK_STATUS_REQUESTING_CONFIGURATION);
return ((_config) ? ZT_NETWORK_STATUS_OK : ZT_NETWORK_STATUS_REQUESTING_CONFIGURATION);
default:
return ZT1_NETWORK_STATUS_PORT_ERROR;
return ZT_NETWORK_STATUS_PORT_ERROR;
}
}

void Network::_externalConfig(ZT1_VirtualNetworkConfig *ec) const
void Network::_externalConfig(ZT_VirtualNetworkConfig *ec) const
{
// assumes _lock is locked
ec->nwid = _id;

@@ -458,7 +470,7 @@ void Network::_externalConfig(ZT1_VirtualNetworkConfig *ec) const
Utils::scopy(ec->name,sizeof(ec->name),_config->name().c_str());
else ec->name[0] = (char)0;
ec->status = _status();
ec->type = (_config) ? (_config->isPrivate() ? ZT1_NETWORK_TYPE_PRIVATE : ZT1_NETWORK_TYPE_PUBLIC) : ZT1_NETWORK_TYPE_PRIVATE;
ec->type = (_config) ? (_config->isPrivate() ? ZT_NETWORK_TYPE_PRIVATE : ZT_NETWORK_TYPE_PUBLIC) : ZT_NETWORK_TYPE_PRIVATE;
ec->mtu = ZT_IF_MTU;
ec->dhcp = 0;
ec->bridge = (_config) ? ((_config->allowPassiveBridging() || (std::find(_config->activeBridges().begin(),_config->activeBridges().end(),RR->identity.address()) != _config->activeBridges().end())) ? 1 : 0) : 0;

@@ -467,7 +479,7 @@ void Network::_externalConfig(ZT1_VirtualNetworkConfig *ec) const
ec->enabled = (_enabled) ? 1 : 0;
ec->netconfRevision = (_config) ? (unsigned long)_config->revision() : 0;

ec->multicastSubscriptionCount = std::min((unsigned int)_myMulticastGroups.size(),(unsigned int)ZT1_MAX_NETWORK_MULTICAST_SUBSCRIPTIONS);
ec->multicastSubscriptionCount = std::min((unsigned int)_myMulticastGroups.size(),(unsigned int)ZT_MAX_NETWORK_MULTICAST_SUBSCRIPTIONS);
for(unsigned int i=0;i<ec->multicastSubscriptionCount;++i) {
ec->multicastSubscriptions[i].mac = _myMulticastGroups[i].mac().toInt();
ec->multicastSubscriptions[i].adi = _myMulticastGroups[i].adi();

@@ -475,7 +487,7 @@ void Network::_externalConfig(ZT1_VirtualNetworkConfig *ec) const

if (_config) {
ec->assignedAddressCount = (unsigned int)_config->staticIps().size();
for(unsigned long i=0;i<ZT1_MAX_ZT_ASSIGNED_ADDRESSES;++i) {
for(unsigned long i=0;i<ZT_MAX_ZT_ASSIGNED_ADDRESSES;++i) {
if (i < _config->staticIps().size())
memcpy(&(ec->assignedAddresses[i]),&(_config->staticIps()[i]),sizeof(struct sockaddr_storage));
}

@@ -490,12 +502,10 @@ bool Network::_isAllowed(const Address &peer) const
return false;
if (_config->isPublic())
return true;

std::map<Address,CertificateOfMembership>::const_iterator pc(_membershipCertificates.find(peer));
if (pc == _membershipCertificates.end())
return false; // no certificate on file

return _config->com().agreesWith(pc->second); // is other cert valid against ours?
const _RemoteMemberCertificateInfo *ci = _certInfo.get(peer);
if (!ci)
return false;
return _config->com().agreesWith(ci->com);
} catch (std::exception &exc) {
TRACE("isAllowed() check failed for peer %s: unexpected exception: %s",peer.toString().c_str(),exc.what());
} catch ( ... ) {

@@ -510,8 +520,7 @@ std::vector<MulticastGroup> Network::_allMulticastGroups() const
std::vector<MulticastGroup> mgs;
mgs.reserve(_myMulticastGroups.size() + _multicastGroupsBehindMe.size() + 1);
mgs.insert(mgs.end(),_myMulticastGroups.begin(),_myMulticastGroups.end());
for(std::map< MulticastGroup,uint64_t >::const_iterator i(_multicastGroupsBehindMe.begin());i!=_multicastGroupsBehindMe.end();++i)
mgs.push_back(i->first);
_multicastGroupsBehindMe.appendKeys(mgs);
if ((_config)&&(_config->enableBroadcast()))
mgs.push_back(Network::BROADCAST);
std::sort(mgs.begin(),mgs.end());

@@ -40,6 +40,7 @@

#include "Constants.hpp"
#include "NonCopyable.hpp"
#include "Hashtable.hpp"
#include "Address.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"

@@ -221,7 +222,7 @@ public:
/**
* @return Status of this network
*/
inline ZT1_VirtualNetworkStatus status() const
inline ZT_VirtualNetworkStatus status() const
{
Mutex::Lock _l(_lock);
return _status();

@@ -230,7 +231,7 @@ public:
/**
* @param ec Buffer to fill with externally-visible network configuration
*/
inline void externalConfig(ZT1_VirtualNetworkConfig *ec) const
inline void externalConfig(ZT_VirtualNetworkConfig *ec) const
{
Mutex::Lock _l(_lock);
_externalConfig(ec);

@@ -297,10 +298,10 @@ public:
inline Address findBridgeTo(const MAC &mac) const
{
Mutex::Lock _l(_lock);
std::map<MAC,Address>::const_iterator br(_remoteBridgeRoutes.find(mac));
if (br == _remoteBridgeRoutes.end())
return Address();
return br->second;
const Address *const br = _remoteBridgeRoutes.get(mac);
if (br)
return *br;
return Address();
}

/**

@@ -346,8 +347,15 @@ public:
inline bool operator>=(const Network &n) const throw() { return (_id >= n._id); }

private:
ZT1_VirtualNetworkStatus _status() const;
void _externalConfig(ZT1_VirtualNetworkConfig *ec) const; // assumes _lock is locked
struct _RemoteMemberCertificateInfo
{
_RemoteMemberCertificateInfo() : com(),lastPushed(0) {}
CertificateOfMembership com; // remote member's COM
uint64_t lastPushed; // when did we last push ours to them?
};

ZT_VirtualNetworkStatus _status() const;
void _externalConfig(ZT_VirtualNetworkConfig *ec) const; // assumes _lock is locked
bool _isAllowed(const Address &peer) const;
void _announceMulticastGroups();
std::vector<MulticastGroup> _allMulticastGroups() const;

@@ -358,13 +366,11 @@ private:
volatile bool _enabled;
volatile bool _portInitialized;

std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to including those behind us (updated periodically)
std::map< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups bridged to us and when we last saw activity on each
std::vector< MulticastGroup > _myMulticastGroups; // multicast groups that we belong to (according to tap)
Hashtable< MulticastGroup,uint64_t > _multicastGroupsBehindMe; // multicast groups that seem to be behind us and when we last saw them (if we are a bridge)
Hashtable< MAC,Address > _remoteBridgeRoutes; // remote addresses where given MACs are reachable (for tracking devices behind remote bridges)

std::map<MAC,Address> _remoteBridgeRoutes; // remote addresses where given MACs are reachable

std::map<Address,CertificateOfMembership> _membershipCertificates; // Other members' certificates of membership
std::map<Address,uint64_t> _lastPushedMembershipCertificate; // When did we last push our certificate to each remote member?
Hashtable< Address,_RemoteMemberCertificateInfo > _certInfo;

SharedPtr<NetworkConfig> _config; // Most recent network configuration, which is an immutable value-object
volatile uint64_t _lastConfigUpdate;

@@ -108,7 +108,7 @@ void NetworkConfig::_fromDictionary(const Dictionary &d)
_private = (Utils::hexStrToUInt(d.get(ZT_NETWORKCONFIG_DICT_KEY_PRIVATE,one).c_str()) != 0);
_enableBroadcast = (Utils::hexStrToUInt(d.get(ZT_NETWORKCONFIG_DICT_KEY_ENABLE_BROADCAST,one).c_str()) != 0);
_name = d.get(ZT_NETWORKCONFIG_DICT_KEY_NAME);
if (_name.length() > ZT1_MAX_NETWORK_SHORT_NAME_LENGTH)
if (_name.length() > ZT_MAX_NETWORK_SHORT_NAME_LENGTH)
throw std::invalid_argument("network short name too long (max: 255 characters)");

// In dictionary IPs are split into V4 and V6 addresses, but we don't really

@@ -142,8 +142,8 @@ void NetworkConfig::_fromDictionary(const Dictionary &d)
_localRoutes.push_back(addr);
else _staticIps.push_back(addr);
}
if (_localRoutes.size() > ZT1_MAX_ZT_ASSIGNED_ADDRESSES) throw std::invalid_argument("too many ZT-assigned routes");
if (_staticIps.size() > ZT1_MAX_ZT_ASSIGNED_ADDRESSES) throw std::invalid_argument("too many ZT-assigned IP addresses");
if (_localRoutes.size() > ZT_MAX_ZT_ASSIGNED_ADDRESSES) throw std::invalid_argument("too many ZT-assigned routes");
if (_staticIps.size() > ZT_MAX_ZT_ASSIGNED_ADDRESSES) throw std::invalid_argument("too many ZT-assigned IP addresses");
std::sort(_localRoutes.begin(),_localRoutes.end());
_localRoutes.erase(std::unique(_localRoutes.begin(),_localRoutes.end()),_localRoutes.end());
std::sort(_staticIps.begin(),_staticIps.end());

@@ -75,7 +75,6 @@ public:
* @param identity Originating peer ZeroTier identity
* @param nwid 64-bit network ID
* @param metaData Meta-data bundled with request (empty if none)
* @param haveRevision Network revision ID sent by requesting peer or 0 if none
* @param result Dictionary to receive resulting signed netconf on success
* @return Returns NETCONF_QUERY_OK if result dictionary is valid, or an error code on error
*/

@@ -85,7 +84,6 @@ public:
const Identity &identity,
uint64_t nwid,
const Dictionary &metaData,
uint64_t haveRevision,
Dictionary &result) = 0;
};
225 node/Node.cpp

@@ -48,6 +48,8 @@
#include "SelfAwareness.hpp"
#include "Defaults.hpp"

const struct sockaddr_storage ZT_SOCKADDR_NULL = {0};

namespace ZeroTier {

/****************************************************************************/

@@ -57,12 +59,12 @@ namespace ZeroTier {
Node::Node(
uint64_t now,
void *uptr,
ZT1_DataStoreGetFunction dataStoreGetFunction,
ZT1_DataStorePutFunction dataStorePutFunction,
ZT1_WirePacketSendFunction wirePacketSendFunction,
ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT1_EventCallback eventCallback,
ZT_DataStoreGetFunction dataStoreGetFunction,
ZT_DataStorePutFunction dataStorePutFunction,
ZT_WirePacketSendFunction wirePacketSendFunction,
ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT_EventCallback eventCallback,
const char *overrideRootTopology) :
_RR(this),
RR(&_RR),

@@ -141,7 +143,7 @@ Node::Node(
}
RR->topology->setRootServers(Dictionary(rt.get("rootservers","")));

postEvent(ZT1_EVENT_UP);
postEvent(ZT_EVENT_UP);
}

Node::~Node()

@@ -155,19 +157,20 @@ Node::~Node()
delete RR->sw;
}

ZT1_ResultCode Node::processWirePacket(
ZT_ResultCode Node::processWirePacket(
uint64_t now,
const struct sockaddr_storage *localAddress,
const struct sockaddr_storage *remoteAddress,
const void *packetData,
unsigned int packetLength,
volatile uint64_t *nextBackgroundTaskDeadline)
{
_now = now;
RR->sw->onRemotePacket(*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
return ZT1_RESULT_OK;
RR->sw->onRemotePacket(*(reinterpret_cast<const InetAddress *>(localAddress)),*(reinterpret_cast<const InetAddress *>(remoteAddress)),packetData,packetLength);
return ZT_RESULT_OK;
}

ZT1_ResultCode Node::processVirtualNetworkFrame(
ZT_ResultCode Node::processVirtualNetworkFrame(
uint64_t now,
uint64_t nwid,
uint64_t sourceMac,

@@ -182,8 +185,8 @@ ZT1_ResultCode Node::processVirtualNetworkFrame(
SharedPtr<Network> nw(this->network(nwid));
if (nw) {
RR->sw->onLocalEthernet(nw,MAC(sourceMac),MAC(destMac),etherType,vlanId,frameData,frameLength);
return ZT1_RESULT_OK;
} else return ZT1_RESULT_ERROR_NETWORK_NOT_FOUND;
return ZT_RESULT_OK;
} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

class _PingPeersThatNeedPing

@@ -227,12 +230,14 @@ private:
std::vector<Address> _rootAddresses;
};

ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
ZT_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
{
_now = now;
Mutex::Lock bl(_backgroundTasksLock);

if ((now - _lastPingCheck) >= ZT_PING_CHECK_INVERVAL) {
unsigned long timeUntilNextPingCheck = ZT_PING_CHECK_INVERVAL;
const uint64_t timeSinceLastPingCheck = now - _lastPingCheck;
if (timeSinceLastPingCheck >= ZT_PING_CHECK_INVERVAL) {
try {
_lastPingCheck = now;

@@ -261,7 +266,7 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
if (nr->second) {
SharedPtr<Peer> rp(RR->topology->getPeer(nr->first));
if ((rp)&&(!rp->hasActiveDirectPath(now)))
rp->attemptToContactAt(RR,nr->second,now);
rp->attemptToContactAt(RR,InetAddress(),nr->second,now);
}
}

@@ -273,10 +278,12 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
bool oldOnline = _online;
_online = ((now - pfunc.lastReceiveFromUpstream) < ZT_PEER_ACTIVITY_TIMEOUT);
if (oldOnline != _online)
postEvent(_online ? ZT1_EVENT_ONLINE : ZT1_EVENT_OFFLINE);
postEvent(_online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
} else {
timeUntilNextPingCheck -= (unsigned long)timeSinceLastPingCheck;
}

if ((now - _lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {

@@ -286,30 +293,30 @@ ZT1_ResultCode Node::processBackgroundTasks(uint64_t now,volatile uint64_t *next
RR->sa->clean(now);
RR->mc->clean(now);
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}

try {
*nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min((unsigned long)ZT_PING_CHECK_INVERVAL,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
*nextBackgroundTaskDeadline = now + (uint64_t)std::max(std::min(timeUntilNextPingCheck,RR->sw->doTimerTasks(now)),(unsigned long)ZT_CORE_TIMER_TASK_GRANULARITY);
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}

return ZT1_RESULT_OK;
return ZT_RESULT_OK;
}

ZT1_ResultCode Node::join(uint64_t nwid)
ZT_ResultCode Node::join(uint64_t nwid)
{
Mutex::Lock _l(_networks_m);
SharedPtr<Network> nw = _network(nwid);
if(!nw)
_networks.push_back(std::pair< uint64_t,SharedPtr<Network> >(nwid,SharedPtr<Network>(new Network(RR,nwid))));
std::sort(_networks.begin(),_networks.end()); // will sort by nwid since it's the first in a pair<>
return ZT1_RESULT_OK;
return ZT_RESULT_OK;
}

ZT1_ResultCode Node::leave(uint64_t nwid)
ZT_ResultCode Node::leave(uint64_t nwid)
{
std::vector< std::pair< uint64_t,SharedPtr<Network> > > newn;
Mutex::Lock _l(_networks_m);

@@ -319,25 +326,25 @@ ZT1_ResultCode Node::leave(uint64_t nwid)
else n->second->destroy();
}
_networks.swap(newn);
return ZT1_RESULT_OK;
return ZT_RESULT_OK;
}

ZT1_ResultCode Node::multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
ZT_ResultCode Node::multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
SharedPtr<Network> nw(this->network(nwid));
if (nw) {
nw->multicastSubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
return ZT1_RESULT_OK;
} else return ZT1_RESULT_ERROR_NETWORK_NOT_FOUND;
return ZT_RESULT_OK;
} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

ZT1_ResultCode Node::multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
SharedPtr<Network> nw(this->network(nwid));
if (nw) {
nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup),(uint32_t)(multicastAdi & 0xffffffff)));
return ZT1_RESULT_OK;
} else return ZT1_RESULT_ERROR_NETWORK_NOT_FOUND;
return ZT_RESULT_OK;
} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

uint64_t Node::address() const

@@ -345,7 +352,7 @@ uint64_t Node::address() const
return RR->identity.address().toInt();
}

void Node::status(ZT1_NodeStatus *status) const
void Node::status(ZT_NodeStatus *status) const
{
status->address = RR->identity.address().toInt();
status->publicIdentity = RR->publicIdentityStr.c_str();

@@ -353,19 +360,20 @@ void Node::status(ZT1_NodeStatus *status) const
status->online = _online ? 1 : 0;
}

ZT1_PeerList *Node::peers() const
ZT_PeerList *Node::peers() const
{
std::map< Address,SharedPtr<Peer> > peers(RR->topology->allPeers());
std::vector< std::pair< Address,SharedPtr<Peer> > > peers(RR->topology->allPeers());
std::sort(peers.begin(),peers.end());

char *buf = (char *)::malloc(sizeof(ZT1_PeerList) + (sizeof(ZT1_Peer) * peers.size()));
char *buf = (char *)::malloc(sizeof(ZT_PeerList) + (sizeof(ZT_Peer) * peers.size()));
if (!buf)
return (ZT1_PeerList *)0;
ZT1_PeerList *pl = (ZT1_PeerList *)buf;
pl->peers = (ZT1_Peer *)(buf + sizeof(ZT1_PeerList));
return (ZT_PeerList *)0;
ZT_PeerList *pl = (ZT_PeerList *)buf;
pl->peers = (ZT_Peer *)(buf + sizeof(ZT_PeerList));

pl->peerCount = 0;
for(std::map< Address,SharedPtr<Peer> >::iterator pi(peers.begin());pi!=peers.end();++pi) {
ZT1_Peer *p = &(pl->peers[pl->peerCount++]);
for(std::vector< std::pair< Address,SharedPtr<Peer> > >::iterator pi(peers.begin());pi!=peers.end();++pi) {
ZT_Peer *p = &(pl->peers[pl->peerCount++]);
p->address = pi->second->address().toInt();
p->lastUnicastFrame = pi->second->lastUnicastFrame();
p->lastMulticastFrame = pi->second->lastMulticastFrame();

@@ -379,7 +387,7 @@ ZT1_PeerList *Node::peers() const
p->versionRev = -1;
}
p->latency = pi->second->latency();
p->role = RR->topology->isRoot(pi->second->identity()) ? ZT1_PEER_ROLE_ROOT : ZT1_PEER_ROLE_LEAF;
p->role = RR->topology->isRoot(pi->second->identity()) ? ZT_PEER_ROLE_ROOT : ZT_PEER_ROLE_LEAF;

std::vector<RemotePath> paths(pi->second->paths());
RemotePath *bestPath = pi->second->getBestPath(_now);

@@ -398,27 +406,27 @@ ZT1_PeerList *Node::peers() const
return pl;
}

ZT1_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
Mutex::Lock _l(_networks_m);
SharedPtr<Network> nw = _network(nwid);
if(nw) {
ZT1_VirtualNetworkConfig *nc = (ZT1_VirtualNetworkConfig *)::malloc(sizeof(ZT1_VirtualNetworkConfig));
ZT_VirtualNetworkConfig *nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
nw->externalConfig(nc);
return nc;
}
return (ZT1_VirtualNetworkConfig *)0;
return (ZT_VirtualNetworkConfig *)0;
}

ZT1_VirtualNetworkList *Node::networks() const
ZT_VirtualNetworkList *Node::networks() const
{
Mutex::Lock _l(_networks_m);

char *buf = (char *)::malloc(sizeof(ZT1_VirtualNetworkList) + (sizeof(ZT1_VirtualNetworkConfig) * _networks.size()));
char *buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * _networks.size()));
if (!buf)
return (ZT1_VirtualNetworkList *)0;
ZT1_VirtualNetworkList *nl = (ZT1_VirtualNetworkList *)buf;
nl->networks = (ZT1_VirtualNetworkConfig *)(buf + sizeof(ZT1_VirtualNetworkList));
return (ZT_VirtualNetworkList *)0;
ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf;
nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));

nl->networkCount = 0;
for(std::vector< std::pair< uint64_t,SharedPtr<Network> > >::const_iterator n(_networks.begin());n!=_networks.end();++n)

@@ -433,7 +441,7 @@ void Node::freeQueryResult(void *qr)
::free(qr);
}

int Node::addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT1_LocalInterfaceAddressTrust trust)
int Node::addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust)
{
if (Path::isAddressValidForPath(*(reinterpret_cast<const InetAddress *>(addr)))) {
Mutex::Lock _l(_directPaths_m);

@@ -466,7 +474,7 @@ std::string Node::dataStoreGet(const char *name)
std::string r;
unsigned long olen = 0;
do {
long n = _dataStoreGetFunction(reinterpret_cast<ZT1_Node *>(this),_uPtr,name,buf,sizeof(buf),(unsigned long)r.length(),&olen);
long n = _dataStoreGetFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,name,buf,sizeof(buf),(unsigned long)r.length(),&olen);
if (n <= 0)
return std::string();
r.append(buf,n);

@@ -480,7 +488,7 @@ void Node::postNewerVersionIfNewer(unsigned int major,unsigned int minor,unsigne
_newestVersionSeen[0] = major;
_newestVersionSeen[1] = minor;
_newestVersionSeen[2] = rev;
this->postEvent(ZT1_EVENT_SAW_MORE_RECENT_VERSION,(const void *)_newestVersionSeen);
this->postEvent(ZT_EVENT_SAW_MORE_RECENT_VERSION,(const void *)_newestVersionSeen);
}
}

@@ -513,7 +521,7 @@ void Node::postTrace(const char *module,unsigned int line,const char *fmt,...)
tmp2[sizeof(tmp2)-1] = (char)0;

Utils::snprintf(tmp1,sizeof(tmp1),"[%s] %s:%u %s",nowstr,module,line,tmp2);
postEvent(ZT1_EVENT_TRACE,tmp1);
postEvent(ZT_EVENT_TRACE,tmp1);
}
#endif // ZT_TRACE

@@ -533,58 +541,59 @@ uint64_t Node::prng()

extern "C" {

enum ZT1_ResultCode ZT1_Node_new(
ZT1_Node **node,
enum ZT_ResultCode ZT_Node_new(
ZT_Node **node,
void *uptr,
uint64_t now,
ZT1_DataStoreGetFunction dataStoreGetFunction,
ZT1_DataStorePutFunction dataStorePutFunction,
ZT1_WirePacketSendFunction wirePacketSendFunction,
ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT1_EventCallback eventCallback,
ZT_DataStoreGetFunction dataStoreGetFunction,
ZT_DataStorePutFunction dataStorePutFunction,
ZT_WirePacketSendFunction wirePacketSendFunction,
ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT_EventCallback eventCallback,
const char *overrideRootTopology)
{
*node = (ZT1_Node *)0;
*node = (ZT_Node *)0;
try {
*node = reinterpret_cast<ZT1_Node *>(new ZeroTier::Node(now,uptr,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,eventCallback,overrideRootTopology));
return ZT1_RESULT_OK;
*node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(now,uptr,dataStoreGetFunction,dataStorePutFunction,wirePacketSendFunction,virtualNetworkFrameFunction,virtualNetworkConfigFunction,eventCallback,overrideRootTopology));
return ZT_RESULT_OK;
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch (std::runtime_error &exc) {
return ZT1_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
return ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}

void ZT1_Node_delete(ZT1_Node *node)
void ZT_Node_delete(ZT_Node *node)
{
try {
delete (reinterpret_cast<ZeroTier::Node *>(node));
} catch ( ... ) {}
}

enum ZT1_ResultCode ZT1_Node_processWirePacket(
ZT1_Node *node,
enum ZT_ResultCode ZT_Node_processWirePacket(
ZT_Node *node,
uint64_t now,
const struct sockaddr_storage *localAddress,
const struct sockaddr_storage *remoteAddress,
const void *packetData,
unsigned int packetLength,
volatile uint64_t *nextBackgroundTaskDeadline)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(now,remoteAddress,packetData,packetLength,nextBackgroundTaskDeadline);
return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(now,localAddress,remoteAddress,packetData,packetLength,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
reinterpret_cast<ZeroTier::Node *>(node)->postEvent(ZT1_EVENT_INVALID_PACKET,(const void *)remoteAddress);
return ZT1_RESULT_OK;
reinterpret_cast<ZeroTier::Node *>(node)->postEvent(ZT_EVENT_INVALID_PACKET,(const void *)remoteAddress);
return ZT_RESULT_OK;
}
}
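The only behavioral change in this wrapper is the new localAddress argument, which is passed straight through to the C++ Node. A hedged sketch of what a call site now looks like follows; the handler name, the socket plumbing, node creation, and the include path for ZeroTierOne.h are assumptions here, and only the ZT_Node_processWirePacket() signature itself is taken from the code above.

#include <stdint.h>
#include "ZeroTierOne.h" // assumed install/include path for the public C API header

// Hypothetical handler invoked after recvfrom() on one of the application's UDP sockets.
static void onUdpDatagram(ZT_Node *node,uint64_t now,
	const struct sockaddr_storage *boundLocalAddr, // local address/port the packet arrived on
	const struct sockaddr_storage *fromAddr,       // sender address as filled in by recvfrom()
	const void *data,unsigned int len,
	volatile uint64_t *nextBackgroundTaskDeadline)
{
	enum ZT_ResultCode rc = ZT_Node_processWirePacket(
		node,
		now,
		boundLocalAddr, // new in this change: tells the core which local address received the packet
		fromAddr,
		data,
		len,
		nextBackgroundTaskDeadline);
	if (rc != ZT_RESULT_OK) {
		// A fatal result code generally means the node can no longer be used.
	}
}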
enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
ZT1_Node *node,
enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
ZT_Node *node,
uint64_t now,
uint64_t nwid,
uint64_t sourceMac,

@@ -598,121 +607,121 @@ enum ZT1_ResultCode ZT1_Node_processVirtualNetworkFrame(
try {
return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(now,nwid,sourceMac,destMac,etherType,vlanId,frameData,frameLength,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}

enum ZT1_ResultCode ZT1_Node_processBackgroundTasks(ZT1_Node *node,uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
enum ZT_ResultCode ZT_Node_processBackgroundTasks(ZT_Node *node,uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(now,nextBackgroundTaskDeadline);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}

enum ZT1_ResultCode ZT1_Node_join(ZT1_Node *node,uint64_t nwid)
enum ZT_ResultCode ZT_Node_join(ZT_Node *node,uint64_t nwid)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->join(nwid);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}

enum ZT1_ResultCode ZT1_Node_leave(ZT1_Node *node,uint64_t nwid)
enum ZT_ResultCode ZT_Node_leave(ZT_Node *node,uint64_t nwid)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->leave(nwid);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}

enum ZT1_ResultCode ZT1_Node_multicastSubscribe(ZT1_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
enum ZT_ResultCode ZT_Node_multicastSubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->multicastSubscribe(nwid,multicastGroup,multicastAdi);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}

enum ZT1_ResultCode ZT1_Node_multicastUnsubscribe(ZT1_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node,uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->multicastUnsubscribe(nwid,multicastGroup,multicastAdi);
} catch (std::bad_alloc &exc) {
return ZT1_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch ( ... ) {
return ZT1_RESULT_FATAL_ERROR_INTERNAL;
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}

uint64_t ZT1_Node_address(ZT1_Node *node)
uint64_t ZT_Node_address(ZT_Node *node)
{
return reinterpret_cast<ZeroTier::Node *>(node)->address();
}

void ZT1_Node_status(ZT1_Node *node,ZT1_NodeStatus *status)
void ZT_Node_status(ZT_Node *node,ZT_NodeStatus *status)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->status(status);
} catch ( ... ) {}
}

ZT1_PeerList *ZT1_Node_peers(ZT1_Node *node)
ZT_PeerList *ZT_Node_peers(ZT_Node *node)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->peers();
} catch ( ... ) {
return (ZT1_PeerList *)0;
return (ZT_PeerList *)0;
}
}

ZT1_VirtualNetworkConfig *ZT1_Node_networkConfig(ZT1_Node *node,uint64_t nwid)
ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node,uint64_t nwid)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->networkConfig(nwid);
} catch ( ... ) {
return (ZT1_VirtualNetworkConfig *)0;
return (ZT_VirtualNetworkConfig *)0;
}
}

ZT1_VirtualNetworkList *ZT1_Node_networks(ZT1_Node *node)
ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->networks();
} catch ( ... ) {
return (ZT1_VirtualNetworkList *)0;
return (ZT_VirtualNetworkList *)0;
}
}

void ZT1_Node_freeQueryResult(ZT1_Node *node,void *qr)
void ZT_Node_freeQueryResult(ZT_Node *node,void *qr)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->freeQueryResult(qr);
} catch ( ... ) {}
}

void ZT1_Node_setNetconfMaster(ZT1_Node *node,void *networkControllerInstance)
void ZT_Node_setNetconfMaster(ZT_Node *node,void *networkControllerInstance)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->setNetconfMaster(networkControllerInstance);
} catch ( ... ) {}
}

int ZT1_Node_addLocalInterfaceAddress(ZT1_Node *node,const struct sockaddr_storage *addr,int metric,ZT1_LocalInterfaceAddressTrust trust)
int ZT_Node_addLocalInterfaceAddress(ZT_Node *node,const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->addLocalInterfaceAddress(addr,metric,trust);

@@ -721,21 +730,21 @@ int ZT1_Node_addLocalInterfaceAddress(ZT1_Node *node,const struct sockaddr_stora
}
}

void ZT1_Node_clearLocalInterfaceAddresses(ZT1_Node *node)
void ZT_Node_clearLocalInterfaceAddresses(ZT_Node *node)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->clearLocalInterfaceAddresses();
} catch ( ... ) {}
}

void ZT1_version(int *major,int *minor,int *revision,unsigned long *featureFlags)
void ZT_version(int *major,int *minor,int *revision,unsigned long *featureFlags)
{
if (major) *major = ZEROTIER_ONE_VERSION_MAJOR;
if (minor) *minor = ZEROTIER_ONE_VERSION_MINOR;
if (revision) *revision = ZEROTIER_ONE_VERSION_REVISION;
if (featureFlags) {
*featureFlags = (
ZT1_FEATURE_FLAG_THREAD_SAFE
ZT_FEATURE_FLAG_THREAD_SAFE
);
}
}

@@ -58,7 +58,7 @@ namespace ZeroTier {
/**
* Implementation of Node object as defined in CAPI
*
* The pointer returned by ZT1_Node_new() is an instance of this class.
* The pointer returned by ZT_Node_new() is an instance of this class.
*/
class Node
{

@@ -66,25 +66,26 @@ public:
Node(
uint64_t now,
void *uptr,
ZT1_DataStoreGetFunction dataStoreGetFunction,
ZT1_DataStorePutFunction dataStorePutFunction,
ZT1_WirePacketSendFunction wirePacketSendFunction,
ZT1_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT1_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT1_EventCallback eventCallback,
ZT_DataStoreGetFunction dataStoreGetFunction,
ZT_DataStorePutFunction dataStorePutFunction,
ZT_WirePacketSendFunction wirePacketSendFunction,
ZT_VirtualNetworkFrameFunction virtualNetworkFrameFunction,
ZT_VirtualNetworkConfigFunction virtualNetworkConfigFunction,
ZT_EventCallback eventCallback,
const char *overrideRootTopology);

~Node();

// Public API Functions ----------------------------------------------------

ZT1_ResultCode processWirePacket(
ZT_ResultCode processWirePacket(
uint64_t now,
const struct sockaddr_storage *localAddress,
const struct sockaddr_storage *remoteAddress,
const void *packetData,
unsigned int packetLength,
volatile uint64_t *nextBackgroundTaskDeadline);
ZT1_ResultCode processVirtualNetworkFrame(
ZT_ResultCode processVirtualNetworkFrame(
uint64_t now,
uint64_t nwid,
uint64_t sourceMac,

@@ -94,18 +95,18 @@ public:
const void *frameData,
unsigned int frameLength,
volatile uint64_t *nextBackgroundTaskDeadline);
ZT1_ResultCode processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline);
ZT1_ResultCode join(uint64_t nwid);
ZT1_ResultCode leave(uint64_t nwid);
ZT1_ResultCode multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
ZT1_ResultCode multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
ZT_ResultCode processBackgroundTasks(uint64_t now,volatile uint64_t *nextBackgroundTaskDeadline);
ZT_ResultCode join(uint64_t nwid);
ZT_ResultCode leave(uint64_t nwid);
ZT_ResultCode multicastSubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
ZT_ResultCode multicastUnsubscribe(uint64_t nwid,uint64_t multicastGroup,unsigned long multicastAdi);
uint64_t address() const;
void status(ZT1_NodeStatus *status) const;
ZT1_PeerList *peers() const;
ZT1_VirtualNetworkConfig *networkConfig(uint64_t nwid) const;
ZT1_VirtualNetworkList *networks() const;
void status(ZT_NodeStatus *status) const;
ZT_PeerList *peers() const;
ZT_VirtualNetworkConfig *networkConfig(uint64_t nwid) const;
ZT_VirtualNetworkList *networks() const;
void freeQueryResult(void *qr);
int addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT1_LocalInterfaceAddressTrust trust);
int addLocalInterfaceAddress(const struct sockaddr_storage *addr,int metric,ZT_LocalInterfaceAddressTrust trust);
void clearLocalInterfaceAddresses();
void setNetconfMaster(void *networkControllerInstance);

@@ -119,16 +120,18 @@ public:
/**
* Enqueue a ZeroTier message to be sent
*
* @param localAddress Local address
* @param addr Destination address
* @param data Packet data
* @param len Packet length
* @return True if packet appears to have been sent
*/
inline bool putPacket(const InetAddress &addr,const void *data,unsigned int len)
inline bool putPacket(const InetAddress &localAddress,const InetAddress &addr,const void *data,unsigned int len)
{
return (_wirePacketSendFunction(
reinterpret_cast<ZT1_Node *>(this),
reinterpret_cast<ZT_Node *>(this),
_uPtr,
reinterpret_cast<const struct sockaddr_storage *>(&localAddress),
reinterpret_cast<const struct sockaddr_storage *>(&addr),
data,
len) == 0);

@@ -148,7 +151,7 @@ public:
inline void putFrame(uint64_t nwid,const MAC &source,const MAC &dest,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
{
_virtualNetworkFrameFunction(
reinterpret_cast<ZT1_Node *>(this),
reinterpret_cast<ZT_Node *>(this),
_uPtr,
nwid,
source.toInt(),

@@ -184,9 +187,9 @@ public:
return _directPaths;
}

inline bool dataStorePut(const char *name,const void *data,unsigned int len,bool secure) { return (_dataStorePutFunction(reinterpret_cast<ZT1_Node *>(this),_uPtr,name,data,len,(int)secure) == 0); }
inline bool dataStorePut(const char *name,const void *data,unsigned int len,bool secure) { return (_dataStorePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,name,data,len,(int)secure) == 0); }
inline bool dataStorePut(const char *name,const std::string &data,bool secure) { return dataStorePut(name,(const void *)data.data(),(unsigned int)data.length(),secure); }
inline void dataStoreDelete(const char *name) { _dataStorePutFunction(reinterpret_cast<ZT1_Node *>(this),_uPtr,name,(const void *)0,0,0); }
inline void dataStoreDelete(const char *name) { _dataStorePutFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,name,(const void *)0,0,0); }
std::string dataStoreGet(const char *name);

/**

@@ -195,7 +198,7 @@ public:
* @param ev Event type
* @param md Meta-data (default: NULL/none)
*/
inline void postEvent(ZT1_Event ev,const void *md = (const void *)0) { _eventCallback(reinterpret_cast<ZT1_Node *>(this),_uPtr,ev,md); }
inline void postEvent(ZT_Event ev,const void *md = (const void *)0) { _eventCallback(reinterpret_cast<ZT_Node *>(this),_uPtr,ev,md); }
|
||||
|
||||
/**
|
||||
* Update virtual network port configuration
|
||||
@ -204,7 +207,7 @@ public:
|
||||
* @param op Configuration operation
|
||||
* @param nc Network configuration
|
||||
*/
|
||||
inline int configureVirtualNetworkPort(uint64_t nwid,ZT1_VirtualNetworkConfigOperation op,const ZT1_VirtualNetworkConfig *nc) { return _virtualNetworkConfigFunction(reinterpret_cast<ZT1_Node *>(this),_uPtr,nwid,op,nc); }
|
||||
inline int configureVirtualNetworkPort(uint64_t nwid,ZT_VirtualNetworkConfigOperation op,const ZT_VirtualNetworkConfig *nc) { return _virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this),_uPtr,nwid,op,nc); }
|
||||
|
||||
/**
|
||||
* @return True if we appear to be online
|
||||
@ -241,12 +244,12 @@ private:
|
||||
|
||||
void *_uPtr; // _uptr (lower case) is reserved in Visual Studio :P
|
||||
|
||||
ZT1_DataStoreGetFunction _dataStoreGetFunction;
|
||||
ZT1_DataStorePutFunction _dataStorePutFunction;
|
||||
ZT1_WirePacketSendFunction _wirePacketSendFunction;
|
||||
ZT1_VirtualNetworkFrameFunction _virtualNetworkFrameFunction;
|
||||
ZT1_VirtualNetworkConfigFunction _virtualNetworkConfigFunction;
|
||||
ZT1_EventCallback _eventCallback;
|
||||
ZT_DataStoreGetFunction _dataStoreGetFunction;
|
||||
ZT_DataStorePutFunction _dataStorePutFunction;
|
||||
ZT_WirePacketSendFunction _wirePacketSendFunction;
|
||||
ZT_VirtualNetworkFrameFunction _virtualNetworkFrameFunction;
|
||||
ZT_VirtualNetworkConfigFunction _virtualNetworkConfigFunction;
|
||||
ZT_EventCallback _eventCallback;
|
||||
|
||||
std::vector< std::pair< uint64_t, SharedPtr<Network> > > _networks;
|
||||
Mutex _networks_m;
|
||||
|
@@ -57,7 +57,7 @@ public:
* Nearly all paths will be normal trust. The other levels are for high
* performance local SDN use only.
*
* These values MUST match ZT1_LocalInterfaceAddressTrust in ZeroTierOne.h
* These values MUST match ZT_LocalInterfaceAddressTrust in ZeroTierOne.h
*/
enum Trust
{
@@ -93,7 +93,16 @@ public:
/**
* @return Preference rank, higher == better
*/
inline int preferenceRank() const throw() { return (int)_ipScope; } // IP scopes are in ascending rank order in InetAddress.hpp
inline int preferenceRank() const throw()
{
// First, since the scope enum values in InetAddress.hpp are in order of
// use preference rank, we take that. Then we multiple by two, yielding
// a sequence like 0, 2, 4, 6, etc. Then if it's IPv6 we add one. This
// makes IPv6 addresses of a given scope outrank IPv4 addresses of the
// same scope -- e.g. 1 outranks 0. This makes us prefer IPv6, but not
// if the address scope/class is of a fundamentally lower rank.
return ( ((int)_ipScope * 2) + ((_addr.ss_family == AF_INET6) ? 1 : 0) );
}

/**
* @return Path trust level
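The comment block above fully determines the new ranking formula, so a tiny standalone illustration may help; the scope values below are made-up stand-ins for InetAddress::IpScope, not the real enum:

#include <cstdio>

// Illustrative stand-ins for InetAddress::IpScope ordering (ascending preference).
enum FakeIpScope { SCOPE_LINK_LOCAL = 1, SCOPE_PRIVATE = 2, SCOPE_GLOBAL = 3 };

// Same arithmetic as the new Path::preferenceRank(): scope times two, plus one for IPv6.
static int preferenceRank(FakeIpScope scope, bool isV6) { return ((int)scope * 2) + (isV6 ? 1 : 0); }

int main()
{
	// IPv6 outranks IPv4 within the same scope...
	std::printf("global v4=%d, global v6=%d\n", preferenceRank(SCOPE_GLOBAL, false), preferenceRank(SCOPE_GLOBAL, true)); // 6 vs 7
	// ...but never beats an address of a fundamentally higher scope.
	std::printf("private v6=%d < global v4=%d\n", preferenceRank(SCOPE_PRIVATE, true), preferenceRank(SCOPE_GLOBAL, false)); // 5 < 6
	return 0;
}
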
@ -39,6 +39,9 @@
|
||||
|
||||
namespace ZeroTier {
|
||||
|
||||
// Used to send varying values for NAT keepalive
|
||||
static uint32_t _natKeepaliveBuf = 0;
|
||||
|
||||
Peer::Peer(const Identity &myIdentity,const Identity &peerIdentity)
|
||||
throw(std::runtime_error) :
|
||||
_lastUsed(0),
|
||||
@ -61,6 +64,7 @@ Peer::Peer(const Identity &myIdentity,const Identity &peerIdentity)
|
||||
|
||||
void Peer::received(
|
||||
const RuntimeEnvironment *RR,
|
||||
const InetAddress &localAddr,
|
||||
const InetAddress &remoteAddr,
|
||||
unsigned int hops,
|
||||
uint64_t packetId,
|
||||
@ -78,7 +82,7 @@ void Peer::received(
|
||||
{
|
||||
unsigned int np = _numPaths;
|
||||
for(unsigned int p=0;p<np;++p) {
|
||||
if (_paths[p].address() == remoteAddr) {
|
||||
if ((_paths[p].address() == remoteAddr)&&(_paths[p].localAddress() == localAddr)) {
|
||||
_paths[p].received(now);
|
||||
pathIsConfirmed = true;
|
||||
break;
|
||||
@ -89,13 +93,13 @@ void Peer::received(
|
||||
if ((verb == Packet::VERB_OK)&&(inReVerb == Packet::VERB_HELLO)) {
|
||||
// Learn paths if they've been confirmed via a HELLO
|
||||
RemotePath *slot = (RemotePath *)0;
|
||||
if (np < ZT1_MAX_PEER_NETWORK_PATHS) {
|
||||
if (np < ZT_MAX_PEER_NETWORK_PATHS) {
|
||||
// Add new path
|
||||
slot = &(_paths[np++]);
|
||||
} else {
|
||||
// Replace oldest non-fixed path
|
||||
uint64_t slotLRmin = 0xffffffffffffffffULL;
|
||||
for(unsigned int p=0;p<ZT1_MAX_PEER_NETWORK_PATHS;++p) {
|
||||
for(unsigned int p=0;p<ZT_MAX_PEER_NETWORK_PATHS;++p) {
|
||||
if ((!_paths[p].fixed())&&(_paths[p].lastReceived() <= slotLRmin)) {
|
||||
slotLRmin = _paths[p].lastReceived();
|
||||
slot = &(_paths[p]);
|
||||
@ -103,7 +107,7 @@ void Peer::received(
|
||||
}
|
||||
}
|
||||
if (slot) {
|
||||
*slot = RemotePath(remoteAddr,false);
|
||||
*slot = RemotePath(localAddr,remoteAddr,false);
|
||||
slot->received(now);
|
||||
_numPaths = np;
|
||||
pathIsConfirmed = true;
|
||||
@ -116,7 +120,7 @@ void Peer::received(
|
||||
if ((now - _lastPathConfirmationSent) >= ZT_MIN_PATH_CONFIRMATION_INTERVAL) {
|
||||
_lastPathConfirmationSent = now;
|
||||
TRACE("got %s via unknown path %s(%s), confirming...",Packet::verbString(verb),_id.address().toString().c_str(),remoteAddr.toString().c_str());
|
||||
attemptToContactAt(RR,remoteAddr,now);
|
||||
attemptToContactAt(RR,localAddr,remoteAddr,now);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -138,7 +142,7 @@ void Peer::received(
|
||||
for(std::vector<MulticastGroup>::const_iterator mg(mgs.begin());mg!=mgs.end();++mg) {
|
||||
if ((outp.size() + 18) > ZT_UDP_DEFAULT_PAYLOAD_MTU) {
|
||||
outp.armor(_key,true);
|
||||
RR->node->putPacket(remoteAddr,outp.data(),outp.size());
|
||||
RR->node->putPacket(localAddr,remoteAddr,outp.data(),outp.size());
|
||||
outp.reset(_id.address(),RR->identity.address(),Packet::VERB_MULTICAST_LIKE);
|
||||
}
|
||||
|
||||
@ -151,7 +155,7 @@ void Peer::received(
|
||||
}
|
||||
if (outp.size() > ZT_PROTO_MIN_PACKET_LENGTH) {
|
||||
outp.armor(_key,true);
|
||||
RR->node->putPacket(remoteAddr,outp.data(),outp.size());
|
||||
RR->node->putPacket(localAddr,remoteAddr,outp.data(),outp.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -177,7 +181,7 @@ RemotePath *Peer::getBestPath(uint64_t now)
|
||||
return bestPath;
|
||||
}
|
||||
|
||||
void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &atAddress,uint64_t now)
|
||||
void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now)
|
||||
{
|
||||
Packet outp(_id.address(),RR->identity.address(),Packet::VERB_HELLO);
|
||||
outp.append((unsigned char)ZT_PROTO_VERSION);
|
||||
@ -205,7 +209,7 @@ void Peer::attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &at
|
||||
}
|
||||
|
||||
outp.armor(_key,false); // HELLO is sent in the clear
|
||||
RR->node->putPacket(atAddress,outp.data(),outp.size());
|
||||
RR->node->putPacket(localAddr,atAddress,outp.data(),outp.size());
|
||||
}
|
||||
|
||||
void Peer::doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now)
|
||||
@@ -214,11 +218,12 @@ void Peer::doPingAndKeepalive(const RuntimeEnvironment *RR,uint64_t now)
if (bestPath) {
if ((now - bestPath->lastReceived()) >= ZT_PEER_DIRECT_PING_DELAY) {
TRACE("PING %s(%s)",_id.address().toString().c_str(),bestPath->address().toString().c_str());
attemptToContactAt(RR,bestPath->address(),now);
attemptToContactAt(RR,bestPath->localAddress(),bestPath->address(),now);
bestPath->sent(now);
} else if (((now - bestPath->lastSend()) >= ZT_NAT_KEEPALIVE_DELAY)&&(!bestPath->reliable())) {
_natKeepaliveBuf += (uint32_t)((now * 0x9e3779b1) >> 1); // tumble this around to send constantly varying (meaningless) payloads
TRACE("NAT keepalive %s(%s)",_id.address().toString().c_str(),bestPath->address().toString().c_str());
RR->node->putPacket(bestPath->address(),"",0);
RR->node->putPacket(bestPath->localAddress(),bestPath->address(),&_natKeepaliveBuf,sizeof(_natKeepaliveBuf));
bestPath->sent(now);
}
}
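The keepalive change above replaces the old empty payload with a small counter that is perturbed on every send. A standalone sketch of how the 0x9e3779b1 mix keeps successive payloads different; the timestamps below are invented for illustration:

#include <cstdint>
#include <cstdio>

int main()
{
	// Same accumulation as Peer::doPingAndKeepalive() above; only the timestamps are made up.
	uint32_t natKeepaliveBuf = 0;
	const uint64_t timestamps[3] = { 1000000001ULL, 1000002503ULL, 1000005121ULL };
	for (int i = 0; i < 3; ++i) {
		natKeepaliveBuf += (uint32_t)((timestamps[i] * 0x9e3779b1ULL) >> 1);
		std::printf("keepalive payload %d: 0x%08x\n", i, natKeepaliveBuf); // varies every time
	}
	return 0;
}
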
@ -306,13 +311,13 @@ void Peer::addPath(const RemotePath &newp)
|
||||
}
|
||||
|
||||
RemotePath *slot = (RemotePath *)0;
|
||||
if (np < ZT1_MAX_PEER_NETWORK_PATHS) {
|
||||
if (np < ZT_MAX_PEER_NETWORK_PATHS) {
|
||||
// Add new path
|
||||
slot = &(_paths[np++]);
|
||||
} else {
|
||||
// Replace oldest non-fixed path
|
||||
uint64_t slotLRmin = 0xffffffffffffffffULL;
|
||||
for(unsigned int p=0;p<ZT1_MAX_PEER_NETWORK_PATHS;++p) {
|
||||
for(unsigned int p=0;p<ZT_MAX_PEER_NETWORK_PATHS;++p) {
|
||||
if ((!_paths[p].fixed())&&(_paths[p].lastReceived() <= slotLRmin)) {
|
||||
slotLRmin = _paths[p].lastReceived();
|
||||
slot = &(_paths[p]);
|
||||
@ -350,7 +355,7 @@ bool Peer::resetWithinScope(const RuntimeEnvironment *RR,InetAddress::IpScope sc
|
||||
while (x < np) {
|
||||
if (_paths[x].address().ipScope() == scope) {
|
||||
if (_paths[x].fixed()) {
|
||||
attemptToContactAt(RR,_paths[x].address(),now);
|
||||
attemptToContactAt(RR,_paths[x].localAddress(),_paths[x].address(),now);
|
||||
_paths[y++] = _paths[x]; // keep fixed paths
|
||||
}
|
||||
} else {
|
||||
|
@ -40,6 +40,7 @@
|
||||
#include "../include/ZeroTierOne.h"
|
||||
|
||||
#include "RuntimeEnvironment.hpp"
|
||||
#include "CertificateOfMembership.hpp"
|
||||
#include "RemotePath.hpp"
|
||||
#include "Address.hpp"
|
||||
#include "Utils.hpp"
|
||||
@ -104,6 +105,7 @@ public:
|
||||
* and appears to be valid.
|
||||
*
|
||||
* @param RR Runtime environment
|
||||
* @param localAddr Local address
|
||||
* @param remoteAddr Internet address of sender
|
||||
* @param hops ZeroTier (not IP) hops
|
||||
* @param packetId Packet ID
|
||||
@ -113,6 +115,7 @@ public:
|
||||
*/
|
||||
void received(
|
||||
const RuntimeEnvironment *RR,
|
||||
const InetAddress &localAddr,
|
||||
const InetAddress &remoteAddr,
|
||||
unsigned int hops,
|
||||
uint64_t packetId,
|
||||
@ -154,10 +157,11 @@ public:
|
||||
* for NAT traversal and path verification.
|
||||
*
|
||||
* @param RR Runtime environment
|
||||
* @param localAddr Local address
|
||||
* @param atAddress Destination address
|
||||
* @param now Current time
|
||||
*/
|
||||
void attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &atAddress,uint64_t now);
|
||||
void attemptToContactAt(const RuntimeEnvironment *RR,const InetAddress &localAddr,const InetAddress &atAddress,uint64_t now);
|
||||
|
||||
/**
|
||||
* Send pings or keepalives depending on configured timeouts
|
||||
@ -413,7 +417,7 @@ private:
|
||||
uint16_t _vMinor;
|
||||
uint16_t _vRevision;
|
||||
Identity _id;
|
||||
RemotePath _paths[ZT1_MAX_PEER_NETWORK_PATHS];
|
||||
RemotePath _paths[ZT_MAX_PEER_NETWORK_PATHS];
|
||||
unsigned int _numPaths;
|
||||
unsigned int _latency;
|
||||
|
||||
|
@ -53,14 +53,18 @@ public:
|
||||
Path(),
|
||||
_lastSend(0),
|
||||
_lastReceived(0),
|
||||
_localAddress(),
|
||||
_fixed(false) {}
|
||||
|
||||
RemotePath(const InetAddress &addr,bool fixed) :
|
||||
RemotePath(const InetAddress &localAddress,const InetAddress &addr,bool fixed) :
|
||||
Path(addr,0,TRUST_NORMAL),
|
||||
_lastSend(0),
|
||||
_lastReceived(0),
|
||||
_localAddress(localAddress),
|
||||
_fixed(fixed) {}
|
||||
|
||||
inline const InetAddress &localAddress() const throw() { return _localAddress; }
|
||||
|
||||
inline uint64_t lastSend() const throw() { return _lastSend; }
|
||||
inline uint64_t lastReceived() const throw() { return _lastReceived; }
|
||||
|
||||
@ -123,7 +127,7 @@ public:
|
||||
*/
|
||||
inline bool send(const RuntimeEnvironment *RR,const void *data,unsigned int len,uint64_t now)
|
||||
{
|
||||
if (RR->node->putPacket(address(),data,len)) {
|
||||
if (RR->node->putPacket(_localAddress,address(),data,len)) {
|
||||
sent(now);
|
||||
RR->antiRec->logOutgoingZT(data,len);
|
||||
return true;
|
||||
@ -134,6 +138,7 @@ public:
|
||||
private:
|
||||
uint64_t _lastSend;
|
||||
uint64_t _lastReceived;
|
||||
InetAddress _localAddress;
|
||||
bool _fixed;
|
||||
};
|
||||
|
||||
|
@ -48,10 +48,8 @@ Public domain.
|
||||
|
||||
#define uint64 uint64_t
|
||||
|
||||
#define load_bigendian(x) Utils::ntoh(*((const uint64_t *)(x)))
|
||||
#define store_bigendian(x,u) (*((uint64_t *)(x)) = Utils::hton((u)))
|
||||
#ifdef ZT_NO_TYPE_PUNNING
|
||||
|
||||
#if 0
|
||||
static uint64 load_bigendian(const unsigned char *x)
|
||||
{
|
||||
return
|
||||
@ -77,7 +75,13 @@ static void store_bigendian(unsigned char *x,uint64 u)
|
||||
x[1] = u; u >>= 8;
|
||||
x[0] = u;
|
||||
}
|
||||
#endif
|
||||
|
||||
#else // !ZT_NO_TYPE_PUNNING
|
||||
|
||||
#define load_bigendian(x) Utils::ntoh(*((const uint64_t *)(x)))
|
||||
#define store_bigendian(x,u) (*((uint64_t *)(x)) = Utils::hton((u)))
|
||||
|
||||
#endif // ZT_NO_TYPE_PUNNING
|
||||
|
||||
#define SHR(x,c) ((x) >> (c))
|
||||
#define ROTR(x,c) (((x) >> (c)) | ((x) << (64 - (c))))
|
||||
|
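The hunk above gates the type-punning load_bigendian/store_bigendian macros behind ZT_NO_TYPE_PUNNING and keeps a byte-wise variant disabled with #if 0. For reference, a byte-wise fallback of the kind that branch would need looks roughly like this; it is a sketch, not the committed code:

#include <stdint.h>

// Byte-wise big-endian load/store, usable where casting a byte pointer to
// uint64_t (as the macros above do) would violate alignment or aliasing rules.
static inline uint64_t load_bigendian_bytes(const unsigned char *x)
{
	uint64_t u = 0;
	for (int i = 0; i < 8; ++i)
		u = (u << 8) | (uint64_t)x[i]; // most significant byte first
	return u;
}

static inline void store_bigendian_bytes(unsigned char *x, uint64_t u)
{
	for (int i = 7; i >= 0; --i) {
		x[i] = (unsigned char)(u & 0xff); // least significant byte last
		u >>= 8;
	}
}
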
@ -107,10 +107,14 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
|
||||
|
||||
// Erase all entries (other than this one) for this scope to prevent thrashing
|
||||
// Note: we should probably not use 'entry' after this
|
||||
for(std::map< PhySurfaceKey,PhySurfaceEntry >::iterator p(_phy.begin());p!=_phy.end();) {
|
||||
if ((p->first.reporter != reporter)&&(p->first.scope == scope))
|
||||
_phy.erase(p++);
|
||||
else ++p;
|
||||
{
|
||||
Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
|
||||
PhySurfaceKey *k = (PhySurfaceKey *)0;
|
||||
PhySurfaceEntry *e = (PhySurfaceEntry *)0;
|
||||
while (i.next(k,e)) {
|
||||
if ((k->reporter != reporter)&&(k->scope == scope))
|
||||
_phy.erase(*k);
|
||||
}
|
||||
}
|
||||
|
||||
_ResetWithinScope rset(RR,now,(InetAddress::IpScope)scope);
|
||||
@@ -140,26 +144,13 @@ void SelfAwareness::iam(const Address &reporter,const InetAddress &reporterPhysi
void SelfAwareness::clean(uint64_t now)
{
Mutex::Lock _l(_phy_m);
for(std::map< PhySurfaceKey,PhySurfaceEntry >::iterator p(_phy.begin());p!=_phy.end();) {
if ((now - p->second.ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
_phy.erase(p++);
else ++p;
Hashtable< PhySurfaceKey,PhySurfaceEntry >::Iterator i(_phy);
PhySurfaceKey *k = (PhySurfaceKey *)0;
PhySurfaceEntry *e = (PhySurfaceEntry *)0;
while (i.next(k,e)) {
if ((now - e->ts) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
_phy.erase(*k);
}
}

bool SelfAwareness::areGlobalIPv4PortsRandomized() const
{
int port = 0;
Mutex::Lock _l(_phy_m);
for(std::map< PhySurfaceKey,PhySurfaceEntry >::const_iterator p(_phy.begin());p!=_phy.end();++p) {
if ((p->first.scope == InetAddress::IP_SCOPE_GLOBAL)&&(p->second.mySurface.ss_family == AF_INET)) {
const int tmp = (int)p->second.mySurface.port();
if ((port)&&(tmp != port))
return true;
else port = tmp;
}
}
return false;
}

} // namespace ZeroTier
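Several structures in this commit (here _phy, later the WHOIS, defragmentation and last-unite tables in Switch, the peer table in Topology, and the ARP cache) move from std::map to ZeroTier's Hashtable and adopt the iterate-and-erase idiom shown above. A generic sketch of that idiom, assuming only the Hashtable and Iterator interface exactly as these hunks use it:

#include "node/Hashtable.hpp" // ZeroTier's hash table, as included by the headers touched in this commit

// Erase every entry matching a predicate, mirroring SelfAwareness::clean():
// Iterator walks the table, next() yields pointers to the current key and value,
// and erase() is called on the current key while iterating, exactly as the diff does.
template<typename K,typename V,typename P>
static inline void eraseMatching(ZeroTier::Hashtable<K,V> &ht, P predicate)
{
	typename ZeroTier::Hashtable<K,V>::Iterator i(ht);
	K *k = (K *)0;
	V *v = (V *)0;
	while (i.next(k,v)) {
		if (predicate(*k,*v))
			ht.erase(*k);
	}
}
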
|
@ -28,10 +28,9 @@
|
||||
#ifndef ZT_SELFAWARENESS_HPP
|
||||
#define ZT_SELFAWARENESS_HPP
|
||||
|
||||
#include <map>
|
||||
#include <vector>
|
||||
|
||||
#include "Constants.hpp"
|
||||
#include "InetAddress.hpp"
|
||||
#include "Hashtable.hpp"
|
||||
#include "Address.hpp"
|
||||
#include "Mutex.hpp"
|
||||
|
||||
@ -66,17 +65,14 @@ public:
|
||||
*/
|
||||
void clean(uint64_t now);
|
||||
|
||||
/**
|
||||
* @return True if our external (global scope) IPv4 ports appear to be randomized by a NAT device
|
||||
*/
|
||||
bool areGlobalIPv4PortsRandomized() const;
|
||||
|
||||
private:
|
||||
struct PhySurfaceKey
|
||||
{
|
||||
Address reporter;
|
||||
InetAddress::IpScope scope;
|
||||
|
||||
inline unsigned long hashCode() const throw() { return ((unsigned long)reporter.toInt() + (unsigned long)scope); }
|
||||
|
||||
PhySurfaceKey() : reporter(),scope(InetAddress::IP_SCOPE_NONE) {}
|
||||
PhySurfaceKey(const Address &r,InetAddress::IpScope s) : reporter(r),scope(s) {}
|
||||
inline bool operator<(const PhySurfaceKey &k) const throw() { return ((reporter < k.reporter) ? true : ((reporter == k.reporter) ? ((int)scope < (int)k.scope) : false)); }
|
||||
@ -93,7 +89,7 @@ private:
|
||||
|
||||
const RuntimeEnvironment *RR;
|
||||
|
||||
std::map< PhySurfaceKey,PhySurfaceEntry > _phy;
|
||||
Hashtable< PhySurfaceKey,PhySurfaceEntry > _phy;
|
||||
Mutex _phy_m;
|
||||
};
|
||||
|
||||
|
179
node/Switch.cpp
@ -67,7 +67,10 @@ static const char *etherTypeName(const unsigned int etherType)
|
||||
|
||||
Switch::Switch(const RuntimeEnvironment *renv) :
|
||||
RR(renv),
|
||||
_lastBeaconResponse(0)
|
||||
_lastBeaconResponse(0),
|
||||
_outstandingWhoisRequests(32),
|
||||
_defragQueue(32),
|
||||
_lastUniteAttempt(8) // only really used on root servers and upstreams, and it'll grow there just fine
|
||||
{
|
||||
}
|
||||
|
||||
@ -75,7 +78,7 @@ Switch::~Switch()
|
||||
{
|
||||
}
|
||||
|
||||
void Switch::onRemotePacket(const InetAddress &fromAddr,const void *data,unsigned int len)
|
||||
void Switch::onRemotePacket(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len)
|
||||
{
|
||||
try {
|
||||
if (len == 13) {
|
||||
@ -93,14 +96,14 @@ void Switch::onRemotePacket(const InetAddress &fromAddr,const void *data,unsigne
|
||||
_lastBeaconResponse = now;
|
||||
Packet outp(peer->address(),RR->identity.address(),Packet::VERB_NOP);
|
||||
outp.armor(peer->key(),false);
|
||||
RR->node->putPacket(fromAddr,outp.data(),outp.size());
|
||||
RR->node->putPacket(localAddr,fromAddr,outp.data(),outp.size());
|
||||
}
|
||||
}
|
||||
} else if (len > ZT_PROTO_MIN_FRAGMENT_LENGTH) {
|
||||
if (((const unsigned char *)data)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR) {
|
||||
_handleRemotePacketFragment(fromAddr,data,len);
|
||||
_handleRemotePacketFragment(localAddr,fromAddr,data,len);
|
||||
} else if (len >= ZT_PROTO_MIN_PACKET_LENGTH) {
|
||||
_handleRemotePacketHead(fromAddr,data,len);
|
||||
_handleRemotePacketHead(localAddr,fromAddr,data,len);
|
||||
}
|
||||
}
|
||||
} catch (std::exception &ex) {
|
||||
@ -291,7 +294,7 @@ void Switch::send(const Packet &packet,bool encrypt,uint64_t nwid)
|
||||
|
||||
if (!_trySend(packet,encrypt,nwid)) {
|
||||
Mutex::Lock _l(_txQueue_m);
|
||||
_txQueue.insert(std::pair< Address,TXQueueEntry >(packet.destination(),TXQueueEntry(RR->node->now(),packet,encrypt,nwid)));
|
||||
_txQueue.push_back(TXQueueEntry(packet.destination(),RR->node->now(),packet,encrypt,nwid));
|
||||
}
|
||||
}
|
||||
|
||||
@ -309,31 +312,18 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
|
||||
|
||||
const uint64_t now = RR->node->now();
|
||||
|
||||
std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
|
||||
if (!(cg.first))
|
||||
return false;
|
||||
|
||||
if (cg.first.ipScope() != cg.second.ipScope())
|
||||
return false;
|
||||
|
||||
// Addresses are sorted in key for last unite attempt map for order
|
||||
// invariant lookup: (p1,p2) == (p2,p1)
|
||||
Array<Address,2> uniteKey;
|
||||
if (p1 >= p2) {
|
||||
uniteKey[0] = p2;
|
||||
uniteKey[1] = p1;
|
||||
} else {
|
||||
uniteKey[0] = p1;
|
||||
uniteKey[1] = p2;
|
||||
}
|
||||
{
|
||||
Mutex::Lock _l(_lastUniteAttempt_m);
|
||||
std::map< Array< Address,2 >,uint64_t >::const_iterator e(_lastUniteAttempt.find(uniteKey));
|
||||
if ((!force)&&(e != _lastUniteAttempt.end())&&((now - e->second) < ZT_MIN_UNITE_INTERVAL))
|
||||
uint64_t &luts = _lastUniteAttempt[_LastUniteKey(p1,p2)];
|
||||
if (((now - luts) < ZT_MIN_UNITE_INTERVAL)&&(!force))
|
||||
return false;
|
||||
else _lastUniteAttempt[uniteKey] = now;
|
||||
luts = now;
|
||||
}
|
||||
|
||||
std::pair<InetAddress,InetAddress> cg(Peer::findCommonGround(*p1p,*p2p,now));
|
||||
if ((!(cg.first))||(cg.first.ipScope() != cg.second.ipScope()))
|
||||
return false;
|
||||
|
||||
TRACE("unite: %s(%s) <> %s(%s)",p1.toString().c_str(),cg.second.toString().c_str(),p2.toString().c_str(),cg.first.toString().c_str());
|
||||
|
||||
/* Tell P1 where to find P2 and vice versa, sending the packets to P1 and
|
||||
@ -386,14 +376,14 @@ bool Switch::unite(const Address &p1,const Address &p2,bool force)
|
||||
return true;
|
||||
}
|
||||
|
||||
void Switch::rendezvous(const SharedPtr<Peer> &peer,const InetAddress &atAddr)
|
||||
void Switch::rendezvous(const SharedPtr<Peer> &peer,const InetAddress &localAddr,const InetAddress &atAddr)
|
||||
{
|
||||
TRACE("sending NAT-t message to %s(%s)",peer->address().toString().c_str(),atAddr.toString().c_str());
|
||||
const uint64_t now = RR->node->now();
|
||||
peer->attemptToContactAt(RR,atAddr,now);
|
||||
peer->attemptToContactAt(RR,localAddr,atAddr,now);
|
||||
{
|
||||
Mutex::Lock _l(_contactQueue_m);
|
||||
_contactQueue.push_back(ContactQueueEntry(peer,now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,atAddr));
|
||||
_contactQueue.push_back(ContactQueueEntry(peer,now + ZT_NAT_T_TACTICAL_ESCALATION_DELAY,localAddr,atAddr));
|
||||
}
|
||||
}
|
||||
|
||||
@ -402,10 +392,13 @@ void Switch::requestWhois(const Address &addr)
|
||||
bool inserted = false;
|
||||
{
|
||||
Mutex::Lock _l(_outstandingWhoisRequests_m);
|
||||
std::pair< std::map< Address,WhoisRequest >::iterator,bool > entry(_outstandingWhoisRequests.insert(std::pair<Address,WhoisRequest>(addr,WhoisRequest())));
|
||||
if ((inserted = entry.second))
|
||||
entry.first->second.lastSent = RR->node->now();
|
||||
entry.first->second.retries = 0; // reset retry count if entry already existed
|
||||
WhoisRequest &r = _outstandingWhoisRequests[addr];
|
||||
if (r.lastSent) {
|
||||
r.retries = 0; // reset retry count if entry already existed, but keep waiting and retry again after normal timeout
|
||||
} else {
|
||||
r.lastSent = RR->node->now();
|
||||
inserted = true;
|
||||
}
|
||||
}
|
||||
if (inserted)
|
||||
_sendWhoisRequest(addr,(const Address *)0,0);
|
||||
@ -435,11 +428,12 @@ void Switch::doAnythingWaitingForPeer(const SharedPtr<Peer> &peer)
|
||||
|
||||
{ // finish sending any packets waiting on peer's public key / identity
|
||||
Mutex::Lock _l(_txQueue_m);
|
||||
std::pair< std::multimap< Address,TXQueueEntry >::iterator,std::multimap< Address,TXQueueEntry >::iterator > waitingTxQueueItems(_txQueue.equal_range(peer->address()));
|
||||
for(std::multimap< Address,TXQueueEntry >::iterator txi(waitingTxQueueItems.first);txi!=waitingTxQueueItems.second;) {
|
||||
if (_trySend(txi->second.packet,txi->second.encrypt,txi->second.nwid))
|
||||
_txQueue.erase(txi++);
|
||||
else ++txi;
|
||||
for(std::list< TXQueueEntry >::iterator txi(_txQueue.begin());txi!=_txQueue.end();) {
|
||||
if (txi->dest == peer->address()) {
|
||||
if (_trySend(txi->packet,txi->encrypt,txi->nwid))
|
||||
_txQueue.erase(txi++);
|
||||
else ++txi;
|
||||
} else ++txi;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -459,14 +453,14 @@ unsigned long Switch::doTimerTasks(uint64_t now)
|
||||
} else {
|
||||
if (qi->strategyIteration == 0) {
|
||||
// First strategy: send packet directly to destination
|
||||
qi->peer->attemptToContactAt(RR,qi->inaddr,now);
|
||||
qi->peer->attemptToContactAt(RR,qi->localAddr,qi->inaddr,now);
|
||||
} else if (qi->strategyIteration <= 4) {
|
||||
// Strategies 1-4: try escalating ports for symmetric NATs that remap sequentially
|
||||
InetAddress tmpaddr(qi->inaddr);
|
||||
int p = (int)qi->inaddr.port() + qi->strategyIteration;
|
||||
if (p < 0xffff) {
|
||||
tmpaddr.setPort((unsigned int)p);
|
||||
qi->peer->attemptToContactAt(RR,tmpaddr,now);
|
||||
qi->peer->attemptToContactAt(RR,qi->localAddr,tmpaddr,now);
|
||||
} else qi->strategyIteration = 5;
|
||||
} else {
|
||||
// All strategies tried, expire entry
|
||||
@ -486,36 +480,37 @@ unsigned long Switch::doTimerTasks(uint64_t now)
|
||||
|
||||
{ // Retry outstanding WHOIS requests
|
||||
Mutex::Lock _l(_outstandingWhoisRequests_m);
|
||||
for(std::map< Address,WhoisRequest >::iterator i(_outstandingWhoisRequests.begin());i!=_outstandingWhoisRequests.end();) {
|
||||
unsigned long since = (unsigned long)(now - i->second.lastSent);
|
||||
Hashtable< Address,WhoisRequest >::Iterator i(_outstandingWhoisRequests);
|
||||
Address *a = (Address *)0;
|
||||
WhoisRequest *r = (WhoisRequest *)0;
|
||||
while (i.next(a,r)) {
|
||||
const unsigned long since = (unsigned long)(now - r->lastSent);
|
||||
if (since >= ZT_WHOIS_RETRY_DELAY) {
|
||||
if (i->second.retries >= ZT_MAX_WHOIS_RETRIES) {
|
||||
TRACE("WHOIS %s timed out",i->first.toString().c_str());
|
||||
_outstandingWhoisRequests.erase(i++);
|
||||
continue;
|
||||
if (r->retries >= ZT_MAX_WHOIS_RETRIES) {
|
||||
TRACE("WHOIS %s timed out",a->toString().c_str());
|
||||
_outstandingWhoisRequests.erase(*a);
|
||||
} else {
|
||||
i->second.lastSent = now;
|
||||
i->second.peersConsulted[i->second.retries] = _sendWhoisRequest(i->first,i->second.peersConsulted,i->second.retries);
|
||||
++i->second.retries;
|
||||
TRACE("WHOIS %s (retry %u)",i->first.toString().c_str(),i->second.retries);
|
||||
r->lastSent = now;
|
||||
r->peersConsulted[r->retries] = _sendWhoisRequest(*a,r->peersConsulted,r->retries);
|
||||
++r->retries;
|
||||
TRACE("WHOIS %s (retry %u)",a->toString().c_str(),r->retries);
|
||||
nextDelay = std::min(nextDelay,(unsigned long)ZT_WHOIS_RETRY_DELAY);
|
||||
}
|
||||
} else {
|
||||
nextDelay = std::min(nextDelay,ZT_WHOIS_RETRY_DELAY - since);
|
||||
}
|
||||
++i;
|
||||
}
|
||||
}
|
||||
|
||||
{ // Time out TX queue packets that never got WHOIS lookups or other info.
|
||||
Mutex::Lock _l(_txQueue_m);
|
||||
for(std::multimap< Address,TXQueueEntry >::iterator i(_txQueue.begin());i!=_txQueue.end();) {
|
||||
if (_trySend(i->second.packet,i->second.encrypt,i->second.nwid))
|
||||
_txQueue.erase(i++);
|
||||
else if ((now - i->second.creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
|
||||
TRACE("TX %s -> %s timed out",i->second.packet.source().toString().c_str(),i->second.packet.destination().toString().c_str());
|
||||
_txQueue.erase(i++);
|
||||
} else ++i;
|
||||
for(std::list< TXQueueEntry >::iterator txi(_txQueue.begin());txi!=_txQueue.end();) {
|
||||
if (_trySend(txi->packet,txi->encrypt,txi->nwid))
|
||||
_txQueue.erase(txi++);
|
||||
else if ((now - txi->creationTime) > ZT_TRANSMIT_QUEUE_TIMEOUT) {
|
||||
TRACE("TX %s -> %s timed out",txi->packet.source().toString().c_str(),txi->packet.destination().toString().c_str());
|
||||
_txQueue.erase(txi++);
|
||||
} else ++txi;
|
||||
}
|
||||
}
|
||||
|
||||
@ -531,18 +526,32 @@ unsigned long Switch::doTimerTasks(uint64_t now)
|
||||
|
||||
{ // Time out packets that didn't get all their fragments.
|
||||
Mutex::Lock _l(_defragQueue_m);
|
||||
for(std::map< uint64_t,DefragQueueEntry >::iterator i(_defragQueue.begin());i!=_defragQueue.end();) {
|
||||
if ((now - i->second.creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
|
||||
TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",i->first);
|
||||
_defragQueue.erase(i++);
|
||||
} else ++i;
|
||||
Hashtable< uint64_t,DefragQueueEntry >::Iterator i(_defragQueue);
|
||||
uint64_t *packetId = (uint64_t *)0;
|
||||
DefragQueueEntry *qe = (DefragQueueEntry *)0;
|
||||
while (i.next(packetId,qe)) {
|
||||
if ((now - qe->creationTime) > ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT) {
|
||||
TRACE("incomplete fragmented packet %.16llx timed out, fragments discarded",*packetId);
|
||||
_defragQueue.erase(*packetId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{ // Remove really old last unite attempt entries to keep table size controlled
|
||||
Mutex::Lock _l(_lastUniteAttempt_m);
|
||||
Hashtable< _LastUniteKey,uint64_t >::Iterator i(_lastUniteAttempt);
|
||||
_LastUniteKey *k = (_LastUniteKey *)0;
|
||||
uint64_t *v = (uint64_t *)0;
|
||||
while (i.next(k,v)) {
|
||||
if ((now - *v) >= (ZT_MIN_UNITE_INTERVAL * 16))
|
||||
_lastUniteAttempt.erase(*k);
|
||||
}
|
||||
}
|
||||
|
||||
return nextDelay;
|
||||
}
|
||||
|
||||
void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void *data,unsigned int len)
|
||||
void Switch::_handleRemotePacketFragment(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len)
|
||||
{
|
||||
Packet::Fragment fragment(data,len);
|
||||
Address destination(fragment.destination());
|
||||
@ -577,32 +586,31 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void
|
||||
// seeing a Packet::Fragment?
|
||||
|
||||
Mutex::Lock _l(_defragQueue_m);
|
||||
std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));
|
||||
DefragQueueEntry &dq = _defragQueue[pid];
|
||||
|
||||
if (dqe == _defragQueue.end()) {
|
||||
if (!dq.creationTime) {
|
||||
// We received a Packet::Fragment without its head, so queue it and wait
|
||||
|
||||
DefragQueueEntry &dq = _defragQueue[pid];
|
||||
dq.creationTime = RR->node->now();
|
||||
dq.frags[fno - 1] = fragment;
|
||||
dq.totalFragments = tf; // total fragment count is known
|
||||
dq.haveFragments = 1 << fno; // we have only this fragment
|
||||
//TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
|
||||
} else if (!(dqe->second.haveFragments & (1 << fno))) {
|
||||
} else if (!(dq.haveFragments & (1 << fno))) {
|
||||
// We have other fragments and maybe the head, so add this one and check
|
||||
|
||||
dqe->second.frags[fno - 1] = fragment;
|
||||
dqe->second.totalFragments = tf;
|
||||
dq.frags[fno - 1] = fragment;
|
||||
dq.totalFragments = tf;
|
||||
//TRACE("fragment (%u/%u) of %.16llx from %s",fno + 1,tf,pid,fromAddr.toString().c_str());
|
||||
|
||||
if (Utils::countBits(dqe->second.haveFragments |= (1 << fno)) == tf) {
|
||||
if (Utils::countBits(dq.haveFragments |= (1 << fno)) == tf) {
|
||||
// We have all fragments -- assemble and process full Packet
|
||||
//TRACE("packet %.16llx is complete, assembling and processing...",pid);
|
||||
|
||||
SharedPtr<IncomingPacket> packet(dqe->second.frag0);
|
||||
SharedPtr<IncomingPacket> packet(dq.frag0);
|
||||
for(unsigned int f=1;f<tf;++f)
|
||||
packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
|
||||
_defragQueue.erase(dqe);
|
||||
packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength());
|
||||
_defragQueue.erase(pid); // dq no longer valid after this
|
||||
|
||||
if (!packet->tryDecode(RR)) {
|
||||
Mutex::Lock _l(_rxQueue_m);
|
||||
@ -614,9 +622,9 @@ void Switch::_handleRemotePacketFragment(const InetAddress &fromAddr,const void
|
||||
}
|
||||
}
|
||||
|
||||
void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *data,unsigned int len)
|
||||
void Switch::_handleRemotePacketHead(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len)
|
||||
{
|
||||
SharedPtr<IncomingPacket> packet(new IncomingPacket(data,len,fromAddr,RR->node->now()));
|
||||
SharedPtr<IncomingPacket> packet(new IncomingPacket(data,len,localAddr,fromAddr,RR->node->now()));
|
||||
|
||||
Address source(packet->source());
|
||||
Address destination(packet->destination());
|
||||
@ -645,26 +653,27 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
|
||||
|
||||
uint64_t pid = packet->packetId();
|
||||
Mutex::Lock _l(_defragQueue_m);
|
||||
std::map< uint64_t,DefragQueueEntry >::iterator dqe(_defragQueue.find(pid));
|
||||
DefragQueueEntry &dq = _defragQueue[pid];
|
||||
|
||||
if (dqe == _defragQueue.end()) {
|
||||
if (!dq.creationTime) {
|
||||
// If we have no other fragments yet, create an entry and save the head
|
||||
DefragQueueEntry &dq = _defragQueue[pid];
|
||||
|
||||
dq.creationTime = RR->node->now();
|
||||
dq.frag0 = packet;
|
||||
dq.totalFragments = 0; // 0 == unknown, waiting for Packet::Fragment
|
||||
dq.haveFragments = 1; // head is first bit (left to right)
|
||||
//TRACE("fragment (0/?) of %.16llx from %s",pid,fromAddr.toString().c_str());
|
||||
} else if (!(dqe->second.haveFragments & 1)) {
|
||||
} else if (!(dq.haveFragments & 1)) {
|
||||
// If we have other fragments but no head, see if we are complete with the head
|
||||
if ((dqe->second.totalFragments)&&(Utils::countBits(dqe->second.haveFragments |= 1) == dqe->second.totalFragments)) {
|
||||
|
||||
if ((dq.totalFragments)&&(Utils::countBits(dq.haveFragments |= 1) == dq.totalFragments)) {
|
||||
// We have all fragments -- assemble and process full Packet
|
||||
|
||||
//TRACE("packet %.16llx is complete, assembling and processing...",pid);
|
||||
// packet already contains head, so append fragments
|
||||
for(unsigned int f=1;f<dqe->second.totalFragments;++f)
|
||||
packet->append(dqe->second.frags[f - 1].payload(),dqe->second.frags[f - 1].payloadLength());
|
||||
_defragQueue.erase(dqe);
|
||||
for(unsigned int f=1;f<dq.totalFragments;++f)
|
||||
packet->append(dq.frags[f - 1].payload(),dq.frags[f - 1].payloadLength());
|
||||
_defragQueue.erase(pid); // dq no longer valid after this
|
||||
|
||||
if (!packet->tryDecode(RR)) {
|
||||
Mutex::Lock _l(_rxQueue_m);
|
||||
@ -672,7 +681,7 @@ void Switch::_handleRemotePacketHead(const InetAddress &fromAddr,const void *dat
|
||||
}
|
||||
} else {
|
||||
// Still waiting on more fragments, so queue the head
|
||||
dqe->second.frag0 = packet;
|
||||
dq.frag0 = packet;
|
||||
}
|
||||
} // else this is a duplicate head, ignore
|
||||
} else {
|
||||
|
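The defragmentation rewrite above still tracks arrival state as a bit mask (haveFragments) and declares the packet complete when the count of set bits equals totalFragments. A minimal standalone sketch of that bookkeeping, with a compiler popcount builtin standing in for Utils::countBits():

#include <cstdint>
#include <cstdio>

// Track which fragments of a packet have arrived. Bit 0 is the head and
// bits 1..n-1 are Packet::Fragment numbers, like DefragQueueEntry above.
struct FragmentTracker
{
	FragmentTracker() : totalFragments(0), haveFragments(0) {}

	bool receiveHead() { haveFragments |= 1; return complete(); }
	bool receiveFragment(unsigned int fragmentNo, unsigned int totalCount)
	{
		totalFragments = totalCount;          // total count (including the head) is learned from any fragment
		haveFragments |= (1u << fragmentNo);  // mark this fragment as present
		return complete();
	}
	bool complete() const
	{
		if (!totalFragments) return false;    // total unknown until a fragment arrives
		return (__builtin_popcount(haveFragments) == (int)totalFragments);
	}

	unsigned int totalFragments;
	uint32_t haveFragments;
};

int main()
{
	FragmentTracker t;
	t.receiveFragment(2, 3); // fragment #2 of a 3-piece packet
	t.receiveFragment(1, 3);
	std::printf("complete after head: %s\n", t.receiveHead() ? "yes" : "no"); // yes
	return 0;
}
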
@ -45,6 +45,7 @@
|
||||
#include "Network.hpp"
|
||||
#include "SharedPtr.hpp"
|
||||
#include "IncomingPacket.hpp"
|
||||
#include "Hashtable.hpp"
|
||||
|
||||
/* Ethernet frame types that might be relevant to us */
|
||||
#define ZT_ETHERTYPE_IPV4 0x0800
|
||||
@ -78,11 +79,12 @@ public:
|
||||
/**
|
||||
* Called when a packet is received from the real network
|
||||
*
|
||||
* @param localAddr Local interface address
|
||||
* @param fromAddr Internet IP address of origin
|
||||
* @param data Packet data
|
||||
* @param len Packet length
|
||||
*/
|
||||
void onRemotePacket(const InetAddress &fromAddr,const void *data,unsigned int len);
|
||||
void onRemotePacket(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len);
|
||||
|
||||
/**
|
||||
* Called when a packet comes from a local Ethernet tap
|
||||
@ -139,9 +141,10 @@ public:
|
||||
* Attempt NAT traversal to peer at a given physical address
|
||||
*
|
||||
* @param peer Peer to contact
|
||||
* @param localAddr Local interface address
|
||||
* @param atAddr Address of peer
|
||||
*/
|
||||
void rendezvous(const SharedPtr<Peer> &peer,const InetAddress &atAddr);
|
||||
void rendezvous(const SharedPtr<Peer> &peer,const InetAddress &localAddr,const InetAddress &atAddr);
|
||||
|
||||
/**
|
||||
* Request WHOIS on a given address
|
||||
@ -178,8 +181,8 @@ public:
|
||||
unsigned long doTimerTasks(uint64_t now);
|
||||
|
||||
private:
|
||||
void _handleRemotePacketFragment(const InetAddress &fromAddr,const void *data,unsigned int len);
|
||||
void _handleRemotePacketHead(const InetAddress &fromAddr,const void *data,unsigned int len);
|
||||
void _handleRemotePacketFragment(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len);
|
||||
void _handleRemotePacketHead(const InetAddress &localAddr,const InetAddress &fromAddr,const void *data,unsigned int len);
|
||||
Address _sendWhoisRequest(const Address &addr,const Address *peersAlreadyConsulted,unsigned int numPeersAlreadyConsulted);
|
||||
bool _trySend(const Packet &packet,bool encrypt,uint64_t nwid);
|
||||
|
||||
@ -189,64 +192,87 @@ private:
|
||||
// Outsanding WHOIS requests and how many retries they've undergone
|
||||
struct WhoisRequest
|
||||
{
|
||||
WhoisRequest() : lastSent(0),retries(0) {}
|
||||
uint64_t lastSent;
|
||||
Address peersConsulted[ZT_MAX_WHOIS_RETRIES]; // by retry
|
||||
unsigned int retries; // 0..ZT_MAX_WHOIS_RETRIES
|
||||
};
|
||||
std::map< Address,WhoisRequest > _outstandingWhoisRequests;
|
||||
Hashtable< Address,WhoisRequest > _outstandingWhoisRequests;
|
||||
Mutex _outstandingWhoisRequests_m;
|
||||
|
||||
// Packet defragmentation queue -- comes before RX queue in path
|
||||
struct DefragQueueEntry
|
||||
{
|
||||
DefragQueueEntry() : creationTime(0),totalFragments(0),haveFragments(0) {}
|
||||
uint64_t creationTime;
|
||||
SharedPtr<IncomingPacket> frag0;
|
||||
Packet::Fragment frags[ZT_MAX_PACKET_FRAGMENTS - 1];
|
||||
unsigned int totalFragments; // 0 if only frag0 received, waiting for frags
|
||||
uint32_t haveFragments; // bit mask, LSB to MSB
|
||||
};
|
||||
std::map< uint64_t,DefragQueueEntry > _defragQueue;
|
||||
Hashtable< uint64_t,DefragQueueEntry > _defragQueue;
|
||||
Mutex _defragQueue_m;
|
||||
|
||||
// ZeroTier-layer RX queue of incoming packets in the process of being decoded
|
||||
std::list< SharedPtr<IncomingPacket> > _rxQueue;
|
||||
Mutex _rxQueue_m;
|
||||
|
||||
// ZeroTier-layer TX queue by destination ZeroTier address
|
||||
// ZeroTier-layer TX queue entry
|
||||
struct TXQueueEntry
|
||||
{
|
||||
TXQueueEntry() {}
|
||||
TXQueueEntry(uint64_t ct,const Packet &p,bool enc,uint64_t nw) :
|
||||
TXQueueEntry(Address d,uint64_t ct,const Packet &p,bool enc,uint64_t nw) :
|
||||
dest(d),
|
||||
creationTime(ct),
|
||||
nwid(nw),
|
||||
packet(p),
|
||||
encrypt(enc) {}
|
||||
|
||||
Address dest;
|
||||
uint64_t creationTime;
|
||||
uint64_t nwid;
|
||||
Packet packet; // unencrypted/unMAC'd packet -- this is done at send time
|
||||
bool encrypt;
|
||||
};
|
||||
std::multimap< Address,TXQueueEntry > _txQueue;
|
||||
std::list< TXQueueEntry > _txQueue;
|
||||
Mutex _txQueue_m;
|
||||
|
||||
// Tracks sending of VERB_RENDEZVOUS to relaying peers
std::map< Array< Address,2 >,uint64_t > _lastUniteAttempt; // key is always sorted in ascending order, for set-like behavior
struct _LastUniteKey
{
_LastUniteKey() : x(0),y(0) {}
_LastUniteKey(const Address &a1,const Address &a2)
{
if (a1 > a2) {
x = a2.toInt();
y = a1.toInt();
} else {
x = a1.toInt();
y = a2.toInt();
}
}
inline unsigned long hashCode() const throw() { return ((unsigned long)x ^ (unsigned long)y); }
inline bool operator==(const _LastUniteKey &k) const throw() { return ((x == k.x)&&(y == k.y)); }
uint64_t x,y;
};
Hashtable< _LastUniteKey,uint64_t > _lastUniteAttempt; // key is always sorted in ascending order, for set-like behavior
Mutex _lastUniteAttempt_m;

// Active attempts to contact remote peers, including state of multi-phase NAT traversal
struct ContactQueueEntry
{
ContactQueueEntry() {}
ContactQueueEntry(const SharedPtr<Peer> &p,uint64_t ft,const InetAddress &a) :
ContactQueueEntry(const SharedPtr<Peer> &p,uint64_t ft,const InetAddress &laddr,const InetAddress &a) :
peer(p),
fireAtTime(ft),
inaddr(a),
localAddr(laddr),
strategyIteration(0) {}

SharedPtr<Peer> peer;
uint64_t fireAtTime;
InetAddress inaddr;
InetAddress localAddr;
unsigned int strategyIteration;
};
std::list<ContactQueueEntry> _contactQueue;
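_LastUniteKey above replaces the sorted Array<Address,2> key with a struct that normalizes the pair order itself, so lookups are symmetric in the two addresses. A plain-integer sketch of the same idea, with std::unordered_map standing in for ZeroTier's Hashtable and uint64_t for Address::toInt():

#include <cstddef>
#include <cstdint>
#include <cassert>
#include <unordered_map>

// Unordered-pair key: (a,b) and (b,a) normalize to the same (x,y).
struct PairKey
{
	PairKey(uint64_t a, uint64_t b) : x((a < b) ? a : b), y((a < b) ? b : a) {}
	bool operator==(const PairKey &k) const { return ((x == k.x)&&(y == k.y)); }
	uint64_t x,y;
};

struct PairKeyHasher
{
	std::size_t operator()(const PairKey &k) const { return (std::size_t)(k.x ^ k.y); } // same XOR mix as _LastUniteKey::hashCode()
};

int main()
{
	std::unordered_map<PairKey,uint64_t,PairKeyHasher> lastAttempt;
	lastAttempt[PairKey(0x1111111111ULL, 0x2222222222ULL)] = 12345;
	// Looking up with the operands swapped hits the same entry, which is the whole point.
	assert(lastAttempt[PairKey(0x2222222222ULL, 0x1111111111ULL)] == 12345);
	return 0;
}
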
|
@ -62,7 +62,7 @@ void Topology::setRootServers(const std::map< Identity,std::vector<InetAddress>
|
||||
if (!p)
|
||||
p = SharedPtr<Peer>(new Peer(RR->identity,i->first));
|
||||
for(std::vector<InetAddress>::const_iterator j(i->second.begin());j!=i->second.end();++j)
|
||||
p->addPath(RemotePath(*j,true));
|
||||
p->addPath(RemotePath(InetAddress(),*j,true));
|
||||
p->use(now);
|
||||
_rootPeers.push_back(p);
|
||||
}
|
||||
@ -103,7 +103,7 @@ SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
|
||||
const uint64_t now = RR->node->now();
|
||||
Mutex::Lock _l(_lock);
|
||||
|
||||
SharedPtr<Peer> p(_activePeers.insert(std::pair< Address,SharedPtr<Peer> >(peer->address(),peer)).first->second);
|
||||
SharedPtr<Peer> &p = _activePeers.set(peer->address(),peer);
|
||||
p->use(now);
|
||||
_saveIdentity(p->identity());
|
||||
|
||||
@ -160,9 +160,9 @@ SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCou
|
||||
if (++sna == _rootAddresses.end())
|
||||
sna = _rootAddresses.begin(); // wrap around at end
|
||||
if (*sna != RR->identity.address()) { // pick one other than us -- starting from me+1 in sorted set order
|
||||
std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.find(*sna));
|
||||
if ((p != _activePeers.end())&&(p->second->hasActiveDirectPath(now))) {
|
||||
bestRoot = p->second;
|
||||
SharedPtr<Peer> *p = _activePeers.get(*sna);
|
||||
if ((p)&&((*p)->hasActiveDirectPath(now))) {
|
||||
bestRoot = *p;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -249,10 +249,12 @@ bool Topology::isRoot(const Identity &id) const
|
||||
void Topology::clean(uint64_t now)
|
||||
{
|
||||
Mutex::Lock _l(_lock);
|
||||
for(std::map< Address,SharedPtr<Peer> >::iterator p(_activePeers.begin());p!=_activePeers.end();) {
|
||||
if (((now - p->second->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),p->first) == _rootAddresses.end())) {
|
||||
_activePeers.erase(p++);
|
||||
} else ++p;
|
||||
Hashtable< Address,SharedPtr<Peer> >::Iterator i(_activePeers);
|
||||
Address *a = (Address *)0;
|
||||
SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
|
||||
while (i.next(a,p))
|
||||
if (((now - (*p)->lastUsed()) >= ZT_PEER_IN_MEMORY_EXPIRATION)&&(std::find(_rootAddresses.begin(),_rootAddresses.end(),*a) == _rootAddresses.end())) {
|
||||
_activePeers.erase(*a);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -44,6 +44,7 @@
|
||||
#include "Mutex.hpp"
|
||||
#include "InetAddress.hpp"
|
||||
#include "Dictionary.hpp"
|
||||
#include "Hashtable.hpp"
|
||||
|
||||
namespace ZeroTier {
|
||||
|
||||
@ -163,17 +164,20 @@ public:
|
||||
inline void eachPeer(F f)
|
||||
{
|
||||
Mutex::Lock _l(_lock);
|
||||
for(std::map< Address,SharedPtr<Peer> >::const_iterator p(_activePeers.begin());p!=_activePeers.end();++p)
|
||||
f(*this,p->second);
|
||||
Hashtable< Address,SharedPtr<Peer> >::Iterator i(_activePeers);
|
||||
Address *a = (Address *)0;
|
||||
SharedPtr<Peer> *p = (SharedPtr<Peer> *)0;
|
||||
while (i.next(a,p))
|
||||
f(*this,*p);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return All currently active peers by address
|
||||
*/
|
||||
inline std::map< Address,SharedPtr<Peer> > allPeers() const
|
||||
inline std::vector< std::pair< Address,SharedPtr<Peer> > > allPeers() const
|
||||
{
|
||||
Mutex::Lock _l(_lock);
|
||||
return _activePeers;
|
||||
return _activePeers.entries();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -190,7 +194,7 @@ private:
|
||||
|
||||
const RuntimeEnvironment *RR;
|
||||
|
||||
std::map< Address,SharedPtr<Peer> > _activePeers;
|
||||
Hashtable< Address,SharedPtr<Peer> > _activePeers;
|
||||
std::map< Identity,std::vector<InetAddress> > _roots;
|
||||
std::vector< Address > _rootAddresses;
|
||||
std::vector< SharedPtr<Peer> > _rootPeers;
|
||||
|
10
one.cpp
@ -73,7 +73,7 @@
|
||||
|
||||
#include "service/OneService.hpp"
|
||||
|
||||
#define ZT1_PID_PATH "zerotier-one.pid"
|
||||
#define ZT_PID_PATH "zerotier-one.pid"
|
||||
|
||||
using namespace ZeroTier;
|
||||
|
||||
@ -910,7 +910,7 @@ static void printHelp(const char *cn,FILE *out)
|
||||
fprintf(out," -h - Display this help"ZT_EOL_S);
|
||||
fprintf(out," -v - Show version"ZT_EOL_S);
|
||||
fprintf(out," -U - Run as unprivileged user (skip privilege check)"ZT_EOL_S);
|
||||
fprintf(out," -p<port> - Port for UDP and TCP/HTTP (default: 9993)"ZT_EOL_S);
|
||||
fprintf(out," -p<port> - Port for UDP and TCP/HTTP (default: 9993, 0 for random)"ZT_EOL_S);
|
||||
//fprintf(out," -T<path> - Override root topology, do not authenticate or update"ZT_EOL_S);
|
||||
|
||||
#ifdef __UNIX_LIKE__
|
||||
@ -976,7 +976,7 @@ int main(int argc,char **argv)
|
||||
|
||||
std::string overrideRootTopology;
|
||||
std::string homeDir;
|
||||
unsigned int port = ZT1_DEFAULT_PORT;
|
||||
unsigned int port = ZT_DEFAULT_PORT;
|
||||
bool skipRootCheck = false;
|
||||
|
||||
for(int i=1;i<argc;++i) {
|
||||
@ -985,7 +985,7 @@ int main(int argc,char **argv)
|
||||
|
||||
case 'p': // port -- for both UDP and TCP, packets and control plane
|
||||
port = Utils::strToUInt(argv[i] + 2);
|
||||
if ((port > 0xffff)||(port == 0)) {
|
||||
if (port > 0xffff) {
|
||||
printHelp(argv[0],stdout);
|
||||
return 1;
|
||||
}
|
||||
@ -1154,7 +1154,7 @@ int main(int argc,char **argv)
|
||||
#endif // __WINDOWS__
|
||||
|
||||
#ifdef __UNIX_LIKE__
|
||||
std::string pidPath(homeDir + ZT_PATH_SEPARATOR_S + ZT1_PID_PATH);
|
||||
std::string pidPath(homeDir + ZT_PATH_SEPARATOR_S + ZT_PID_PATH);
|
||||
{
|
||||
// Write .pid file to home folder
|
||||
FILE *pf = fopen(pidPath.c_str(),"w");
|
||||
|
134
osdep/Arp.cpp
Normal file
@ -0,0 +1,134 @@
|
||||
/*
|
||||
* ZeroTier One - Network Virtualization Everywhere
|
||||
* Copyright (C) 2011-2015 ZeroTier, Inc.
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
* --
|
||||
*
|
||||
* ZeroTier may be used and distributed under the terms of the GPLv3, which
|
||||
* are available at: http://www.gnu.org/licenses/gpl-3.0.html
|
||||
*
|
||||
* If you would like to embed ZeroTier into a commercial application or
|
||||
* redistribute it in a modified binary form, please contact ZeroTier Networks
|
||||
* LLC. Start here: http://www.zerotier.com/
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "Arp.hpp"
|
||||
#include "OSUtils.hpp"
|
||||
|
||||
namespace ZeroTier {
|
||||
|
||||
static const uint8_t ARP_REQUEST_HEADER[8] = { 0x00,0x01,0x08,0x00,0x06,0x04,0x00,0x01 };
|
||||
static const uint8_t ARP_RESPONSE_HEADER[8] = { 0x00,0x01,0x08,0x00,0x06,0x04,0x00,0x02 };
|
||||
|
||||
Arp::Arp() :
|
||||
_cache(256),
|
||||
_lastCleaned(OSUtils::now())
|
||||
{
|
||||
}
|
||||
|
||||
void Arp::addLocal(uint32_t ip,const MAC &mac)
|
||||
{
|
||||
_ArpEntry &e = _cache[ip];
|
||||
e.lastQuerySent = 0; // local IP
|
||||
e.lastResponseReceived = 0; // local IP
|
||||
e.mac = mac;
|
||||
e.local = true;
|
||||
}
|
||||
|
||||
void Arp::remove(uint32_t ip)
|
||||
{
|
||||
_cache.erase(ip);
|
||||
}
|
||||
|
||||
uint32_t Arp::processIncomingArp(const void *arp,unsigned int len,void *response,unsigned int &responseLen,MAC &responseDest)
|
||||
{
|
||||
const uint64_t now = OSUtils::now();
|
||||
uint32_t ip = 0;
|
||||
|
||||
responseLen = 0;
|
||||
responseDest.zero();
|
||||
|
||||
if (len > 28) {
|
||||
if (!memcmp(arp,ARP_REQUEST_HEADER,8)) {
|
||||
// Respond to ARP requests for locally-known IPs
|
||||
_ArpEntry *targetEntry = _cache.get(reinterpret_cast<const uint32_t *>(arp)[6]);
|
||||
if ((targetEntry)&&(targetEntry->local)) {
|
||||
memcpy(response,ARP_RESPONSE_HEADER,8);
|
||||
targetEntry->mac.copyTo(reinterpret_cast<uint8_t *>(response) + 8,6);
|
||||
memcpy(reinterpret_cast<uint8_t *>(response) + 14,reinterpret_cast<const uint8_t *>(arp) + 24,4);
|
||||
memcpy(reinterpret_cast<uint8_t *>(response) + 18,reinterpret_cast<const uint8_t *>(arp) + 8,10);
|
||||
responseLen = 28;
|
||||
responseDest.setTo(reinterpret_cast<const uint8_t *>(arp) + 8,6);
|
||||
}
|
||||
} else if (!memcmp(arp,ARP_RESPONSE_HEADER,8)) {
|
||||
// Learn cache entries for remote IPs from relevant ARP replies
|
||||
uint32_t responseIp = 0;
|
||||
memcpy(&responseIp,reinterpret_cast<const uint8_t *>(arp) + 14,4);
|
||||
_ArpEntry *queryEntry = _cache.get(responseIp);
|
||||
if ((queryEntry)&&(!queryEntry->local)&&((now - queryEntry->lastQuerySent) <= ZT_ARP_QUERY_MAX_TTL)) {
|
||||
queryEntry->lastResponseReceived = now;
|
||||
queryEntry->mac.setTo(reinterpret_cast<const uint8_t *>(arp) + 8,6);
|
||||
ip = responseIp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((now - _lastCleaned) >= ZT_ARP_EXPIRE) {
|
||||
_lastCleaned = now;
|
||||
Hashtable< uint32_t,_ArpEntry >::Iterator i(_cache);
|
||||
uint32_t *k = (uint32_t *)0;
|
||||
_ArpEntry *v = (_ArpEntry *)0;
|
||||
while (i.next(k,v)) {
|
||||
if ((!v->local)&&((now - v->lastResponseReceived) >= ZT_ARP_EXPIRE))
|
||||
_cache.erase(*k);
|
||||
}
|
||||
}
|
||||
|
||||
return ip;
|
||||
}
|
||||
|
||||
MAC Arp::query(const MAC &localMac,uint32_t ip,void *query,unsigned int &queryLen,MAC &queryDest)
|
||||
{
|
||||
const uint64_t now = OSUtils::now();
|
||||
|
||||
_ArpEntry &e = _cache[ip];
|
||||
|
||||
if ( ((e.mac)&&((now - e.lastResponseReceived) >= (ZT_ARP_EXPIRE / 3))) ||
|
||||
((!e.mac)&&((now - e.lastQuerySent) >= ZT_ARP_QUERY_INTERVAL)) ) {
|
||||
e.lastQuerySent = now;
|
||||
|
||||
uint8_t *q = reinterpret_cast<uint8_t *>(query);
|
||||
memcpy(q,ARP_REQUEST_HEADER,8); q += 8; // ARP request header information, always the same
|
||||
localMac.copyTo(q,6); q += 6; // sending host address
|
||||
memset(q,0,10); q += 10; // sending IP and target media address are ignored in requests
|
||||
memcpy(q,&ip,4); // target IP address for resolution (IP already in big-endian byte order)
|
||||
queryLen = 28;
|
||||
if (e.mac)
|
||||
queryDest = e.mac; // confirmation query, send directly to address holder
|
||||
else queryDest = (uint64_t)0xffffffffffffULL; // broadcast query
|
||||
} else {
|
||||
queryLen = 0;
|
||||
queryDest.zero();
|
||||
}
|
||||
|
||||
return e.mac;
|
||||
}
|
||||
|
||||
} // namespace ZeroTier
|
156
osdep/Arp.hpp
Normal file
@ -0,0 +1,156 @@
/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2015 ZeroTier, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */

#ifndef ZT_ARP_HPP
#define ZT_ARP_HPP

#include <stdint.h>

#include <utility>

#include "../node/Constants.hpp"
#include "../node/Hashtable.hpp"
#include "../node/MAC.hpp"

/**
 * Maximum possible ARP length
 *
 * ARPs are 28 bytes in length, but we specify a 128-byte buffer since some
 * weird extensions we may support in the future can pad them out to as long
 * as 72 bytes.
 */
#define ZT_ARP_BUF_LENGTH 128

/**
 * Minimum permitted interval between sending ARP queries for a given IP
 */
#define ZT_ARP_QUERY_INTERVAL 2000

/**
 * Maximum time between query and response, otherwise responses are discarded to prevent poisoning
 */
#define ZT_ARP_QUERY_MAX_TTL 5000

/**
 * ARP expiration time
 */
#define ZT_ARP_EXPIRE 600000

namespace ZeroTier {

/**
 * ARP cache and resolver
 *
 * To implement ARP:
 *
 * (1) Call processIncomingArp() on all ARP packets received and then always
 * check responseLen after calling. If it is non-zero, send the contents
 * of response to responseDest.
 *
 * (2) Call query() to look up IP addresses, and then check queryLen. If it
 * is non-zero, send the contents of query to queryDest (usually broadcast).
 *
 * Note that either of these functions can technically generate a response or
 * a query at any time, so their result parameters for sending ARPs should
 * always be checked.
 *
 * This class is not thread-safe and must be guarded if used in multi-threaded
 * code.
 */
class Arp
{
public:
	Arp();

	/**
	 * Set a local IP entry that we should respond to ARPs for
	 *
	 * @param ip IP in big-endian byte order (sin_addr.s_addr)
	 * @param mac Our local MAC address
	 */
	void addLocal(uint32_t ip,const MAC &mac);

	/**
	 * Delete a local IP entry or a cached ARP entry
	 *
	 * @param ip IP in big-endian byte order (sin_addr.s_addr)
	 */
	void remove(uint32_t ip);

	/**
	 * Process ARP packets
	 *
	 * For ARP queries, a response is generated and responseLen is set to its
	 * frame payload length in bytes.
	 *
	 * For ARP responses, the cache is populated and the IP address entry that
	 * was learned is returned.
	 *
	 * @param arp ARP frame data
	 * @param len Length of ARP frame (usually 28)
	 * @param response Response buffer -- MUST be a minimum of ZT_ARP_BUF_LENGTH in size
	 * @param responseLen Response length, or set to 0 if no response
	 * @param responseDest Destination of response, or set to null if no response
	 * @return IP address learned or 0 if no new IPs in cache
	 */
	uint32_t processIncomingArp(const void *arp,unsigned int len,void *response,unsigned int &responseLen,MAC &responseDest);

	/**
	 * Get the MAC corresponding to an IP, generating a query if needed
	 *
	 * This returns a MAC for a remote IP. The local MAC is returned for local
	 * IPs as well. It may also generate a query if the IP is not known or the
	 * entry needs to be refreshed. In this case queryLen will be set to a
	 * non-zero value, so this should always be checked on return even if the
	 * MAC returned is non-null.
	 *
	 * @param localMac Local MAC address of host interface
	 * @param ip IP to look up
	 * @param query Buffer for generated query -- MUST be a minimum of ZT_ARP_BUF_LENGTH in size
	 * @param queryLen Length of generated query, or set to 0 if no query generated
	 * @param queryDest Destination of query, or set to null if no query generated
	 * @return MAC or 0 if no cached entry for this IP
	 */
	MAC query(const MAC &localMac,uint32_t ip,void *query,unsigned int &queryLen,MAC &queryDest);

private:
	struct _ArpEntry
	{
		_ArpEntry() : lastQuerySent(0),lastResponseReceived(0),mac(),local(false) {}
		uint64_t lastQuerySent; // Time last query was sent or 0 for local IP
		uint64_t lastResponseReceived; // Time of last ARP response or 0 for local IP
		MAC mac; // MAC address of device responsible for IP or null if not known yet
		bool local; // True if this is a local ARP entry
	};

	Hashtable< uint32_t,_ArpEntry > _cache;
	uint64_t _lastCleaned;
};

} // namespace ZeroTier

#endif
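The class comment above prescribes a two-call contract: feed every inbound ARP frame to processIncomingArp() and transmit whatever it asks for, and after every query() transmit any generated query frame. The sketch below is a hypothetical illustration of that contract, not code from this commit; sendEthernetFrame() is a placeholder for whatever frame-output path the embedding code provides.

// Hypothetical usage sketch for the Arp class above (not part of this diff).
// sendEthernetFrame() is a placeholder for the caller's frame output path.
#include "osdep/Arp.hpp"

using namespace ZeroTier;

static void onInboundArpFrame(Arp &arp,const void *frame,unsigned int len)
{
	uint8_t response[ZT_ARP_BUF_LENGTH];
	unsigned int responseLen = 0;
	MAC responseDest;
	arp.processIncomingArp(frame,len,response,responseLen,responseDest);
	if (responseLen > 0) // always check: queries we answer produce a response here
		sendEthernetFrame(responseDest,response,responseLen);
}

static MAC resolveIp(Arp &arp,const MAC &localMac,uint32_t ipBigEndian)
{
	uint8_t query[ZT_ARP_BUF_LENGTH];
	unsigned int queryLen = 0;
	MAC queryDest;
	MAC mac(arp.query(localMac,ipBigEndian,query,queryLen,queryDest));
	if (queryLen > 0) // a refresh query may be generated even when mac is non-null
		sendEthernetFrame(queryDest,query,queryLen);
	return mac; // null if the IP is not (yet) in the cache
}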
@ -68,7 +68,7 @@ struct HttpPhyHandler
	inline void phyOnTcpConnect(PhySocket *sock,void **uptr,bool success)
	{
		if (success) {
			phy->tcpSetNotifyWritable(sock,true);
			phy->setNotifyWritable(sock,true);
		} else {
			*responseBody = "connection failed";
			error = true;
@ -92,12 +92,12 @@ struct HttpPhyHandler
	inline void phyOnTcpWritable(PhySocket *sock,void **uptr)
	{
		if (writePtr < writeSize) {
			long n = phy->tcpSend(sock,writeBuf + writePtr,writeSize - writePtr,true);
			long n = phy->streamSend(sock,writeBuf + writePtr,writeSize - writePtr,true);
			if (n > 0)
				writePtr += n;
		}
		if (writePtr >= writeSize)
			phy->tcpSetNotifyWritable(sock,false);
			phy->setNotifyWritable(sock,false);
	}

	http_parser parser;
osdep/Phy.hpp (310 lines changed)
@ -46,6 +46,7 @@
|
||||
#define ZT_PHY_SOCKFD_VALID(s) ((s) != INVALID_SOCKET)
|
||||
#define ZT_PHY_CLOSE_SOCKET(s) ::closesocket(s)
|
||||
#define ZT_PHY_MAX_SOCKETS (FD_SETSIZE)
|
||||
#define ZT_PHY_MAX_INTERCEPTS ZT_PHY_MAX_SOCKETS
|
||||
#define ZT_PHY_SOCKADDR_STORAGE_TYPE struct sockaddr_storage
|
||||
|
||||
#else // not Windows
|
||||
@ -58,6 +59,7 @@
|
||||
#include <sys/types.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/un.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/tcp.h>
|
||||
@ -67,8 +69,14 @@
|
||||
#define ZT_PHY_SOCKFD_VALID(s) ((s) > -1)
|
||||
#define ZT_PHY_CLOSE_SOCKET(s) ::close(s)
|
||||
#define ZT_PHY_MAX_SOCKETS (FD_SETSIZE)
|
||||
#define ZT_PHY_MAX_INTERCEPTS ZT_PHY_MAX_SOCKETS
|
||||
#define ZT_PHY_SOCKADDR_STORAGE_TYPE struct sockaddr_storage
|
||||
|
||||
#if defined(__linux__) || defined(linux) || defined(__LINUX__) || defined(__linux)
|
||||
#define ZT_PHY_HAVE_EVENTFD 1
|
||||
#include <sys/eventfd.h>
|
||||
#endif
|
||||
|
||||
#endif // Windows or not
|
||||
|
||||
namespace ZeroTier {
|
||||
@ -87,6 +95,8 @@ typedef void PhySocket;
|
||||
* This class is templated on a pointer to a handler class which must
|
||||
* implement the following functions:
|
||||
*
|
||||
* For all platforms:
|
||||
*
|
||||
* phyOnDatagram(PhySocket *sock,void **uptr,const struct sockaddr *from,void *data,unsigned long len)
|
||||
* phyOnTcpConnect(PhySocket *sock,void **uptr,bool success)
|
||||
* phyOnTcpAccept(PhySocket *sockL,PhySocket *sockN,void **uptrL,void **uptrN,const struct sockaddr *from)
|
||||
@ -94,6 +104,16 @@ typedef void PhySocket;
|
||||
* phyOnTcpData(PhySocket *sock,void **uptr,void *data,unsigned long len)
|
||||
* phyOnTcpWritable(PhySocket *sock,void **uptr)
|
||||
*
|
||||
* On Linux/OSX/Unix only (not required/used on Windows or elsewhere):
|
||||
*
|
||||
* phyOnUnixAccept(PhySocket *sockL,PhySocket *sockN,void **uptrL,void **uptrN)
|
||||
* phyOnUnixClose(PhySocket *sock,void **uptr)
|
||||
* phyOnUnixData(PhySocket *sock,void **uptr,void *data,unsigned long len)
|
||||
* phyOnUnixWritable(PhySocket *sock,void **uptr)
|
||||
* phyOnSocketPairEndpointClose(PhySocket *sock,void **uptr)
|
||||
* phyOnSocketPairEndpointData(PhySocket *sock,void **uptr,void *data,unsigned long len)
|
||||
* phyOnSocketPairEndpointWritable(PhySocket *sock,void **uptr)
|
||||
*
|
||||
* These templates typically refer to function objects. Templates are used to
|
||||
* avoid the call overhead of indirection, which is surprisingly high for high
|
||||
* bandwidth applications pushing a lot of packets.
|
||||
@ -129,7 +149,10 @@ private:
|
||||
ZT_PHY_SOCKET_TCP_IN = 0x03,
|
||||
ZT_PHY_SOCKET_TCP_LISTEN = 0x04,
|
||||
ZT_PHY_SOCKET_RAW = 0x05,
|
||||
ZT_PHY_SOCKET_UDP = 0x06
|
||||
ZT_PHY_SOCKET_UDP = 0x06,
|
||||
ZT_PHY_SOCKET_UNIX_IN = 0x07,
|
||||
ZT_PHY_SOCKET_UNIX_LISTEN = 0x08,
|
||||
ZT_PHY_SOCKET_PAIR_ENDPOINT = 0x09
|
||||
};
|
||||
|
||||
struct PhySocketImpl
|
||||
@ -217,8 +240,17 @@ public:
|
||||
ZT_PHY_CLOSE_SOCKET(_whackSendSocket);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param s Socket object
|
||||
* @return Underlying OS-type (usually int or long) file descriptor associated with object
|
||||
*/
|
||||
static inline ZT_PHY_SOCKFD_TYPE getDescriptor(PhySocket *s) throw() { return reinterpret_cast<PhySocketImpl *>(s)->sock; }
|
||||
|
||||
/**
|
||||
* Cause poll() to stop waiting immediately
|
||||
*
|
||||
* This can be used to reset the polling loop after changes that require
|
||||
* attention, or to shut down a background thread that is waiting, etc.
|
||||
*/
|
||||
inline void whack()
|
||||
{
|
||||
@ -239,6 +271,58 @@ public:
|
||||
*/
|
||||
inline unsigned long maxCount() const throw() { return ZT_PHY_MAX_SOCKETS; }
|
||||
|
||||
#ifdef __UNIX_LIKE__
|
||||
/**
|
||||
* Create a two-way socket pair
|
||||
*
|
||||
* This uses socketpair() to create a local domain pair. The returned
|
||||
* PhySocket holds the local side of the socket pair, while the
|
||||
* supplied fd variable is set to the descriptor for the remote side.
|
||||
*
|
||||
* The local side is set to O_NONBLOCK to work with our poll loop, but
|
||||
* the remote descriptor is left untouched. It's up to the caller to
|
||||
* set any required fcntl(), ioctl(), or setsockopt() settings there.
|
||||
* It's also up to the caller to close the remote descriptor when
|
||||
* done, if necessary.
|
||||
*
|
||||
* @param remoteSocketDescriptor Result parameter set to remote end of socket pair's socket FD
|
||||
* @param uptr Pointer to associate with local side of socket pair
|
||||
* @return PhySocket for local side of socket pair
|
||||
*/
|
||||
inline PhySocket *createSocketPair(ZT_PHY_SOCKFD_TYPE &remoteSocketDescriptor,void *uptr = (void *)0)
|
||||
{
|
||||
if (_socks.size() >= ZT_PHY_MAX_SOCKETS)
|
||||
return (PhySocket *)0;
|
||||
|
||||
int fd[2]; fd[0] = -1; fd[1] = -1;
|
||||
if ((::socketpair(PF_LOCAL,SOCK_STREAM,0,fd) != 0)||(fd[0] <= 0)||(fd[1] <= 0))
|
||||
return (PhySocket *)0;
|
||||
fcntl(fd[0],F_SETFL,O_NONBLOCK);
|
||||
|
||||
try {
|
||||
_socks.push_back(PhySocketImpl());
|
||||
} catch ( ... ) {
|
||||
ZT_PHY_CLOSE_SOCKET(fd[0]);
|
||||
ZT_PHY_CLOSE_SOCKET(fd[1]);
|
||||
return (PhySocket *)0;
|
||||
}
|
||||
PhySocketImpl &sws = _socks.back();
|
||||
|
||||
if ((long)fd[0] > _nfds)
|
||||
_nfds = (long)fd[0];
|
||||
FD_SET(fd[0],&_readfds);
|
||||
sws.type = ZT_PHY_SOCKET_PAIR_ENDPOINT;
|
||||
sws.sock = fd[0];
|
||||
sws.uptr = uptr;
|
||||
memset(&(sws.saddr),0,sizeof(struct sockaddr_storage));
|
||||
// no sockaddr for this socket type, leave saddr null
|
||||
|
||||
remoteSocketDescriptor = fd[1];
|
||||
|
||||
return (PhySocket *)&sws;
|
||||
}
|
||||
#endif // __UNIX_LIKE__
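A minimal sketch of how createSocketPair() might be consumed, assuming a handler type that implements the callbacks listed in the class comment (all no-ops here except the socket-pair data callback). This is illustrative only, not part of the change; the constructor arguments simply mirror the OneServiceImpl usage elsewhere in this diff.

// Illustrative only: a no-op handler plus a socket pair used to push bytes into
// the poll loop from outside it.
#include <stdio.h>
#include <unistd.h>
#include "osdep/Phy.hpp"

using namespace ZeroTier;

struct PairHandler
{
	inline void phyOnDatagram(PhySocket *sock,void **uptr,const struct sockaddr *from,void *data,unsigned long len) {}
	inline void phyOnTcpConnect(PhySocket *sock,void **uptr,bool success) {}
	inline void phyOnTcpAccept(PhySocket *sockL,PhySocket *sockN,void **uptrL,void **uptrN,const struct sockaddr *from) {}
	inline void phyOnTcpClose(PhySocket *sock,void **uptr) {}
	inline void phyOnTcpData(PhySocket *sock,void **uptr,void *data,unsigned long len) {}
	inline void phyOnTcpWritable(PhySocket *sock,void **uptr) {}
	inline void phyOnUnixAccept(PhySocket *sockL,PhySocket *sockN,void **uptrL,void **uptrN) {}
	inline void phyOnUnixClose(PhySocket *sock,void **uptr) {}
	inline void phyOnUnixData(PhySocket *sock,void **uptr,void *data,unsigned long len) {}
	inline void phyOnUnixWritable(PhySocket *sock,void **uptr) {}
	inline void phyOnSocketPairEndpointClose(PhySocket *sock,void **uptr) {}
	inline void phyOnSocketPairEndpointData(PhySocket *sock,void **uptr,void *data,unsigned long len)
		{ printf("pair endpoint got %lu bytes\n",len); }
	inline void phyOnSocketPairEndpointWritable(PhySocket *sock,void **uptr) {}
};

int main()
{
	PairHandler handler;
	Phy<PairHandler *> phy(&handler,false,true); // constructor arguments mirror OneServiceImpl below

	int remoteFd = -1;
	PhySocket *local = phy.createSocketPair(remoteFd,(void *)0);
	if (!local)
		return 1;

	::write(remoteFd,"wake",4); // e.g. from another thread
	phy.poll(1000);             // next poll pass delivers the bytes via phyOnSocketPairEndpointData()
	::close(remoteFd);
	return 0;
}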
|
||||
|
||||
/**
|
||||
* Bind a UDP socket
|
||||
*
|
||||
@ -358,6 +442,64 @@ public:
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef __UNIX_LIKE__
|
||||
/**
|
||||
* Listen for connections on a Unix domain socket
|
||||
*
|
||||
* @param path Path to Unix domain socket
|
||||
* @param uptr Arbitrary pointer to associate
|
||||
* @return PhySocket or NULL if cannot bind
|
||||
*/
|
||||
inline PhySocket *unixListen(const char *path,void *uptr = (void *)0)
|
||||
{
|
||||
struct sockaddr_un sun;
|
||||
|
||||
if (_socks.size() >= ZT_PHY_MAX_SOCKETS)
|
||||
return (PhySocket *)0;
|
||||
|
||||
memset(&sun,0,sizeof(sun));
|
||||
sun.sun_family = AF_UNIX;
|
||||
if (strlen(path) >= sizeof(sun.sun_path))
|
||||
return (PhySocket *)0;
|
||||
strcpy(sun.sun_path,path);
|
||||
|
||||
ZT_PHY_SOCKFD_TYPE s = ::socket(PF_UNIX,SOCK_STREAM,0);
|
||||
if (!ZT_PHY_SOCKFD_VALID(s))
|
||||
return (PhySocket *)0;
|
||||
|
||||
::fcntl(s,F_SETFL,O_NONBLOCK);
|
||||
|
||||
::unlink(path);
|
||||
if (::bind(s,(struct sockaddr *)&sun,sizeof(struct sockaddr_un)) != 0) {
|
||||
ZT_PHY_CLOSE_SOCKET(s);
|
||||
return (PhySocket *)0;
|
||||
}
|
||||
if (::listen(s,128) != 0) {
|
||||
ZT_PHY_CLOSE_SOCKET(s);
|
||||
return (PhySocket *)0;
|
||||
}
|
||||
|
||||
try {
|
||||
_socks.push_back(PhySocketImpl());
|
||||
} catch ( ... ) {
|
||||
ZT_PHY_CLOSE_SOCKET(s);
|
||||
return (PhySocket *)0;
|
||||
}
|
||||
PhySocketImpl &sws = _socks.back();
|
||||
|
||||
if ((long)s > _nfds)
|
||||
_nfds = (long)s;
|
||||
FD_SET(s,&_readfds);
|
||||
sws.type = ZT_PHY_SOCKET_UNIX_LISTEN;
|
||||
sws.sock = s;
|
||||
sws.uptr = uptr;
|
||||
memset(&(sws.saddr),0,sizeof(struct sockaddr_storage));
|
||||
memcpy(&(sws.saddr),&sun,sizeof(struct sockaddr_un));
|
||||
|
||||
return (PhySocket *)&sws;
|
||||
}
|
||||
#endif // __UNIX_LIKE__
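For completeness, a hypothetical caller of unixListen(), reusing the PairHandler sketch shown after createSocketPair() above; the socket path here is an arbitrary example, not one used by ZeroTier One.

// Illustrative only: serve a local control socket and let poll() dispatch events.
PairHandler handler;
Phy<PairHandler *> phy(&handler,false,true);

PhySocket *ctl = phy.unixListen("/tmp/example-control.sock",(void *)0); // example path
if (ctl) {
	for(;;)
		phy.poll(500); // phyOnUnixAccept()/phyOnUnixData() fire as clients connect and write
}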
|
||||
|
||||
/**
|
||||
* Bind a local listen socket to listen for new TCP connections
|
||||
*
|
||||
@ -523,19 +665,21 @@ public:
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to send data to a TCP connection (non-blocking)
|
||||
* Attempt to send data to a stream socket (non-blocking)
|
||||
*
|
||||
* If -1 is returned, the socket should no longer be used as it is now
|
||||
* destroyed. If callCloseHandler is true, the close handler will be
|
||||
* called before the function returns.
|
||||
*
|
||||
* @param sock An open TCP socket (other socket types will fail)
|
||||
* This can be used with TCP, Unix, or socket pair sockets.
|
||||
*
|
||||
* @param sock An open stream socket (other socket types will fail)
|
||||
* @param data Data to send
|
||||
* @param len Length of data
|
||||
* @param callCloseHandler If true, call close handler on socket closing failure condition (default: true)
|
||||
* @return Number of bytes actually sent or -1 on fatal error (socket closure)
|
||||
*/
|
||||
inline long tcpSend(PhySocket *sock,const void *data,unsigned long len,bool callCloseHandler = true)
|
||||
inline long streamSend(PhySocket *sock,const void *data,unsigned long len,bool callCloseHandler = true)
|
||||
{
|
||||
PhySocketImpl &sws = *(reinterpret_cast<PhySocketImpl *>(sock));
|
||||
#if defined(_WIN32) || defined(_WIN64)
|
||||
@ -573,17 +717,58 @@ public:
|
||||
return n;
|
||||
}
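Elsewhere in this commit, callers pair streamSend() with setNotifyWritable(): bytes the socket could not take are buffered and flushed again from the writability callback. A hypothetical distillation of that caller-side pattern follows; WriteState and flushOrQueue() are illustrative names, not part of this codebase.

// Illustrative only: buffer what streamSend() could not take and ask for a
// writability callback until the buffer drains.
#include <string>

struct WriteState { std::string buf; };

template<typename HANDLER_PTR>
inline bool flushOrQueue(Phy<HANDLER_PTR> &phy,PhySocket *sock,WriteState &ws,const void *data,unsigned long len)
{
	ws.buf.append(reinterpret_cast<const char *>(data),len);
	long n = phy.streamSend(sock,ws.buf.data(),(unsigned long)ws.buf.length(),true);
	if (n < 0)
		return false; // socket is gone; the close handler has already run
	if (n > 0)
		ws.buf.erase(0,(unsigned long)n);
	phy.setNotifyWritable(sock,ws.buf.length() > 0); // writable callback keeps flushing
	return true;
}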
|
||||
|
||||
#ifdef __UNIX_LIKE__
|
||||
/**
|
||||
* Set whether we want to be notified via the TCP writability handler when a socket is writable
|
||||
* Attempt to send data to a Unix domain socket connection (non-blocking)
|
||||
*
|
||||
* If -1 is returned, the socket should no longer be used as it is now
|
||||
* destroyed. If callCloseHandler is true, the close handler will be
|
||||
* called before the function returns.
|
||||
*
|
||||
* @param sock An open Unix socket (other socket types will fail)
|
||||
* @param data Data to send
|
||||
* @param len Length of data
|
||||
* @param callCloseHandler If true, call close handler on socket closing failure condition (default: true)
|
||||
* @return Number of bytes actually sent or -1 on fatal error (socket closure)
|
||||
*/
|
||||
inline long unixSend(PhySocket *sock,const void *data,unsigned long len,bool callCloseHandler = true)
|
||||
{
|
||||
PhySocketImpl &sws = *(reinterpret_cast<PhySocketImpl *>(sock));
|
||||
long n = (long)::write(sws.sock,data,len);
|
||||
if (n < 0) {
|
||||
switch(errno) {
|
||||
#ifdef EAGAIN
|
||||
case EAGAIN:
|
||||
#endif
|
||||
#if defined(EWOULDBLOCK) && ( !defined(EAGAIN) || (EWOULDBLOCK != EAGAIN) )
|
||||
case EWOULDBLOCK:
|
||||
#endif
|
||||
#ifdef EINTR
|
||||
case EINTR:
|
||||
#endif
|
||||
return 0;
|
||||
default:
|
||||
this->close(sock,callCloseHandler);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return n;
|
||||
}
|
||||
#endif // __UNIX_LIKE__
|
||||
|
||||
/**
|
||||
* For streams, sets whether we want to be notified that the socket is writable
|
||||
*
|
||||
* This can be used with TCP, Unix, or socket pair sockets.
|
||||
*
|
||||
* Call whack() if this is being done from another thread and you want
|
||||
* it to take effect immediately. Otherwise it is only guaranteed to
|
||||
* take effect on the next poll().
|
||||
*
|
||||
* @param sock TCP connection socket (other types are not valid)
|
||||
* @param sock Stream connection socket
|
||||
* @param notifyWritable Want writable notifications?
|
||||
*/
|
||||
inline const void tcpSetNotifyWritable(PhySocket *sock,bool notifyWritable)
|
||||
inline const void setNotifyWritable(PhySocket *sock,bool notifyWritable)
|
||||
{
|
||||
PhySocketImpl &sws = *(reinterpret_cast<PhySocketImpl *>(sock));
|
||||
if (notifyWritable) {
|
||||
@ -727,6 +912,77 @@ public:
|
||||
}
|
||||
break;
|
||||
|
||||
case ZT_PHY_SOCKET_UNIX_IN: {
|
||||
#ifdef __UNIX_LIKE__
|
||||
ZT_PHY_SOCKFD_TYPE sock = s->sock; // if closed, s->sock becomes invalid as s is no longer dereferencable
|
||||
if (FD_ISSET(sock,&rfds)) {
|
||||
long n = (long)::read(sock,buf,sizeof(buf));
|
||||
if (n <= 0) {
|
||||
this->close((PhySocket *)&(*s),true);
|
||||
} else {
|
||||
try {
|
||||
_handler->phyOnUnixData((PhySocket *)&(*s),&(s->uptr),(void *)buf,(unsigned long)n);
|
||||
} catch ( ... ) {}
|
||||
}
|
||||
}
|
||||
if ((FD_ISSET(sock,&wfds))&&(FD_ISSET(sock,&_writefds))) {
|
||||
try {
|
||||
_handler->phyOnUnixWritable((PhySocket *)&(*s),&(s->uptr));
|
||||
} catch ( ... ) {}
|
||||
}
|
||||
#endif // __UNIX_LIKE__
|
||||
} break;
|
||||
|
||||
case ZT_PHY_SOCKET_UNIX_LISTEN:
|
||||
#ifdef __UNIX_LIKE__
|
||||
if (FD_ISSET(s->sock,&rfds)) {
|
||||
memset(&ss,0,sizeof(ss));
|
||||
socklen_t slen = sizeof(ss);
|
||||
ZT_PHY_SOCKFD_TYPE newSock = ::accept(s->sock,(struct sockaddr *)&ss,&slen);
|
||||
if (ZT_PHY_SOCKFD_VALID(newSock)) {
|
||||
if (_socks.size() >= ZT_PHY_MAX_SOCKETS) {
|
||||
ZT_PHY_CLOSE_SOCKET(newSock);
|
||||
} else {
|
||||
fcntl(newSock,F_SETFL,O_NONBLOCK);
|
||||
_socks.push_back(PhySocketImpl());
|
||||
PhySocketImpl &sws = _socks.back();
|
||||
FD_SET(newSock,&_readfds);
|
||||
if ((long)newSock > _nfds)
|
||||
_nfds = (long)newSock;
|
||||
sws.type = ZT_PHY_SOCKET_UNIX_IN;
|
||||
sws.sock = newSock;
|
||||
sws.uptr = (void *)0;
|
||||
memcpy(&(sws.saddr),&ss,sizeof(struct sockaddr_storage));
|
||||
try {
|
||||
_handler->phyOnUnixAccept((PhySocket *)&(*s),(PhySocket *)&(_socks.back()),&(s->uptr),&(sws.uptr));
|
||||
} catch ( ... ) {}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif // __UNIX_LIKE__
|
||||
break;
|
||||
|
||||
case ZT_PHY_SOCKET_PAIR_ENDPOINT: {
|
||||
#ifdef __UNIX_LIKE__
|
||||
ZT_PHY_SOCKFD_TYPE sock = s->sock; // if closed, s->sock becomes invalid as s is no longer dereferencable
|
||||
if (FD_ISSET(sock,&rfds)) {
|
||||
long n = (long)::read(sock,buf,sizeof(buf));
|
||||
if (n <= 0) {
|
||||
this->close((PhySocket *)&(*s),true);
|
||||
} else {
|
||||
try {
|
||||
_handler->phyOnSocketPairEndpointData((PhySocket *)&(*s),&(s->uptr),(void *)buf,(unsigned long)n);
|
||||
} catch ( ... ) {}
|
||||
}
|
||||
}
|
||||
if ((FD_ISSET(sock,&wfds))&&(FD_ISSET(sock,&_writefds))) {
|
||||
try {
|
||||
_handler->phyOnSocketPairEndpointWritable((PhySocket *)&(*s),&(s->uptr));
|
||||
} catch ( ... ) {}
|
||||
}
|
||||
#endif // __UNIX_LIKE__
|
||||
} break;
|
||||
|
||||
default:
|
||||
break;
|
||||
|
||||
@ -758,24 +1014,40 @@ public:
|
||||
|
||||
ZT_PHY_CLOSE_SOCKET(sws.sock);
|
||||
|
||||
switch(sws.type) {
|
||||
case ZT_PHY_SOCKET_TCP_OUT_PENDING:
|
||||
if (callHandlers) {
|
||||
#ifdef __UNIX_LIKE__
|
||||
if (sws.type == ZT_PHY_SOCKET_UNIX_LISTEN)
|
||||
::unlink(((struct sockaddr_un *)(&(sws.saddr)))->sun_path);
|
||||
#endif // __UNIX_LIKE__
|
||||
|
||||
if (callHandlers) {
|
||||
switch(sws.type) {
|
||||
case ZT_PHY_SOCKET_TCP_OUT_PENDING:
|
||||
try {
|
||||
_handler->phyOnTcpConnect(sock,&(sws.uptr),false);
|
||||
} catch ( ... ) {}
|
||||
}
|
||||
break;
|
||||
case ZT_PHY_SOCKET_TCP_OUT_CONNECTED:
|
||||
case ZT_PHY_SOCKET_TCP_IN:
|
||||
if (callHandlers) {
|
||||
break;
|
||||
case ZT_PHY_SOCKET_TCP_OUT_CONNECTED:
|
||||
case ZT_PHY_SOCKET_TCP_IN:
|
||||
try {
|
||||
_handler->phyOnTcpClose(sock,&(sws.uptr));
|
||||
} catch ( ... ) {}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
break;
|
||||
case ZT_PHY_SOCKET_UNIX_IN:
|
||||
#ifdef __UNIX_LIKE__
|
||||
try {
|
||||
_handler->phyOnUnixClose(sock,&(sws.uptr));
|
||||
} catch ( ... ) {}
|
||||
#endif // __UNIX_LIKE__
|
||||
break;
|
||||
case ZT_PHY_SOCKET_PAIR_ENDPOINT:
|
||||
#ifdef __UNIX_LIKE__
|
||||
try {
|
||||
_handler->phyOnSocketPairEndpointClose(sock,&(sws.uptr));
|
||||
} catch ( ... ) {}
|
||||
#endif // __UNIX_LIKE__
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Causes entry to be deleted from list in poll(), ignored elsewhere
|
||||
|
selftest.cpp (159 lines changed)
@ -36,6 +36,7 @@
|
||||
#include <vector>
|
||||
|
||||
#include "node/Constants.hpp"
|
||||
#include "node/Hashtable.hpp"
|
||||
#include "node/RuntimeEnvironment.hpp"
|
||||
#include "node/InetAddress.hpp"
|
||||
#include "node/Utils.hpp"
|
||||
@ -578,6 +579,148 @@ static int testPacket()
|
||||
|
||||
static int testOther()
|
||||
{
|
||||
std::cout << "[other] Testing Hashtable... "; std::cout.flush();
|
||||
{
|
||||
Hashtable<uint64_t,std::string> ht;
|
||||
Hashtable<uint64_t,std::string> ht2;
|
||||
std::map<uint64_t,std::string> ref; // assume std::map works correctly :)
|
||||
for(int x=0;x<2;++x) {
|
||||
for(int i=0;i<25000;++i) {
|
||||
uint64_t k = rand();
|
||||
while ((k == 0)||(ref.count(k) > 0))
|
||||
++k;
|
||||
std::string v("!");
|
||||
for(int j=0;j<(int)(k % 64);++j)
|
||||
v.push_back("0123456789"[rand() % 10]);
|
||||
ht.set(k,v);
|
||||
ref[k] = v;
|
||||
}
|
||||
if (ht.size() != ref.size()) {
|
||||
std::cout << "FAILED! (size mismatch, original)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
ht2 = ht;
|
||||
Hashtable<uint64_t,std::string> ht3(ht2);
|
||||
if (ht2.size() != ref.size()) {
|
||||
std::cout << "FAILED! (size mismatch, assigned)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (ht3.size() != ref.size()) {
|
||||
std::cout << "FAILED! (size mismatch, copied)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
for(std::map<uint64_t,std::string>::iterator i(ref.begin());i!=ref.end();++i) {
|
||||
std::string *v = ht.get(i->first);
|
||||
if (!v) {
|
||||
std::cout << "FAILED! (key " << i->first << " not found, original)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (*v != i->second) {
|
||||
std::cout << "FAILED! (key " << i->first << " not equal, original)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
v = ht2.get(i->first);
|
||||
if (!v) {
|
||||
std::cout << "FAILED! (key " << i->first << " not found, assigned)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (*v != i->second) {
|
||||
std::cout << "FAILED! (key " << i->first << " not equal, assigned)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
v = ht3.get(i->first);
|
||||
if (!v) {
|
||||
std::cout << "FAILED! (key " << i->first << " not found, copied)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
if (*v != i->second) {
|
||||
std::cout << "FAILED! (key " << i->first << " not equal, copied)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
{
|
||||
uint64_t *k;
|
||||
std::string *v;
|
||||
Hashtable<uint64_t,std::string>::Iterator i(ht);
|
||||
unsigned long ic = 0;
|
||||
while (i.next(k,v)) {
|
||||
if (ref[*k] != *v) {
|
||||
std::cout << "FAILED! (iterate)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
++ic;
|
||||
}
|
||||
if (ic != ht.size()) {
|
||||
std::cout << "FAILED! (iterate coverage)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
for(std::map<uint64_t,std::string>::iterator i(ref.begin());i!=ref.end();) {
|
||||
if (!ht.get(i->first)) {
|
||||
std::cout << "FAILED! (erase, check if exists)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
ht.erase(i->first);
|
||||
if (ht.get(i->first)) {
|
||||
std::cout << "FAILED! (erase, check if erased)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
ref.erase(i++);
|
||||
if (ht.size() != ref.size()) {
|
||||
std::cout << "FAILED! (erase, size)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if (!ht.empty()) {
|
||||
std::cout << "FAILED! (erase, empty)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
for(int i=0;i<10000;++i) {
|
||||
uint64_t k = rand();
|
||||
while ((k == 0)||(ref.count(k) > 0))
|
||||
++k;
|
||||
std::string v;
|
||||
for(int j=0;j<(int)(k % 64);++j)
|
||||
v.push_back("0123456789"[rand() % 10]);
|
||||
ht.set(k,v);
|
||||
ref[k] = v;
|
||||
}
|
||||
if (ht.size() != ref.size()) {
|
||||
std::cout << "FAILED! (second populate)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
ht.clear();
|
||||
ref.clear();
|
||||
if (ht.size() != ref.size()) {
|
||||
std::cout << "FAILED! (clear)" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
for(int i=0;i<10000;++i) {
|
||||
uint64_t k = rand();
|
||||
while ((k == 0)||(ref.count(k) > 0))
|
||||
++k;
|
||||
std::string v;
|
||||
for(int j=0;j<(int)(k % 64);++j)
|
||||
v.push_back("0123456789"[rand() % 10]);
|
||||
ht.set(k,v);
|
||||
ref[k] = v;
|
||||
}
|
||||
{
|
||||
Hashtable<uint64_t,std::string>::Iterator i(ht);
|
||||
uint64_t *k;
|
||||
std::string *v;
|
||||
while (i.next(k,v))
|
||||
ht.erase(*k);
|
||||
}
|
||||
ref.clear();
|
||||
if (ht.size() != ref.size()) {
|
||||
std::cout << "FAILED! (clear by iterate, " << ht.size() << ")" << std::endl;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
std::cout << "PASS" << std::endl;
|
||||
|
||||
std::cout << "[other] Testing hex encode/decode... "; std::cout.flush();
|
||||
for(unsigned int k=0;k<1000;++k) {
|
||||
unsigned int flen = (rand() % 8194) + 1;
|
||||
@ -652,7 +795,7 @@ struct TestPhyHandlers
|
||||
{
|
||||
++phyTestTcpAcceptCount;
|
||||
*uptrN = new std::string(ZT_TEST_PHY_TCP_MESSAGE_SIZE,(char)0xff);
|
||||
testPhyInstance->tcpSetNotifyWritable(sockN,true);
|
||||
testPhyInstance->setNotifyWritable(sockN,true);
|
||||
}
|
||||
|
||||
inline void phyOnTcpClose(PhySocket *sock,void **uptr)
|
||||
@ -669,7 +812,7 @@ struct TestPhyHandlers
|
||||
{
|
||||
std::string *testMessage = (std::string *)*uptr;
|
||||
if ((testMessage)&&(testMessage->length() > 0)) {
|
||||
long sent = testPhyInstance->tcpSend(sock,(const void *)testMessage->data(),(unsigned long)testMessage->length(),true);
|
||||
long sent = testPhyInstance->streamSend(sock,(const void *)testMessage->data(),(unsigned long)testMessage->length(),true);
|
||||
if (sent > 0)
|
||||
testMessage->erase(0,sent);
|
||||
}
|
||||
@ -677,6 +820,16 @@ struct TestPhyHandlers
|
||||
testPhyInstance->close(sock,true);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef __UNIX_LIKE__
|
||||
inline void phyOnUnixAccept(PhySocket *sockL,PhySocket *sockN,void **uptrL,void **uptrN) {}
|
||||
inline void phyOnUnixClose(PhySocket *sock,void **uptr) {}
|
||||
inline void phyOnUnixData(PhySocket *sock,void **uptr,void *data,unsigned long len) {}
|
||||
inline void phyOnUnixWritable(PhySocket *sock,void **uptr) {}
|
||||
inline void phyOnSocketPairEndpointClose(PhySocket *sock,void **uptr) {}
|
||||
inline void phyOnSocketPairEndpointData(PhySocket *sock,void **uptr,void *data,unsigned long len) {}
|
||||
inline void phyOnSocketPairEndpointWritable(PhySocket *sock,void **uptr) {}
|
||||
#endif // __UNIX_LIKE__
|
||||
};
|
||||
static int testPhy()
|
||||
{
|
||||
@ -909,9 +1062,9 @@ int main(int argc,char **argv)
|
||||
srand((unsigned int)time(0));
|
||||
|
||||
r |= testSqliteNetworkController();
|
||||
r |= testOther();
|
||||
r |= testCrypto();
|
||||
r |= testPacket();
|
||||
r |= testOther();
|
||||
r |= testIdentity();
|
||||
r |= testCertificate();
|
||||
r |= testPhy();
|
||||
|
@ -64,7 +64,7 @@ static std::string _jsonEscape(const char *s)
|
||||
}
|
||||
static std::string _jsonEscape(const std::string &s) { return _jsonEscape(s.c_str()); }
|
||||
|
||||
static std::string _jsonEnumerate(const ZT1_MulticastGroup *mg,unsigned int count)
|
||||
static std::string _jsonEnumerate(const ZT_MulticastGroup *mg,unsigned int count)
|
||||
{
|
||||
std::string buf;
|
||||
char tmp[128];
|
||||
@ -101,7 +101,7 @@ static std::string _jsonEnumerate(const struct sockaddr_storage *ss,unsigned int
|
||||
return buf;
|
||||
}
|
||||
|
||||
static void _jsonAppend(unsigned int depth,std::string &buf,const ZT1_VirtualNetworkConfig *nc,const std::string &portDeviceName)
|
||||
static void _jsonAppend(unsigned int depth,std::string &buf,const ZT_VirtualNetworkConfig *nc,const std::string &portDeviceName)
|
||||
{
|
||||
char json[4096];
|
||||
char prefix[32];
|
||||
@ -114,16 +114,16 @@ static void _jsonAppend(unsigned int depth,std::string &buf,const ZT1_VirtualNet
|
||||
|
||||
const char *nstatus = "",*ntype = "";
|
||||
switch(nc->status) {
|
||||
case ZT1_NETWORK_STATUS_REQUESTING_CONFIGURATION: nstatus = "REQUESTING_CONFIGURATION"; break;
|
||||
case ZT1_NETWORK_STATUS_OK: nstatus = "OK"; break;
|
||||
case ZT1_NETWORK_STATUS_ACCESS_DENIED: nstatus = "ACCESS_DENIED"; break;
|
||||
case ZT1_NETWORK_STATUS_NOT_FOUND: nstatus = "NOT_FOUND"; break;
|
||||
case ZT1_NETWORK_STATUS_PORT_ERROR: nstatus = "PORT_ERROR"; break;
|
||||
case ZT1_NETWORK_STATUS_CLIENT_TOO_OLD: nstatus = "CLIENT_TOO_OLD"; break;
|
||||
case ZT_NETWORK_STATUS_REQUESTING_CONFIGURATION: nstatus = "REQUESTING_CONFIGURATION"; break;
|
||||
case ZT_NETWORK_STATUS_OK: nstatus = "OK"; break;
|
||||
case ZT_NETWORK_STATUS_ACCESS_DENIED: nstatus = "ACCESS_DENIED"; break;
|
||||
case ZT_NETWORK_STATUS_NOT_FOUND: nstatus = "NOT_FOUND"; break;
|
||||
case ZT_NETWORK_STATUS_PORT_ERROR: nstatus = "PORT_ERROR"; break;
|
||||
case ZT_NETWORK_STATUS_CLIENT_TOO_OLD: nstatus = "CLIENT_TOO_OLD"; break;
|
||||
}
|
||||
switch(nc->type) {
|
||||
case ZT1_NETWORK_TYPE_PRIVATE: ntype = "PRIVATE"; break;
|
||||
case ZT1_NETWORK_TYPE_PUBLIC: ntype = "PUBLIC"; break;
|
||||
case ZT_NETWORK_TYPE_PRIVATE: ntype = "PRIVATE"; break;
|
||||
case ZT_NETWORK_TYPE_PUBLIC: ntype = "PUBLIC"; break;
|
||||
}
|
||||
|
||||
Utils::snprintf(json,sizeof(json),
|
||||
@ -162,7 +162,7 @@ static void _jsonAppend(unsigned int depth,std::string &buf,const ZT1_VirtualNet
|
||||
buf.append(json);
|
||||
}
|
||||
|
||||
static std::string _jsonEnumerate(unsigned int depth,const ZT1_PeerPhysicalPath *pp,unsigned int count)
|
||||
static std::string _jsonEnumerate(unsigned int depth,const ZT_PeerPhysicalPath *pp,unsigned int count)
|
||||
{
|
||||
char json[1024];
|
||||
char prefix[32];
|
||||
@ -198,7 +198,7 @@ static std::string _jsonEnumerate(unsigned int depth,const ZT1_PeerPhysicalPath
|
||||
return buf;
|
||||
}
|
||||
|
||||
static void _jsonAppend(unsigned int depth,std::string &buf,const ZT1_Peer *peer)
|
||||
static void _jsonAppend(unsigned int depth,std::string &buf,const ZT_Peer *peer)
|
||||
{
|
||||
char json[1024];
|
||||
char prefix[32];
|
||||
@ -211,9 +211,9 @@ static void _jsonAppend(unsigned int depth,std::string &buf,const ZT1_Peer *peer
|
||||
|
||||
const char *prole = "";
|
||||
switch(peer->role) {
|
||||
case ZT1_PEER_ROLE_LEAF: prole = "LEAF"; break;
|
||||
case ZT1_PEER_ROLE_RELAY: prole = "RELAY"; break;
|
||||
case ZT1_PEER_ROLE_ROOT: prole = "ROOT"; break;
|
||||
case ZT_PEER_ROLE_LEAF: prole = "LEAF"; break;
|
||||
case ZT_PEER_ROLE_RELAY: prole = "RELAY"; break;
|
||||
case ZT_PEER_ROLE_ROOT: prole = "ROOT"; break;
|
||||
}
|
||||
|
||||
Utils::snprintf(json,sizeof(json),
|
||||
@ -356,7 +356,7 @@ unsigned int ControlPlane::handleRequest(
|
||||
|
||||
if (ps[0] == "status") {
|
||||
responseContentType = "application/json";
|
||||
ZT1_NodeStatus status;
|
||||
ZT_NodeStatus status;
|
||||
_node->status(&status);
|
||||
Utils::snprintf(json,sizeof(json),
|
||||
"{\n"
|
||||
@ -386,7 +386,7 @@ unsigned int ControlPlane::handleRequest(
|
||||
responseBody = "{}"; // TODO
|
||||
scode = 200;
|
||||
} else if (ps[0] == "network") {
|
||||
ZT1_VirtualNetworkList *nws = _node->networks();
|
||||
ZT_VirtualNetworkList *nws = _node->networks();
|
||||
if (nws) {
|
||||
if (ps.size() == 1) {
|
||||
// Return [array] of all networks
|
||||
@ -415,7 +415,7 @@ unsigned int ControlPlane::handleRequest(
|
||||
_node->freeQueryResult((void *)nws);
|
||||
} else scode = 500;
|
||||
} else if (ps[0] == "peer") {
|
||||
ZT1_PeerList *pl = _node->peers();
|
||||
ZT_PeerList *pl = _node->peers();
|
||||
if (pl) {
|
||||
if (ps.size() == 1) {
|
||||
// Return [array] of all peers
|
||||
@ -473,7 +473,7 @@ unsigned int ControlPlane::handleRequest(
|
||||
if (ps.size() == 2) {
|
||||
uint64_t wantnw = Utils::hexStrToU64(ps[1].c_str());
|
||||
_node->join(wantnw); // does nothing if we are a member
|
||||
ZT1_VirtualNetworkList *nws = _node->networks();
|
||||
ZT_VirtualNetworkList *nws = _node->networks();
|
||||
if (nws) {
|
||||
for(unsigned long i=0;i<nws->networkCount;++i) {
|
||||
if (nws->networks[i].nwid == wantnw) {
|
||||
@ -506,7 +506,7 @@ unsigned int ControlPlane::handleRequest(
|
||||
if (ps[0] == "config") {
|
||||
// TODO
|
||||
} else if (ps[0] == "network") {
|
||||
ZT1_VirtualNetworkList *nws = _node->networks();
|
||||
ZT_VirtualNetworkList *nws = _node->networks();
|
||||
if (nws) {
|
||||
if (ps.size() == 2) {
|
||||
uint64_t wantnw = Utils::hexStrToU64(ps[1].c_str());
|
||||
|
@ -118,20 +118,20 @@ namespace ZeroTier { typedef BSDEthernetTap EthernetTap; }
|
||||
#define ZT_TAP_CHECK_MULTICAST_INTERVAL 30000
|
||||
|
||||
// Path under ZT1 home for controller database if controller is enabled
|
||||
#define ZT1_CONTROLLER_DB_PATH "controller.db"
|
||||
#define ZT_CONTROLLER_DB_PATH "controller.db"
|
||||
|
||||
// TCP fallback relay host -- geo-distributed using Amazon Route53 geo-aware DNS
|
||||
#define ZT1_TCP_FALLBACK_RELAY "tcp-fallback.zerotier.com"
|
||||
#define ZT1_TCP_FALLBACK_RELAY_PORT 443
|
||||
#define ZT_TCP_FALLBACK_RELAY "tcp-fallback.zerotier.com"
|
||||
#define ZT_TCP_FALLBACK_RELAY_PORT 443
|
||||
|
||||
// Frequency at which we re-resolve the TCP fallback relay
|
||||
#define ZT1_TCP_FALLBACK_RERESOLVE_DELAY 86400000
|
||||
#define ZT_TCP_FALLBACK_RERESOLVE_DELAY 86400000
|
||||
|
||||
// Attempt to engage TCP fallback after this many ms of no reply to packets sent to global-scope IPs
|
||||
#define ZT1_TCP_FALLBACK_AFTER 60000
|
||||
#define ZT_TCP_FALLBACK_AFTER 60000
|
||||
|
||||
// How often to check for local interface addresses
|
||||
#define ZT1_LOCAL_INTERFACE_CHECK_INTERVAL 300000
|
||||
#define ZT_LOCAL_INTERFACE_CHECK_INTERVAL 300000
|
||||
|
||||
namespace ZeroTier {
|
||||
|
||||
@ -340,12 +340,12 @@ static BackgroundSoftwareUpdateChecker backgroundSoftwareUpdateChecker;
|
||||
|
||||
class OneServiceImpl;
|
||||
|
||||
static int SnodeVirtualNetworkConfigFunction(ZT1_Node *node,void *uptr,uint64_t nwid,enum ZT1_VirtualNetworkConfigOperation op,const ZT1_VirtualNetworkConfig *nwconf);
|
||||
static void SnodeEventCallback(ZT1_Node *node,void *uptr,enum ZT1_Event event,const void *metaData);
|
||||
static long SnodeDataStoreGetFunction(ZT1_Node *node,void *uptr,const char *name,void *buf,unsigned long bufSize,unsigned long readIndex,unsigned long *totalSize);
|
||||
static int SnodeDataStorePutFunction(ZT1_Node *node,void *uptr,const char *name,const void *data,unsigned long len,int secure);
|
||||
static int SnodeWirePacketSendFunction(ZT1_Node *node,void *uptr,const struct sockaddr_storage *addr,const void *data,unsigned int len);
|
||||
static void SnodeVirtualNetworkFrameFunction(ZT1_Node *node,void *uptr,uint64_t nwid,uint64_t sourceMac,uint64_t destMac,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len);
|
||||
static int SnodeVirtualNetworkConfigFunction(ZT_Node *node,void *uptr,uint64_t nwid,enum ZT_VirtualNetworkConfigOperation op,const ZT_VirtualNetworkConfig *nwconf);
|
||||
static void SnodeEventCallback(ZT_Node *node,void *uptr,enum ZT_Event event,const void *metaData);
|
||||
static long SnodeDataStoreGetFunction(ZT_Node *node,void *uptr,const char *name,void *buf,unsigned long bufSize,unsigned long readIndex,unsigned long *totalSize);
|
||||
static int SnodeDataStorePutFunction(ZT_Node *node,void *uptr,const char *name,const void *data,unsigned long len,int secure);
|
||||
static int SnodeWirePacketSendFunction(ZT_Node *node,void *uptr,const struct sockaddr_storage *localAddr,const struct sockaddr_storage *addr,const void *data,unsigned int len);
|
||||
static void SnodeVirtualNetworkFrameFunction(ZT_Node *node,void *uptr,uint64_t nwid,uint64_t sourceMac,uint64_t destMac,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len);
|
||||
|
||||
static void StapFrameHandler(void *uptr,uint64_t nwid,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len);
|
||||
|
||||
@ -401,9 +401,9 @@ class OneServiceImpl : public OneService
|
||||
public:
|
||||
OneServiceImpl(const char *hp,unsigned int port,const char *overrideRootTopology) :
|
||||
_homePath((hp) ? hp : "."),
|
||||
_tcpFallbackResolver(ZT1_TCP_FALLBACK_RELAY),
|
||||
_tcpFallbackResolver(ZT_TCP_FALLBACK_RELAY),
|
||||
#ifdef ZT_ENABLE_NETWORK_CONTROLLER
|
||||
_controller((_homePath + ZT_PATH_SEPARATOR_S + ZT1_CONTROLLER_DB_PATH).c_str()),
|
||||
_controller((_homePath + ZT_PATH_SEPARATOR_S + ZT_CONTROLLER_DB_PATH).c_str()),
|
||||
#endif
|
||||
_phy(this,false,true),
|
||||
_overrideRootTopology((overrideRootTopology) ? overrideRootTopology : ""),
|
||||
@ -415,38 +415,78 @@ public:
|
||||
_nextBackgroundTaskDeadline(0),
|
||||
_tcpFallbackTunnel((TcpConnection *)0),
|
||||
_termReason(ONE_STILL_RUNNING),
|
||||
_port(port),
|
||||
_port(0),
|
||||
#ifdef ZT_USE_MINIUPNPC
|
||||
_upnpClient((int)port),
|
||||
_v4UpnpUdpSocket((PhySocket *)0),
|
||||
_upnpClient((UPNPClient *)0),
|
||||
#endif
|
||||
_run(true)
|
||||
{
|
||||
struct sockaddr_in in4;
|
||||
struct sockaddr_in6 in6;
|
||||
const int portTrials = (port == 0) ? 256 : 1; // if port is 0, pick random
|
||||
for(int k=0;k<portTrials;++k) {
|
||||
if (port == 0) {
|
||||
unsigned int randp = 0;
|
||||
Utils::getSecureRandom(&randp,sizeof(randp));
|
||||
port = 40000 + (randp % 25500);
|
||||
}
|
||||
|
||||
::memset((void *)&in4,0,sizeof(in4));
|
||||
in4.sin_family = AF_INET;
|
||||
in4.sin_port = Utils::hton((uint16_t)port);
|
||||
_v4UdpSocket = _phy.udpBind((const struct sockaddr *)&in4,this,131072);
|
||||
if (!_v4UdpSocket)
|
||||
throw std::runtime_error("cannot bind to port (UDP/IPv4)");
|
||||
in4.sin_addr.s_addr = Utils::hton((uint32_t)0x7f000001); // right now we just listen for TCP @localhost
|
||||
_v4TcpListenSocket = _phy.tcpListen((const struct sockaddr *)&in4,this);
|
||||
if (!_v4TcpListenSocket) {
|
||||
_phy.close(_v4UdpSocket);
|
||||
throw std::runtime_error("cannot bind to port (TCP/IPv4)");
|
||||
_v4LocalAddress = InetAddress((uint32_t)0,port);
|
||||
_v4UdpSocket = _phy.udpBind((const struct sockaddr *)&_v4LocalAddress,reinterpret_cast<void *>(&_v4LocalAddress),131072);
|
||||
|
||||
if (_v4UdpSocket) {
|
||||
struct sockaddr_in in4;
|
||||
memset(&in4,0,sizeof(in4));
|
||||
in4.sin_family = AF_INET;
|
||||
in4.sin_addr.s_addr = Utils::hton((uint32_t)0x7f000001); // right now we just listen for TCP @localhost
|
||||
in4.sin_port = Utils::hton((uint16_t)port);
|
||||
_v4TcpListenSocket = _phy.tcpListen((const struct sockaddr *)&in4,this);
|
||||
|
||||
if (_v4TcpListenSocket) {
|
||||
_v6LocalAddress = InetAddress("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",16,port);
|
||||
_v6UdpSocket = _phy.udpBind((const struct sockaddr *)&_v6LocalAddress,reinterpret_cast<void *>(&_v6LocalAddress),131072);
|
||||
|
||||
struct sockaddr_in6 in6;
|
||||
memset((void *)&in6,0,sizeof(in6));
|
||||
in6.sin6_family = AF_INET6;
|
||||
in6.sin6_port = in4.sin_port;
|
||||
in6.sin6_addr.s6_addr[15] = 1; // IPv6 localhost == ::1
|
||||
_v6TcpListenSocket = _phy.tcpListen((const struct sockaddr *)&in6,this);
|
||||
|
||||
_port = port;
|
||||
break; // success!
|
||||
} else {
|
||||
_phy.close(_v4UdpSocket,false);
|
||||
}
|
||||
}
|
||||
|
||||
port = 0;
|
||||
}
|
||||
|
||||
::memset((void *)&in6,0,sizeof(in6));
|
||||
in6.sin6_family = AF_INET6;
|
||||
in6.sin6_port = in4.sin_port;
|
||||
_v6UdpSocket = _phy.udpBind((const struct sockaddr *)&in6,this,131072);
|
||||
in6.sin6_addr.s6_addr[15] = 1; // listen for TCP only at localhost
|
||||
_v6TcpListenSocket = _phy.tcpListen((const struct sockaddr *)&in6,this);
|
||||
if (_port == 0)
|
||||
throw std::runtime_error("cannot bind to port");
|
||||
|
||||
char portstr[64];
|
||||
Utils::snprintf(portstr,sizeof(portstr),"%u",port);
|
||||
Utils::snprintf(portstr,sizeof(portstr),"%u",_port);
|
||||
OSUtils::writeFile((_homePath + ZT_PATH_SEPARATOR_S + "zerotier-one.port").c_str(),std::string(portstr));
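The constructor change above replaces the single fixed bind with a retry loop: when port 0 is requested it draws up to 256 random candidates in [40000,65500) and keeps the first port on which both the IPv4 UDP socket and the TCP listener bind, then persists the result to zerotier-one.port. A hypothetical distillation of just the selection logic; tryBindEverything() is a placeholder for the udpBind()/tcpListen() attempts shown above.

// Illustrative only: the port-selection loop in isolation.
static unsigned int choosePort(unsigned int requested)
{
	const int trials = (requested == 0) ? 256 : 1;
	for(int k=0;k<trials;++k) {
		unsigned int port = requested;
		if (port == 0) {
			unsigned int randp = 0;
			Utils::getSecureRandom(&randp,sizeof(randp));
			port = 40000 + (randp % 25500);
		}
		if (tryBindEverything(port)) // placeholder for the actual bind attempts
			return port;
	}
	return 0; // caller treats 0 as "cannot bind to port"
}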
|
||||
|
||||
#ifdef ZT_USE_MINIUPNPC
|
||||
// Bind a random secondary port for use with uPnP, since some NAT routers
|
||||
// (cough Ubiquity Edge cough) barf up a lung if you do both conventional
|
||||
// NAT-t and uPnP from behind the same port. I think this is a bug, but
|
||||
// everyone else's router bugs are our problem. :P
|
||||
for(int k=0;k<256;++k) {
|
||||
unsigned int randp = 0;
|
||||
Utils::getSecureRandom(&randp,sizeof(randp));
|
||||
unsigned int upnport = 40000 + (randp % 25500);
|
||||
|
||||
_v4UpnpLocalAddress = InetAddress(0,upnport);
|
||||
_v4UpnpUdpSocket = _phy.udpBind((const struct sockaddr *)&_v4UpnpLocalAddress,reinterpret_cast<void *>(&_v4UpnpLocalAddress),131072);
|
||||
if (_v4UpnpUdpSocket) {
|
||||
_upnpClient = new UPNPClient(upnport);
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
virtual ~OneServiceImpl()
|
||||
@ -455,6 +495,10 @@ public:
|
||||
_phy.close(_v6UdpSocket);
|
||||
_phy.close(_v4TcpListenSocket);
|
||||
_phy.close(_v6TcpListenSocket);
|
||||
#ifdef ZT_USE_MINIUPNPC
|
||||
_phy.close(_v4UpnpUdpSocket);
|
||||
delete _upnpClient;
|
||||
#endif
|
||||
}
|
||||
|
||||
virtual ReasonForTermination run()
|
||||
@ -515,7 +559,7 @@ public:
|
||||
_lastRestart = clockShouldBe;
|
||||
uint64_t lastTapMulticastGroupCheck = 0;
|
||||
uint64_t lastTcpFallbackResolve = 0;
|
||||
uint64_t lastLocalInterfaceAddressCheck = (OSUtils::now() - ZT1_LOCAL_INTERFACE_CHECK_INTERVAL) + 15000; // do this in 15s to give UPnP time to configure and other things time to settle
|
||||
uint64_t lastLocalInterfaceAddressCheck = (OSUtils::now() - ZT_LOCAL_INTERFACE_CHECK_INTERVAL) + 15000; // do this in 15s to give UPnP time to configure and other things time to settle
|
||||
#ifdef ZT_AUTO_UPDATE
|
||||
uint64_t lastSoftwareUpdateCheck = 0;
|
||||
#endif // ZT_AUTO_UPDATE
|
||||
@ -543,17 +587,17 @@ public:
|
||||
|
||||
#ifdef ZT_AUTO_UPDATE
|
||||
if ((now - lastSoftwareUpdateCheck) >= ZT_AUTO_UPDATE_CHECK_PERIOD) {
|
||||
lastSoftwareUpdateCheck = OSUtils::now();
|
||||
lastSoftwareUpdateCheck = now;
|
||||
Thread::start(&backgroundSoftwareUpdateChecker);
|
||||
}
|
||||
#endif // ZT_AUTO_UPDATE
|
||||
|
||||
if ((now - lastTcpFallbackResolve) >= ZT1_TCP_FALLBACK_RERESOLVE_DELAY) {
|
||||
if ((now - lastTcpFallbackResolve) >= ZT_TCP_FALLBACK_RERESOLVE_DELAY) {
|
||||
lastTcpFallbackResolve = now;
|
||||
_tcpFallbackResolver.resolveNow();
|
||||
}
|
||||
|
||||
if ((_tcpFallbackTunnel)&&((now - _lastDirectReceiveFromGlobal) < (ZT1_TCP_FALLBACK_AFTER / 2)))
|
||||
if ((_tcpFallbackTunnel)&&((now - _lastDirectReceiveFromGlobal) < (ZT_TCP_FALLBACK_AFTER / 2)))
|
||||
_phy.close(_tcpFallbackTunnel->sock);
|
||||
|
||||
if ((now - lastTapMulticastGroupCheck) >= ZT_TAP_CHECK_MULTICAST_INTERVAL) {
|
||||
@ -569,7 +613,7 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
if ((now - lastLocalInterfaceAddressCheck) >= ZT1_LOCAL_INTERFACE_CHECK_INTERVAL) {
|
||||
if ((now - lastLocalInterfaceAddressCheck) >= ZT_LOCAL_INTERFACE_CHECK_INTERVAL) {
|
||||
lastLocalInterfaceAddressCheck = now;
|
||||
|
||||
#ifdef __UNIX_LIKE__
|
||||
@ -583,9 +627,9 @@ public:
|
||||
_node->clearLocalInterfaceAddresses();
|
||||
|
||||
#ifdef ZT_USE_MINIUPNPC
|
||||
std::vector<InetAddress> upnpAddresses(_upnpClient.get());
|
||||
std::vector<InetAddress> upnpAddresses(_upnpClient->get());
|
||||
for(std::vector<InetAddress>::const_iterator ext(upnpAddresses.begin());ext!=upnpAddresses.end();++ext)
|
||||
_node->addLocalInterfaceAddress(reinterpret_cast<const struct sockaddr_storage *>(&(*ext)),0,ZT1_LOCAL_INTERFACE_ADDRESS_TRUST_NORMAL);
|
||||
_node->addLocalInterfaceAddress(reinterpret_cast<const struct sockaddr_storage *>(&(*ext)),0,ZT_LOCAL_INTERFACE_ADDRESS_TRUST_NORMAL);
|
||||
#endif
|
||||
|
||||
struct ifaddrs *ifatbl = (struct ifaddrs *)0;
|
||||
@ -603,7 +647,7 @@ public:
|
||||
if (!isZT) {
|
||||
InetAddress ip(ifa->ifa_addr);
|
||||
ip.setPort(_port);
|
||||
_node->addLocalInterfaceAddress(reinterpret_cast<const struct sockaddr_storage *>(&ip),0,ZT1_LOCAL_INTERFACE_ADDRESS_TRUST_NORMAL);
|
||||
_node->addLocalInterfaceAddress(reinterpret_cast<const struct sockaddr_storage *>(&ip),0,ZT_LOCAL_INTERFACE_ADDRESS_TRUST_NORMAL);
|
||||
}
|
||||
}
|
||||
ifa = ifa->ifa_next;
|
||||
@ -637,7 +681,7 @@ public:
|
||||
while (ua) {
|
||||
InetAddress ip(ua->Address.lpSockaddr);
|
||||
ip.setPort(_port);
|
||||
_node->addLocalInterfaceAddress(reinterpret_cast<const struct sockaddr_storage *>(&ip),0,ZT1_LOCAL_INTERFACE_ADDRESS_TRUST_NORMAL);
|
||||
_node->addLocalInterfaceAddress(reinterpret_cast<const struct sockaddr_storage *>(&ip),0,ZT_LOCAL_INTERFACE_ADDRESS_TRUST_NORMAL);
|
||||
ua = ua->Next;
|
||||
}
|
||||
}
|
||||
@ -725,13 +769,14 @@ public:
|
||||
#endif
|
||||
if ((len >= 16)&&(reinterpret_cast<const InetAddress *>(from)->ipScope() == InetAddress::IP_SCOPE_GLOBAL))
|
||||
_lastDirectReceiveFromGlobal = OSUtils::now();
|
||||
ZT1_ResultCode rc = _node->processWirePacket(
|
||||
ZT_ResultCode rc = _node->processWirePacket(
|
||||
OSUtils::now(),
|
||||
reinterpret_cast<const struct sockaddr_storage *>(*uptr),
|
||||
(const struct sockaddr_storage *)from, // Phy<> uses sockaddr_storage, so it'll always be that big
|
||||
data,
|
||||
len,
|
||||
&_nextBackgroundTaskDeadline);
|
||||
if (ZT1_ResultCode_isFatal(rc)) {
|
||||
if (ZT_ResultCode_isFatal(rc)) {
|
||||
char tmp[256];
|
||||
Utils::snprintf(tmp,sizeof(tmp),"fatal error code from processWirePacket: %d",(int)rc);
|
||||
Mutex::Lock _l(_termReason_m);
|
||||
@ -772,7 +817,7 @@ public:
|
||||
tc->writeBuf.push_back((char)ZEROTIER_ONE_VERSION_MINOR);
|
||||
tc->writeBuf.push_back((char)((ZEROTIER_ONE_VERSION_REVISION >> 8) & 0xff));
|
||||
tc->writeBuf.push_back((char)(ZEROTIER_ONE_VERSION_REVISION & 0xff));
|
||||
_phy.tcpSetNotifyWritable(sock,true);
|
||||
_phy.setNotifyWritable(sock,true);
|
||||
|
||||
_tcpFallbackTunnel = tc;
|
||||
}
|
||||
@ -873,13 +918,14 @@ public:
|
||||
}
|
||||
|
||||
if (from) {
|
||||
ZT1_ResultCode rc = _node->processWirePacket(
|
||||
ZT_ResultCode rc = _node->processWirePacket(
|
||||
OSUtils::now(),
|
||||
0,
|
||||
reinterpret_cast<struct sockaddr_storage *>(&from),
|
||||
data,
|
||||
plen,
|
||||
&_nextBackgroundTaskDeadline);
|
||||
if (ZT1_ResultCode_isFatal(rc)) {
|
||||
if (ZT_ResultCode_isFatal(rc)) {
|
||||
char tmp[256];
|
||||
Utils::snprintf(tmp,sizeof(tmp),"fatal error code from processWirePacket: %d",(int)rc);
|
||||
Mutex::Lock _l(_termReason_m);
|
||||
@ -907,12 +953,12 @@ public:
|
||||
TcpConnection *tc = reinterpret_cast<TcpConnection *>(*uptr);
|
||||
Mutex::Lock _l(tc->writeBuf_m);
|
||||
if (tc->writeBuf.length() > 0) {
|
||||
long sent = (long)_phy.tcpSend(sock,tc->writeBuf.data(),(unsigned long)tc->writeBuf.length(),true);
|
||||
long sent = (long)_phy.streamSend(sock,tc->writeBuf.data(),(unsigned long)tc->writeBuf.length(),true);
|
||||
if (sent > 0) {
|
||||
tc->lastActivity = OSUtils::now();
|
||||
if ((unsigned long)sent >= (unsigned long)tc->writeBuf.length()) {
|
||||
tc->writeBuf = "";
|
||||
_phy.tcpSetNotifyWritable(sock,false);
|
||||
_phy.setNotifyWritable(sock,false);
|
||||
if (!tc->shouldKeepAlive)
|
||||
_phy.close(sock); // will call close handler to delete from _tcpConnections
|
||||
} else {
|
||||
@ -920,16 +966,24 @@ public:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
_phy.tcpSetNotifyWritable(sock,false);
|
||||
_phy.setNotifyWritable(sock,false);
|
||||
}
|
||||
}
|
||||
|
||||
inline int nodeVirtualNetworkConfigFunction(uint64_t nwid,enum ZT1_VirtualNetworkConfigOperation op,const ZT1_VirtualNetworkConfig *nwc)
|
||||
inline void phyOnUnixAccept(PhySocket *sockL,PhySocket *sockN,void **uptrL,void **uptrN) {}
|
||||
inline void phyOnUnixClose(PhySocket *sock,void **uptr) {}
|
||||
inline void phyOnUnixData(PhySocket *sock,void **uptr,void *data,unsigned long len) {}
|
||||
inline void phyOnUnixWritable(PhySocket *sock,void **uptr) {}
|
||||
inline void phyOnSocketPairEndpointClose(PhySocket *sock,void **uptr) {}
|
||||
inline void phyOnSocketPairEndpointData(PhySocket *sock,void **uptr,void *data,unsigned long len) {}
|
||||
inline void phyOnSocketPairEndpointWritable(PhySocket *sock,void **uptr) {}
|
||||
|
||||
inline int nodeVirtualNetworkConfigFunction(uint64_t nwid,enum ZT_VirtualNetworkConfigOperation op,const ZT_VirtualNetworkConfig *nwc)
|
||||
{
|
||||
Mutex::Lock _l(_taps_m);
|
||||
std::map< uint64_t,EthernetTap * >::iterator t(_taps.find(nwid));
|
||||
switch(op) {
|
||||
case ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_UP:
|
||||
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP:
|
||||
if (t == _taps.end()) {
|
||||
try {
|
||||
char friendlyName[1024];
|
||||
@ -959,7 +1013,7 @@ public:
|
||||
}
|
||||
}
|
||||
// fall through...
|
||||
case ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE:
|
||||
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE:
|
||||
if (t != _taps.end()) {
|
||||
t->second->setEnabled(nwc->enabled != 0);
|
||||
|
||||
@ -982,8 +1036,8 @@ public:
|
||||
return -999; // tap init failed
|
||||
}
|
||||
break;
|
||||
case ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN:
|
||||
case ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY:
|
||||
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN:
|
||||
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY:
|
||||
if (t != _taps.end()) {
|
||||
#ifdef __WINDOWS__
|
||||
std::string winInstanceId(t->second->instanceId());
|
||||
@ -992,7 +1046,7 @@ public:
|
||||
_taps.erase(t);
|
||||
_tapAssignedIps.erase(nwid);
|
||||
#ifdef __WINDOWS__
|
||||
if ((op == ZT1_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY)&&(winInstanceId.length() > 0))
|
||||
if ((op == ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY)&&(winInstanceId.length() > 0))
|
||||
WindowsEthernetTap::deletePersistentTapDevice(winInstanceId.c_str());
|
||||
#endif
|
||||
}
|
||||
@ -1001,17 +1055,17 @@ public:
|
||||
return 0;
|
||||
}
|
||||
|
||||
inline void nodeEventCallback(enum ZT1_Event event,const void *metaData)
|
||||
inline void nodeEventCallback(enum ZT_Event event,const void *metaData)
|
||||
{
|
||||
switch(event) {
|
||||
case ZT1_EVENT_FATAL_ERROR_IDENTITY_COLLISION: {
|
||||
case ZT_EVENT_FATAL_ERROR_IDENTITY_COLLISION: {
|
||||
Mutex::Lock _l(_termReason_m);
|
||||
_termReason = ONE_IDENTITY_COLLISION;
|
||||
_fatalErrorMessage = "identity/address collision";
|
||||
this->terminate();
|
||||
} break;
|
||||
|
||||
case ZT1_EVENT_TRACE: {
|
||||
case ZT_EVENT_TRACE: {
|
||||
if (metaData) {
|
||||
::fprintf(stderr,"%s"ZT_EOL_S,(const char *)metaData);
|
||||
::fflush(stderr);
|
||||
@ -1077,8 +1131,22 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
inline int nodeWirePacketSendFunction(const struct sockaddr_storage *addr,const void *data,unsigned int len)
|
||||
inline int nodeWirePacketSendFunction(const struct sockaddr_storage *localAddr,const struct sockaddr_storage *addr,const void *data,unsigned int len)
|
||||
{
|
||||
#ifdef ZT_USE_MINIUPNPC
|
||||
if ((localAddr->ss_family == AF_INET)&&(reinterpret_cast<const struct sockaddr_in *>(localAddr)->sin_port == reinterpret_cast<const struct sockaddr_in *>(&_v4UpnpLocalAddress)->sin_port)) {
|
||||
#ifdef ZT_BREAK_UDP
|
||||
if (!OSUtils::fileExists("/tmp/ZT_BREAK_UDP")) {
|
||||
#endif
|
||||
if (addr->ss_family == AF_INET)
|
||||
return ((_phy.udpSend(_v4UpnpUdpSocket,(const struct sockaddr *)addr,data,len) != 0) ? 0 : -1);
|
||||
else return -1;
|
||||
#ifdef ZT_BREAK_UDP
|
||||
}
|
||||
#endif
|
||||
}
|
||||
#endif // ZT_USE_MINIUPNPC
|
||||
|
||||
int result = -1;
|
||||
switch(addr->ss_family) {
|
||||
case AF_INET:
|
||||
@ -1091,19 +1159,19 @@ public:
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef ZT1_TCP_FALLBACK_RELAY
|
||||
#ifdef ZT_TCP_FALLBACK_RELAY
|
||||
// TCP fallback tunnel support
|
||||
if ((len >= 16)&&(reinterpret_cast<const InetAddress *>(addr)->ipScope() == InetAddress::IP_SCOPE_GLOBAL)) {
|
||||
uint64_t now = OSUtils::now();
|
||||
|
||||
// Engage TCP tunnel fallback if we haven't received anything valid from a global
|
||||
// IP address in ZT1_TCP_FALLBACK_AFTER milliseconds. If we do start getting
|
||||
// IP address in ZT_TCP_FALLBACK_AFTER milliseconds. If we do start getting
|
||||
// valid direct traffic we'll stop using it and close the socket after a while.
|
||||
if (((now - _lastDirectReceiveFromGlobal) > ZT1_TCP_FALLBACK_AFTER)&&((now - _lastRestart) > ZT1_TCP_FALLBACK_AFTER)) {
|
||||
if (((now - _lastDirectReceiveFromGlobal) > ZT_TCP_FALLBACK_AFTER)&&((now - _lastRestart) > ZT_TCP_FALLBACK_AFTER)) {
|
||||
if (_tcpFallbackTunnel) {
|
||||
Mutex::Lock _l(_tcpFallbackTunnel->writeBuf_m);
|
||||
if (!_tcpFallbackTunnel->writeBuf.length())
|
||||
_phy.tcpSetNotifyWritable(_tcpFallbackTunnel->sock,true);
|
||||
_phy.setNotifyWritable(_tcpFallbackTunnel->sock,true);
|
||||
unsigned long mlen = len + 7;
|
||||
_tcpFallbackTunnel->writeBuf.push_back((char)0x17);
|
||||
_tcpFallbackTunnel->writeBuf.push_back((char)0x03);
|
||||
@ -1115,7 +1183,7 @@ public:
|
||||
_tcpFallbackTunnel->writeBuf.append(reinterpret_cast<const char *>(reinterpret_cast<const void *>(&(reinterpret_cast<const struct sockaddr_in *>(addr)->sin_port))),2);
|
||||
_tcpFallbackTunnel->writeBuf.append((const char *)data,len);
|
||||
result = 0;
|
||||
} else if (((now - _lastSendToGlobal) < ZT1_TCP_FALLBACK_AFTER)&&((now - _lastSendToGlobal) > (ZT_PING_CHECK_INVERVAL / 2))) {
|
||||
} else if (((now - _lastSendToGlobal) < ZT_TCP_FALLBACK_AFTER)&&((now - _lastSendToGlobal) > (ZT_PING_CHECK_INVERVAL / 2))) {
|
||||
std::vector<InetAddress> tunnelIps(_tcpFallbackResolver.get());
|
||||
if (tunnelIps.empty()) {
|
||||
if (!_tcpFallbackResolver.running())
|
||||
@ -1123,7 +1191,7 @@ public:
|
||||
} else {
|
||||
bool connected = false;
|
||||
InetAddress addr(tunnelIps[(unsigned long)now % tunnelIps.size()]);
|
||||
addr.setPort(ZT1_TCP_FALLBACK_RELAY_PORT);
|
||||
addr.setPort(ZT_TCP_FALLBACK_RELAY_PORT);
|
||||
_phy.tcpConnect(reinterpret_cast<const struct sockaddr *>(&addr),connected);
|
||||
}
|
||||
}
|
||||
@ -1131,9 +1199,10 @@ public:
|
||||
|
||||
_lastSendToGlobal = now;
|
||||
}
|
||||
#endif // ZT1_TCP_FALLBACK_RELAY
|
||||
#endif // ZT_TCP_FALLBACK_RELAY
|
||||
|
||||
break;
|
||||
|
||||
case AF_INET6:
|
||||
#ifdef ZT_BREAK_UDP
|
||||
if (!OSUtils::fileExists("/tmp/ZT_BREAK_UDP")) {
|
||||
@ -1144,6 +1213,7 @@ public:
|
||||
}
|
||||
#endif
|
||||
break;
|
||||
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
@ -1206,7 +1276,7 @@ public:
|
||||
tc->writeBuf.append(data);
|
||||
}
|
||||
|
||||
_phy.tcpSetNotifyWritable(tc->sock,true);
|
||||
_phy.setNotifyWritable(tc->sock,true);
|
||||
}
|
||||
|
||||
inline void onHttpResponseFromClient(TcpConnection *tc)
|
||||
@ -1241,6 +1311,7 @@ private:
|
||||
Phy<OneServiceImpl *> _phy;
|
||||
std::string _overrideRootTopology;
|
||||
Node *_node;
|
||||
InetAddress _v4LocalAddress,_v6LocalAddress;
|
||||
PhySocket *_v4UdpSocket;
|
||||
PhySocket *_v6UdpSocket;
|
||||
PhySocket *_v4TcpListenSocket;
|
||||
@ -1265,24 +1336,26 @@ private:
|
||||
unsigned int _port;
|
||||
|
||||
#ifdef ZT_USE_MINIUPNPC
|
||||
UPNPClient _upnpClient;
|
||||
InetAddress _v4UpnpLocalAddress;
|
||||
PhySocket *_v4UpnpUdpSocket;
|
||||
UPNPClient *_upnpClient;
|
||||
#endif
|
||||
|
||||
bool _run;
|
||||
Mutex _run_m;
|
||||
};
|
||||
|
||||
static int SnodeVirtualNetworkConfigFunction(ZT1_Node *node,void *uptr,uint64_t nwid,enum ZT1_VirtualNetworkConfigOperation op,const ZT1_VirtualNetworkConfig *nwconf)
|
||||
static int SnodeVirtualNetworkConfigFunction(ZT_Node *node,void *uptr,uint64_t nwid,enum ZT_VirtualNetworkConfigOperation op,const ZT_VirtualNetworkConfig *nwconf)
|
||||
{ return reinterpret_cast<OneServiceImpl *>(uptr)->nodeVirtualNetworkConfigFunction(nwid,op,nwconf); }
|
||||
static void SnodeEventCallback(ZT1_Node *node,void *uptr,enum ZT1_Event event,const void *metaData)
|
||||
static void SnodeEventCallback(ZT_Node *node,void *uptr,enum ZT_Event event,const void *metaData)
|
||||
{ reinterpret_cast<OneServiceImpl *>(uptr)->nodeEventCallback(event,metaData); }
|
||||
static long SnodeDataStoreGetFunction(ZT1_Node *node,void *uptr,const char *name,void *buf,unsigned long bufSize,unsigned long readIndex,unsigned long *totalSize)
|
||||
static long SnodeDataStoreGetFunction(ZT_Node *node,void *uptr,const char *name,void *buf,unsigned long bufSize,unsigned long readIndex,unsigned long *totalSize)
|
||||
{ return reinterpret_cast<OneServiceImpl *>(uptr)->nodeDataStoreGetFunction(name,buf,bufSize,readIndex,totalSize); }
|
||||
static int SnodeDataStorePutFunction(ZT1_Node *node,void *uptr,const char *name,const void *data,unsigned long len,int secure)
|
||||
static int SnodeDataStorePutFunction(ZT_Node *node,void *uptr,const char *name,const void *data,unsigned long len,int secure)
|
||||
{ return reinterpret_cast<OneServiceImpl *>(uptr)->nodeDataStorePutFunction(name,data,len,secure); }
|
||||
static int SnodeWirePacketSendFunction(ZT1_Node *node,void *uptr,const struct sockaddr_storage *addr,const void *data,unsigned int len)
|
||||
{ return reinterpret_cast<OneServiceImpl *>(uptr)->nodeWirePacketSendFunction(addr,data,len); }
|
||||
static void SnodeVirtualNetworkFrameFunction(ZT1_Node *node,void *uptr,uint64_t nwid,uint64_t sourceMac,uint64_t destMac,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
|
||||
static int SnodeWirePacketSendFunction(ZT_Node *node,void *uptr,const struct sockaddr_storage *localAddr,const struct sockaddr_storage *addr,const void *data,unsigned int len)
|
||||
{ return reinterpret_cast<OneServiceImpl *>(uptr)->nodeWirePacketSendFunction(localAddr,addr,data,len); }
|
||||
static void SnodeVirtualNetworkFrameFunction(ZT_Node *node,void *uptr,uint64_t nwid,uint64_t sourceMac,uint64_t destMac,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
|
||||
{ reinterpret_cast<OneServiceImpl *>(uptr)->nodeVirtualNetworkFrameFunction(nwid,sourceMac,destMac,etherType,vlanId,data,len); }
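SnodeWirePacketSendFunction() now carries the packet's requested local (source) address through to the service, which is what lets nodeWirePacketSendFunction() above send uPnP-bound traffic out of its dedicated socket. A hypothetical sketch of that dispatch decision in isolation; the function and parameter names here are illustrative, not part of this codebase.

// Illustrative only: choosing the bound socket that matches the requested
// local address, as nodeWirePacketSendFunction() does above.
static PhySocket *pickSendSocket(const struct sockaddr_storage *localAddr,
	PhySocket *v4,PhySocket *v6,PhySocket *v4Upnp,const InetAddress &v4UpnpLocalAddress)
{
	if (localAddr) {
		if (localAddr->ss_family == AF_INET) {
			// The uPnP socket is selected by matching its bound port, as above.
			if (reinterpret_cast<const struct sockaddr_in *>(localAddr)->sin_port ==
			    reinterpret_cast<const struct sockaddr_in *>(&v4UpnpLocalAddress)->sin_port)
				return v4Upnp;
			return v4;
		}
		if (localAddr->ss_family == AF_INET6)
			return v6;
	}
	return v4; // unspecified local address: use the default IPv4 socket
}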
|
||||
|
||||
static void StapFrameHandler(void *uptr,uint64_t nwid,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
|
||||
|
@ -89,8 +89,12 @@ public:
 * Once created, you must call the run() method to actually start
 * processing.
 *
 * The port is saved to a file in the home path called zerotier-one.port,
 * which is used by the CLI and can be used to see which port was chosen if
 * 0 (random port) is picked.
 *
 * @param hp Home path
 * @param port TCP and UDP port for packets and HTTP control
 * @param port TCP and UDP port for packets and HTTP control (if 0, pick random port)
 * @param overrideRootTopology String-serialized root topology (for testing, default: NULL)
 */
static OneService *newInstance(
@ -41,6 +41,6 @@
/**
 * Revision
 */
#define ZEROTIER_ONE_VERSION_REVISION 5
#define ZEROTIER_ONE_VERSION_REVISION 6

#endif
|
@ -90,7 +90,7 @@ restart_node:
|
||||
_service = (ZeroTier::OneService *)0; // in case newInstance() fails
|
||||
_service = ZeroTier::OneService::newInstance(
|
||||
ZeroTier::OneService::platformDefaultHomePath().c_str(),
|
||||
ZT1_DEFAULT_PORT);
|
||||
ZT_DEFAULT_PORT);
|
||||
}
|
||||
switch(_service->run()) {
|
||||
case ZeroTier::OneService::ONE_UNRECOVERABLE_ERROR: {
|
||||
|