Work in progress on Cluster for new root infrastructure, multi-homing.

Adam Ierymenko 2015-10-14 14:12:12 -07:00
parent c312ae221f
commit 619e113748
10 changed files with 794 additions and 16 deletions

make-mac.mk

@@ -6,7 +6,7 @@ ifeq ($(origin CXX),default)
endif
INCLUDES=
DEFS=
DEFS=-DZT_ENABLE_CLUSTER
LIBS=
ARCH_FLAGS=-arch x86_64

node/Cluster.cpp (new file, 398 lines)

@@ -0,0 +1,398 @@
/*
* ZeroTier One - Network Virtualization Everywhere
* Copyright (C) 2011-2015 ZeroTier, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* --
*
* ZeroTier may be used and distributed under the terms of the GPLv3, which
* are available at: http://www.gnu.org/licenses/gpl-3.0.html
*
* If you would like to embed ZeroTier into a commercial application or
* redistribute it in a modified binary form, please contact ZeroTier Networks
* LLC. Start here: http://www.zerotier.com/
*/
#ifdef ZT_ENABLE_CLUSTER
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <utility>
#include "Cluster.hpp"
#include "RuntimeEnvironment.hpp"
#include "MulticastGroup.hpp"
#include "CertificateOfMembership.hpp"
#include "Salsa20.hpp"
#include "Poly1305.hpp"
#include "Packet.hpp"
#include "Peer.hpp"
#include "Switch.hpp"
#include "Node.hpp"
namespace ZeroTier {
Cluster::Cluster(const RuntimeEnvironment *renv,uint16_t id,DistanceAlgorithm da,int32_t x,int32_t y,int32_t z,void (*sendFunction)(void *,uint16_t,const void *,unsigned int),void *arg) :
RR(renv),
_sendFunction(sendFunction),
_arg(arg),
_x(x),
_y(y),
_z(z),
_da(da),
_id(id)
{
uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
// Generate master secret by hashing the secret from our Identity key pair
RR->identity.sha512PrivateKey(_masterSecret);
// Generate our inbound message key, which is the master secret XORed with our ID and hashed twice
memcpy(stmp,_masterSecret,sizeof(stmp));
stmp[0] ^= Utils::hton(id);
SHA512::hash(stmp,stmp,sizeof(stmp));
SHA512::hash(stmp,stmp,sizeof(stmp));
memcpy(_key,stmp,sizeof(_key));
Utils::burn(stmp,sizeof(stmp));
}
Cluster::~Cluster()
{
Utils::burn(_masterSecret,sizeof(_masterSecret));
Utils::burn(_key,sizeof(_key));
}
void Cluster::handleIncomingStateMessage(const void *msg,unsigned int len)
{
Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> dmsg;
{
// FORMAT: <[16] iv><[8] MAC><... data>
if ((len < 24)||(len > ZT_CLUSTER_MAX_MESSAGE_LENGTH))
return;
// 16-byte IV: first 8 bytes XORed with key, last 8 bytes used as Salsa20 64-bit IV
char keytmp[32];
memcpy(keytmp,_key,32);
for(int i=0;i<8;++i)
keytmp[i] ^= reinterpret_cast<const char *>(msg)[i];
Salsa20 s20(keytmp,256,reinterpret_cast<const char *>(msg) + 8);
Utils::burn(keytmp,sizeof(keytmp));
// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
char polykey[ZT_POLY1305_KEY_LEN];
memset(polykey,0,sizeof(polykey));
s20.encrypt12(polykey,polykey,sizeof(polykey));
// Compute 16-byte MAC
char mac[ZT_POLY1305_MAC_LEN];
Poly1305::compute(mac,reinterpret_cast<const char *>(msg) + 24,len - 24,polykey);
// Check first 8 bytes of MAC against 64-bit MAC in stream
if (!Utils::secureEq(mac,reinterpret_cast<const char *>(msg) + 16,8))
return;
// Decrypt!
dmsg.setSize(len - 16);
s20.decrypt12(reinterpret_cast<const char *>(msg) + 16,const_cast<void *>(dmsg.data()),dmsg.size());
}
if (dmsg.size() < 2)
return;
const uint16_t fromMemberId = dmsg.at<uint16_t>(0);
unsigned int ptr = 2;
_Member &m = _members[fromMemberId];
Mutex::Lock mlck(m.lock);
m.lastReceivedFrom = RR->node->now();
try {
while (ptr < dmsg.size()) {
const unsigned int mlen = dmsg.at<uint16_t>(ptr); ptr += 2;
const unsigned int nextPtr = ptr + mlen;
int mtype = -1;
try {
switch((StateMessageType)(mtype = (int)dmsg[ptr++])) {
default:
break;
case STATE_MESSAGE_ALIVE: {
ptr += 7; // skip version stuff, not used yet
m.x = dmsg.at<int32_t>(ptr); ptr += 4;
m.y = dmsg.at<int32_t>(ptr); ptr += 4;
m.z = dmsg.at<int32_t>(ptr); ptr += 4;
ptr += 8; // skip local clock, not used
m.load = dmsg.at<uint64_t>(ptr); ptr += 8;
ptr += 8; // skip flags, unused
m.physicalAddressCount = dmsg[ptr++];
if (m.physicalAddressCount > ZT_CLUSTER_MEMBER_MAX_PHYSICAL_ADDRS)
m.physicalAddressCount = ZT_CLUSTER_MEMBER_MAX_PHYSICAL_ADDRS;
for(unsigned int i=0;i<m.physicalAddressCount;++i)
ptr += m.physicalAddresses[i].deserialize(dmsg,ptr);
m.lastReceivedAliveAnnouncement = RR->node->now();
} break;
case STATE_MESSAGE_HAVE_PEER: {
try {
Identity id;
ptr += id.deserialize(dmsg,ptr);
RR->topology->saveIdentity(id);
{ // Add or update peer affinity entry
_PeerAffinity pa(id.address(),fromMemberId,RR->node->now());
Mutex::Lock _l2(_peerAffinities_m);
std::vector<_PeerAffinity>::iterator i(std::lower_bound(_peerAffinities.begin(),_peerAffinities.end(),pa)); // O(log(n))
if ((i != _peerAffinities.end())&&(i->key == pa.key)) {
i->timestamp = pa.timestamp;
} else {
_peerAffinities.push_back(pa);
std::sort(_peerAffinities.begin(),_peerAffinities.end()); // probably a more efficient way to insert but okay for now
}
}
} catch ( ... ) {
// ignore invalid identities
}
} break;
case STATE_MESSAGE_MULTICAST_LIKE: {
const uint64_t nwid = dmsg.at<uint64_t>(ptr); ptr += 8;
const Address address(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH); ptr += ZT_ADDRESS_LENGTH;
const MAC mac(dmsg.field(ptr,6),6); ptr += 6;
const uint32_t adi = dmsg.at<uint32_t>(ptr); ptr += 4;
RR->mc->add(RR->node->now(),nwid,MulticastGroup(mac,adi),address);
} break;
case STATE_MESSAGE_COM: {
// TODO: not used yet
} break;
case STATE_MESSAGE_RELAY: {
const unsigned int numRemotePeerPaths = dmsg[ptr++];
InetAddress remotePeerPaths[256]; // size is 8-bit, so 256 is max
for(unsigned int i=0;i<numRemotePeerPaths;++i)
ptr += remotePeerPaths[i].deserialize(dmsg,ptr);
const unsigned int packetLen = dmsg.at<uint16_t>(ptr); ptr += 2;
const void *packet = (const void *)dmsg.field(ptr,packetLen); ptr += packetLen;
if (packetLen >= ZT_PROTO_MIN_FRAGMENT_LENGTH) { // ignore anything too short to contain a dest address
const Address destinationAddress(reinterpret_cast<const char *>(packet) + 8,ZT_ADDRESS_LENGTH);
SharedPtr<Peer> destinationPeer(RR->topology->getPeer(destinationAddress));
if (destinationPeer) {
RemotePath *destinationPath = destinationPeer->send(RR,packet,packetLen,RR->node->now());
if ((destinationPath)&&(numRemotePeerPaths > 0)&&(packetLen >= 18)&&(reinterpret_cast<const unsigned char *>(packet)[ZT_PACKET_FRAGMENT_IDX_FRAGMENT_INDICATOR] == ZT_PACKET_FRAGMENT_INDICATOR)) {
// If remote peer paths were sent with this relayed packet, we do
// RENDEZVOUS. It's handled here for cluster-relayed packets since
// we don't have both Peer records so this is a different path.
const Address remotePeerAddress(reinterpret_cast<const char *>(packet) + 13,ZT_ADDRESS_LENGTH);
InetAddress bestDestV4,bestDestV6;
destinationPeer->getBestActiveAddresses(RR->node->now(),bestDestV4,bestDestV6);
InetAddress bestRemoteV4,bestRemoteV6;
for(unsigned int i=0;i<numRemotePeerPaths;++i) {
if ((bestRemoteV4)&&(bestRemoteV6))
break;
switch(remotePeerPaths[i].ss_family) {
case AF_INET:
if (!bestRemoteV4)
bestRemoteV4 = remotePeerPaths[i];
break;
case AF_INET6:
if (!bestRemoteV6)
bestRemoteV6 = remotePeerPaths[i];
break;
}
}
Packet rendezvousForDest(destinationAddress,RR->identity.address(),Packet::VERB_RENDEZVOUS);
rendezvousForDest.append((uint8_t)0);
remotePeerAddress.appendTo(rendezvousForDest);
Buffer<2048> rendezvousForOtherEnd;
rendezvousForOtherEnd.addSize(2); // leave room for payload size
rendezvousForOtherEnd.append((uint8_t)STATE_MESSAGE_PROXY_SEND);
remotePeerAddress.appendTo(rendezvousForOtherEnd);
rendezvousForOtherEnd.append((uint8_t)Packet::VERB_RENDEZVOUS);
const unsigned int rendezvousForOtherEndPayloadSizePtr = rendezvousForOtherEnd.size();
rendezvousForOtherEnd.addSize(2); // space for actual packet payload length
rendezvousForOtherEnd.append((uint8_t)0); // flags == 0
destinationAddress.appendTo(rendezvousForOtherEnd);
bool haveMatch = false;
if ((bestDestV6)&&(bestRemoteV6)) {
haveMatch = true;
rendezvousForDest.append((uint16_t)bestRemoteV6.port());
rendezvousForDest.append((uint8_t)16);
rendezvousForDest.append(bestRemoteV6.rawIpData(),16);
rendezvousForOtherEnd.append((uint16_t)bestDestV6.port());
rendezvousForOtherEnd.append((uint8_t)16);
rendezvousForOtherEnd.append(bestDestV6.rawIpData(),16);
rendezvousForOtherEnd.setAt<uint16_t>(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 16));
} else if ((bestDestV4)&&(bestRemoteV4)) {
haveMatch = true;
rendezvousForDest.append((uint16_t)bestRemoteV4.port());
rendezvousForDest.append((uint8_t)4);
rendezvousForDest.append(bestRemoteV4.rawIpData(),4);
rendezvousForOtherEnd.append((uint16_t)bestDestV4.port());
rendezvousForOtherEnd.append((uint8_t)4);
rendezvousForOtherEnd.append(bestDestV4.rawIpData(),4);
rendezvousForOtherEnd.setAt<uint16_t>(rendezvousForOtherEndPayloadSizePtr,(uint16_t)(9 + 4));
}
if (haveMatch) {
RR->sw->send(rendezvousForDest,true,0);
rendezvousForOtherEnd.setAt<uint16_t>(0,(uint16_t)(rendezvousForOtherEnd.size() - 2));
_send(fromMemberId,rendezvousForOtherEnd.data(),rendezvousForOtherEnd.size());
}
}
}
}
} break;
case STATE_MESSAGE_PROXY_SEND: {
const Address rcpt(dmsg.field(ptr,ZT_ADDRESS_LENGTH),ZT_ADDRESS_LENGTH);
const Packet::Verb verb = (Packet::Verb)dmsg[ptr++];
const unsigned int len = dmsg.at<uint16_t>(ptr); ptr += 2;
Packet outp(rcpt,RR->identity.address(),verb);
outp.append(dmsg.field(ptr,len),len);
RR->sw->send(outp,true,0);
} break;
}
} catch ( ... ) {
TRACE("invalid message of size %u type %d (inner decode), discarding",mlen,mtype);
// drop invalids
}
ptr = nextPtr;
}
} catch ( ... ) {
TRACE("invalid message (outer loop), discarding");
// drop invalids
}
}
void Cluster::replicateHavePeer(const Address &peerAddress)
{
}
void Cluster::replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group)
{
}
void Cluster::replicateCertificateOfNetworkMembership(const CertificateOfMembership &com)
{
}
void Cluster::doPeriodicTasks()
{
// Go ahead and flush whenever possible right now
{
Mutex::Lock _l(_memberIds_m);
for(std::vector<uint16_t>::const_iterator mid(_memberIds.begin());mid!=_memberIds.end();++mid) {
Mutex::Lock _l2(_members[*mid].lock);
_flush(*mid);
}
}
}
void Cluster::addMember(uint16_t memberId)
{
Mutex::Lock _l2(_members[memberId].lock);
Mutex::Lock _l(_memberIds_m);
_memberIds.push_back(memberId);
std::sort(_memberIds.begin(),_memberIds.end());
// Generate this member's message key from the master and its ID
uint16_t stmp[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
memcpy(stmp,_masterSecret,sizeof(stmp));
stmp[0] ^= Utils::hton(memberId);
SHA512::hash(stmp,stmp,sizeof(stmp));
SHA512::hash(stmp,stmp,sizeof(stmp));
memcpy(_members[memberId].key,stmp,sizeof(_members[memberId].key));
Utils::burn(stmp,sizeof(stmp));
// Prepare q
_members[memberId].q.clear();
char iv[16];
Utils::getSecureRandom(iv,16);
_members[memberId].q.append(iv,16);
_members[memberId].q.addSize(8); // room for MAC
}
void Cluster::_send(uint16_t memberId,const void *msg,unsigned int len)
{
_Member &m = _members[memberId];
// assumes m.lock is locked!
for(;;) {
if ((m.q.size() + len) > ZT_CLUSTER_MAX_MESSAGE_LENGTH)
_flush(memberId);
else {
m.q.append(msg,len);
break;
}
}
}
void Cluster::_flush(uint16_t memberId)
{
_Member &m = _members[memberId];
// assumes m.lock is locked!
if (m.q.size() > 24) {
// Create key from member's key and IV
char keytmp[32];
memcpy(keytmp,m.key,32);
for(int i=0;i<8;++i)
keytmp[i] ^= m.q[i];
Salsa20 s20(keytmp,256,m.q.field(8,8));
Utils::burn(keytmp,sizeof(keytmp));
// One-time-use Poly1305 key from first 32 bytes of Salsa20 keystream (as per DJB/NaCl "standard")
char polykey[ZT_POLY1305_KEY_LEN];
memset(polykey,0,sizeof(polykey));
s20.encrypt12(polykey,polykey,sizeof(polykey));
// Encrypt m.q in place
s20.encrypt12(reinterpret_cast<const char *>(m.q.data()) + 24,const_cast<char *>(reinterpret_cast<const char *>(m.q.data())) + 24,m.q.size() - 24);
// Add MAC for authentication (encrypt-then-MAC)
char mac[ZT_POLY1305_MAC_LEN];
Poly1305::compute(mac,reinterpret_cast<const char *>(m.q.data()) + 24,m.q.size() - 24,polykey);
memcpy(m.q.field(16,8),mac,8);
// Send!
_sendFunction(_arg,memberId,m.q.data(),m.q.size());
// Prepare for more
m.q.clear();
char iv[16];
Utils::getSecureRandom(iv,16);
m.q.append(iv,16);
m.q.addSize(8); // room for MAC
}
}
} // namespace ZeroTier
#endif // ZT_ENABLE_CLUSTER
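For illustration: the parser above handles STATE_MESSAGE_ALIVE, but this work-in-progress commit does not yet contain code that composes one. Below is a minimal sketch of what the sending side might look like, following the field layout documented in node/Cluster.hpp and the <[2] length><[1] type><payload> inner framing used by _send()/_flush(). The helper name _announceAlive, the myEndpoints parameter, and the zeroed version/load placeholders are assumptions, not part of the commit.

// Hypothetical helper (not in this commit): compose and queue a STATE_MESSAGE_ALIVE
// for one member, mirroring the layout parsed in handleIncomingStateMessage().
void Cluster::_announceAlive(uint16_t memberId,const std::vector<InetAddress> &myEndpoints)
{
	Buffer<2048> alive;
	alive.addSize(2);                           // room for 16-bit message length
	alive.append((uint8_t)STATE_MESSAGE_ALIVE); // message type
	alive.append((uint16_t)0);                  // version minor (placeholder)
	alive.append((uint16_t)0);                  // version major (placeholder)
	alive.append((uint16_t)0);                  // version revision (placeholder)
	alive.append((uint8_t)0);                   // protocol version (placeholder)
	alive.append((int32_t)_x);                  // X location
	alive.append((int32_t)_y);                  // Y location
	alive.append((int32_t)_z);                  // Z location
	alive.append((uint64_t)RR->node->now());    // local clock at this member
	alive.append((uint64_t)0);                  // load average (placeholder)
	alive.append((uint64_t)0);                  // flags, currently must be zero
	unsigned int ecount = (unsigned int)myEndpoints.size();
	if (ecount > ZT_CLUSTER_MEMBER_MAX_PHYSICAL_ADDRS)
		ecount = ZT_CLUSTER_MEMBER_MAX_PHYSICAL_ADDRS;
	alive.append((uint8_t)ecount);              // number of preferred endpoints
	for(unsigned int i=0;i<ecount;++i)
		myEndpoints[i].serialize(alive);        // InetAddress wire form, as the parser expects
	alive.setAt<uint16_t>(0,(uint16_t)(alive.size() - 2)); // fill in length (type + payload)
	Mutex::Lock _l(_members[memberId].lock);    // _send() assumes the member lock is held
	_send(memberId,alive.data(),alive.size());  // queued; _flush() encrypts, MACs, and sends
}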

node/Cluster.hpp (new file, 331 lines)

@@ -0,0 +1,331 @@
/*
* ZeroTier One - Network Virtualization Everywhere
* Copyright (C) 2011-2015 ZeroTier, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* --
*
* ZeroTier may be used and distributed under the terms of the GPLv3, which
* are available at: http://www.gnu.org/licenses/gpl-3.0.html
*
* If you would like to embed ZeroTier into a commercial application or
* redistribute it in a modified binary form, please contact ZeroTier Networks
* LLC. Start here: http://www.zerotier.com/
*/
#ifndef ZT_CLUSTER_HPP
#define ZT_CLUSTER_HPP
#ifdef ZT_ENABLE_CLUSTER
#include <vector>
#include <algorithm>
#include "Constants.hpp"
#include "Address.hpp"
#include "InetAddress.hpp"
#include "SHA512.hpp"
#include "Utils.hpp"
#include "Buffer.hpp"
#include "Mutex.hpp"
/**
* Timeout for cluster members being considered "alive"
*/
#define ZT_CLUSTER_TIMEOUT ZT_PEER_ACTIVITY_TIMEOUT
/**
* Maximum cluster message length in bytes
*
* Cluster nodes speak via TCP, with data encapsulated into individually
* encrypted and authenticated messages. The maximum message size is
* 65535 (0xffff) since the TCP stream uses 16-bit message size headers
* (and this is a reasonable chunk size anyway).
*/
#define ZT_CLUSTER_MAX_MESSAGE_LENGTH 65535
/**
* Maximum number of physical addresses we will cache for a cluster member
*/
#define ZT_CLUSTER_MEMBER_MAX_PHYSICAL_ADDRS 8
namespace ZeroTier {
class RuntimeEnvironment;
class CertificateOfMembership;
class MulticastGroup;
/**
* Multi-homing cluster state replication and packet relaying
*
* Multi-homing means more than one node sharing the same ZeroTier identity.
* There is nothing in the protocol to prevent this, but to make it work well
* requires the devices sharing an identity to cooperate and share some
* information.
*
* There are three use cases we want to fulfill:
*
* (1) Multi-homing of root servers with handoff for efficient routing,
* HA, and load balancing across many commodity nodes.
* (2) Multi-homing of network controllers for the same reason.
* (3) Multi-homing of nodes on virtual networks, such as domain servers
* and other important endpoints.
*
* These use cases are in order of escalating difficulty. The initial
* version of Cluster is aimed at satisfying the first, though you are
* free to try #2 and #3.
*/
class Cluster
{
public:
/**
* Which distance algorithm is this cluster using?
*/
enum DistanceAlgorithm
{
/**
* Simple linear distance in three dimensions
*/
DISTANCE_SIMPLE = 0,
/**
* Haversine formula using X,Y as lat,long and ignoring Z
*/
DISTANCE_HAVERSINE = 1
};
/**
* State message types
*/
enum StateMessageType
{
STATE_MESSAGE_NOP = 0,
/**
* This cluster member is alive:
* <[2] version minor>
* <[2] version major>
* <[2] version revision>
* <[1] protocol version>
* <[4] X location (signed 32-bit)>
* <[4] Y location (signed 32-bit)>
* <[4] Z location (signed 32-bit)>
* <[8] local clock at this member>
* <[8] load average>
* <[8] flags (currently unused, must be zero)>
* <[1] number of preferred ZeroTier endpoints>
* <[...] InetAddress(es) of preferred ZeroTier endpoint(s)>
*/
STATE_MESSAGE_ALIVE = 1,
/**
* Cluster member has this peer:
* <[...] binary serialized peer identity>
*/
STATE_MESSAGE_HAVE_PEER = 2,
/**
* Peer subscription to multicast group:
* <[8] network ID>
* <[5] peer ZeroTier address>
* <[6] MAC address of multicast group>
* <[4] 32-bit multicast group ADI>
*/
STATE_MESSAGE_MULTICAST_LIKE = 3,
/**
* Certificate of network membership for a peer:
* <[...] serialized COM>
*/
STATE_MESSAGE_COM = 4,
/**
* Relay a packet to a peer:
* <[1] 8-bit number of sending peer active path addresses>
* <[...] series of serialized InetAddresses of sending peer's paths>
* <[2] 16-bit packet length>
* <[...] packet or packet fragment>
*/
STATE_MESSAGE_RELAY = 5,
/**
* Request to send a packet to a locally-known peer:
* <[5] ZeroTier address of recipient>
* <[1] packet verb>
* <[2] length of packet payload>
* <[...] packet payload>
*
* This differs from RELAY in that it requests the receiving cluster
* member to actually compose a ZeroTier Packet from itself to the
* provided recipient. RELAY simply says "please forward this blob."
* RELAY is used to implement peer-to-peer relaying with RENDEZVOUS,
* while PROXY_SEND is used to implement proxy sending (which right
* now is only used to send RENDEZVOUS).
*/
STATE_MESSAGE_PROXY_SEND = 6
};
/**
* Construct a new cluster
*
* @param renv Runtime environment
* @param id This member's ID in the cluster
* @param da Distance algorithm this cluster uses to compute distance and hand off peers
* @param x My X
* @param y My Y
* @param z My Z
* @param sendFunction Function to call to send messages to other cluster members
* @param arg First argument to sendFunction
*/
Cluster(
const RuntimeEnvironment *renv,
uint16_t id,
DistanceAlgorithm da,
int32_t x,
int32_t y,
int32_t z,
void (*sendFunction)(void *,uint16_t,const void *,unsigned int),
void *arg);
~Cluster();
/**
* @return This cluster member's ID
*/
inline uint16_t id() const throw() { return _id; }
/**
* Handle an incoming intra-cluster message
*
* @param data Message data
* @param len Message length (max: ZT_CLUSTER_MAX_MESSAGE_LENGTH)
*/
void handleIncomingStateMessage(const void *msg,unsigned int len);
/**
* Advertise to the cluster that we have this peer
*
* @param peerAddress Peer address that we have
*/
void replicateHavePeer(const Address &peerAddress);
/**
* Advertise a multicast LIKE to the cluster
*
* @param nwid Network ID
* @param peerAddress Peer address that sent LIKE
* @param group Multicast group
*/
void replicateMulticastLike(uint64_t nwid,const Address &peerAddress,const MulticastGroup &group);
/**
* Advertise a network COM to the cluster
*
* @param com Certificate of network membership (contains peer and network ID)
*/
void replicateCertificateOfNetworkMembership(const CertificateOfMembership &com);
/**
* This should be called no less frequently than once every 10 seconds.
*/
void doPeriodicTasks();
/**
* Add a member ID to this cluster
*
* @param memberId Member ID
*/
void addMember(uint16_t memberId);
private:
void _send(uint16_t memberId,const void *msg,unsigned int len);
void _flush(uint16_t memberId);
// These are initialized in the constructor and remain static
uint16_t _masterSecret[ZT_SHA512_DIGEST_LEN / sizeof(uint16_t)];
unsigned char _key[ZT_PEER_SECRET_KEY_LENGTH];
const RuntimeEnvironment *RR;
void (*_sendFunction)(void *,uint16_t,const void *,unsigned int);
void *_arg;
const int32_t _x;
const int32_t _y;
const int32_t _z;
const DistanceAlgorithm _da;
const uint16_t _id;
struct _Member
{
unsigned char key[ZT_PEER_SECRET_KEY_LENGTH];
uint64_t lastReceivedFrom;
uint64_t lastReceivedAliveAnnouncement;
uint64_t lastSentTo;
uint64_t lastAnnouncedAliveTo;
uint64_t load;
int32_t x,y,z;
InetAddress physicalAddresses[ZT_CLUSTER_MEMBER_MAX_PHYSICAL_ADDRS];
unsigned int physicalAddressCount;
Buffer<ZT_CLUSTER_MAX_MESSAGE_LENGTH> q;
Mutex lock;
_Member() :
lastReceivedFrom(0),
lastReceivedAliveAnnouncement(0),
lastSentTo(0),
lastAnnouncedAliveTo(0),
load(0),
x(0),
y(0),
z(0),
physicalAddressCount(0) {}
~_Member() { Utils::burn(key,sizeof(key)); }
};
_Member _members[65536]; // cluster IDs can be from 0 to 65535 (16-bit)
std::vector<uint16_t> _memberIds;
Mutex _memberIds_m;
// Record tracking which members have which peers and how recently they claimed this
struct _PeerAffinity
{
_PeerAffinity(const Address &a,uint16_t mid,uint64_t ts) :
key((a.toInt() << 16) | (uint64_t)mid),
timestamp(ts) {}
uint64_t key;
uint64_t timestamp;
inline Address address() const throw() { return Address(key >> 16); }
inline uint16_t clusterMemberId() const throw() { return (uint16_t)(key & 0xffff); }
inline bool operator<(const _PeerAffinity &pi) const throw() { return (key < pi.key); }
};
// A memory-efficient packed map of _PeerAffinity records searchable with std::binary_search() and std::lower_bound()
std::vector<_PeerAffinity> _peerAffinities;
Mutex _peerAffinities_m;
};
} // namespace ZeroTier
#endif // ZT_ENABLE_CLUSTER
#endif
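The DISTANCE_HAVERSINE option declared above has no corresponding distance computation anywhere in this commit yet. As a self-contained sketch of the formula the comment refers to (treating X and Y as latitude and longitude, ignoring Z), the snippet below assumes plain degrees in doubles purely for illustration; the commit does not fix how coordinates are encoded in the int32_t fields, and the function name haversineKm is made up here.

// Illustrative only (not in this commit): great-circle distance in kilometers
// between two members, treating X,Y as latitude,longitude in degrees and ignoring Z.
#include <cmath>

static double haversineKm(double lat1,double lon1,double lat2,double lon2)
{
	static const double R = 6371.0;                        // mean Earth radius (km)
	static const double D2R = 3.14159265358979323846 / 180.0;
	const double dLat = (lat2 - lat1) * D2R;
	const double dLon = (lon2 - lon1) * D2R;
	const double a = std::sin(dLat * 0.5) * std::sin(dLat * 0.5)
		+ std::cos(lat1 * D2R) * std::cos(lat2 * D2R)
		* std::sin(dLon * 0.5) * std::sin(dLon * 0.5);
	return R * 2.0 * std::atan2(std::sqrt(a),std::sqrt(1.0 - a));
}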

node/Constants.hpp

@@ -182,7 +182,7 @@
#define ZT_FRAGMENTED_PACKET_RECEIVE_TIMEOUT 1000
/**
* Length of secret key in bytes -- 256-bit for Salsa20
* Length of secret key in bytes -- 256-bit -- do not change
*/
#define ZT_PEER_SECRET_KEY_LENGTH 32

node/Identity.hpp

@@ -38,6 +38,7 @@
#include "Address.hpp"
#include "C25519.hpp"
#include "Buffer.hpp"
#include "SHA512.hpp"
namespace ZeroTier {
@@ -91,8 +92,7 @@ public:
}
template<unsigned int C>
Identity(const Buffer<C> &b,unsigned int startAt = 0)
throw(std::out_of_range,std::invalid_argument) :
Identity(const Buffer<C> &b,unsigned int startAt = 0) :
_privateKey((C25519::Private *)0)
{
deserialize(b,startAt);
@@ -137,6 +137,21 @@ public:
*/
inline bool hasPrivate() const throw() { return (_privateKey != (C25519::Private *)0); }
/**
* Compute the SHA512 hash of our private key (if we have one)
*
* @param sha Buffer to receive SHA512 (MUST be ZT_SHA512_DIGEST_LEN (64) bytes in length)
* @return True on success, false if no private key
*/
inline bool sha512PrivateKey(void *sha) const
{
if (_privateKey) {
SHA512::hash(sha,_privateKey->data,ZT_C25519_PRIVATE_KEY_LEN);
return true;
}
return false;
}
/**
* Sign a message with this identity (private key required)
*

node/Packet.hpp

@@ -623,6 +623,10 @@ public:
* may also ignore these messages if a peer is not known or is not being
* actively communicated with.
*
* Unfortunately the physical address format in this message pre-dates
* InetAddress's serialization format. :( ZeroTier is four years old and
* yes we've accumulated a tiny bit of cruft here and there.
*
* No OK or ERROR is generated.
*/
VERB_RENDEZVOUS = 5,

node/Peer.hpp

@@ -560,7 +560,8 @@ private:
void _sortPaths(const uint64_t now);
RemotePath *_getBestPath(const uint64_t now);
unsigned char _key[ZT_PEER_SECRET_KEY_LENGTH];
unsigned char _key[ZT_PEER_SECRET_KEY_LENGTH]; // computed with key agreement, not serialized
uint64_t _lastUsed;
uint64_t _lastReceive; // direct or indirect
uint64_t _lastUnicastFrame;

node/Topology.cpp

@@ -136,7 +136,7 @@ SharedPtr<Peer> Topology::addPeer(const SharedPtr<Peer> &peer)
SharedPtr<Peer> &p = _peers.set(peer->address(),peer);
p->use(now);
_saveIdentity(p->identity());
saveIdentity(p->identity());
return p;
}
@@ -172,6 +172,26 @@ SharedPtr<Peer> Topology::getPeer(const Address &zta)
return SharedPtr<Peer>();
}
Identity Topology::getIdentity(const Address &zta)
{
{
Mutex::Lock _l(_lock);
SharedPtr<Peer> &ap = _peers[zta];
if (ap)
return ap->identity();
}
return _getIdentity(zta);
}
void Topology::saveIdentity(const Identity &id)
{
if (id) {
char p[128];
Utils::snprintf(p,sizeof(p),"iddb.d/%.10llx",(unsigned long long)id.address().toInt());
RR->node->dataStorePut(p,id.toString(false),false);
}
}
SharedPtr<Peer> Topology::getBestRoot(const Address *avoid,unsigned int avoidCount,bool strictAvoid)
{
SharedPtr<Peer> bestRoot;
@@ -315,15 +335,6 @@ Identity Topology::_getIdentity(const Address &zta)
return Identity();
}
void Topology::_saveIdentity(const Identity &id)
{
if (id) {
char p[128];
Utils::snprintf(p,sizeof(p),"iddb.d/%.10llx",(unsigned long long)id.address().toInt());
RR->node->dataStorePut(p,id.toString(false),false);
}
}
void Topology::_setWorld(const World &newWorld)
{
// assumed _lock is locked (or in constructor)

node/Topology.hpp

@@ -78,6 +78,24 @@ public:
*/
SharedPtr<Peer> getPeer(const Address &zta);
/**
* Get the identity of a peer
*
* @param zta ZeroTier address of peer
* @return Identity or NULL Identity if not found
*/
Identity getIdentity(const Address &zta);
/**
* Cache an identity
*
* This is done automatically on addPeer(), and so is only useful for
* cluster identity replication.
*
* @param id Identity to cache
*/
void saveIdentity(const Identity &id);
/**
* @return Vector of peers that are root servers
*/
@@ -210,7 +228,6 @@ public:
private:
Identity _getIdentity(const Address &zta);
void _saveIdentity(const Identity &id);
void _setWorld(const World &newWorld);
const RuntimeEnvironment *RR;

objects.mk

@@ -4,6 +4,7 @@ OBJS=\
ext/http-parser/http_parser.o \
node/C25519.o \
node/CertificateOfMembership.o \
node/Cluster.o \
node/Dictionary.o \
node/Identity.o \
node/IncomingPacket.o \
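Taken together, a service built with the new -DZT_ENABLE_CLUSTER define would wire the class up roughly as sketched below, using the constructor and methods declared in node/Cluster.hpp. The transport callback sendToMember, the setupCluster function, and the member IDs are placeholders; the service-side integration is not part of this commit. Per the comment in Cluster.hpp, members speak over TCP with 16-bit message size headers, so the callback would frame each message that way.

// Hypothetical integration sketch (not in this commit).
#include "node/Cluster.hpp"

// Placeholder transport: deliver one state message to another cluster member,
// e.g. over a TCP connection, prefixed with a 16-bit length header.
static void sendToMember(void *arg,uint16_t toMemberId,const void *msg,unsigned int len)
{
	// ... write uint16_t(len) followed by msg to the connection for toMemberId ...
}

void setupCluster(const ZeroTier::RuntimeEnvironment *renv)
{
	ZeroTier::Cluster *cluster = new ZeroTier::Cluster(
		renv,                                 // runtime environment
		0,                                    // this member's cluster ID
		ZeroTier::Cluster::DISTANCE_SIMPLE,   // distance algorithm
		0,0,0,                                // X, Y, Z coordinates
		&sendToMember,                        // send function
		(void *)0);                           // first argument to send function
	cluster->addMember(1);                    // register the other member IDs
	// On each framed message received from another member:
	//   cluster->handleIncomingStateMessage(data,len);
	// And no less than once every 10 seconds:
	//   cluster->doPeriodicTasks();
}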