This commit is contained in:
Grant Limberg 2018-11-13 16:00:17 -08:00
commit 01e6df4d46
6 changed files with 113 additions and 102 deletions

View File

@ -1354,12 +1354,14 @@ int main(int argc,char **argv)
#ifdef __UNIX_LIKE__ #ifdef __UNIX_LIKE__
signal(SIGHUP,&_sighandlerHup); signal(SIGHUP,&_sighandlerHup);
signal(SIGPIPE,SIG_IGN); signal(SIGPIPE,SIG_IGN);
signal(SIGIO,SIG_IGN);
signal(SIGUSR1,SIG_IGN); signal(SIGUSR1,SIG_IGN);
signal(SIGUSR2,SIG_IGN); signal(SIGUSR2,SIG_IGN);
signal(SIGALRM,SIG_IGN); signal(SIGALRM,SIG_IGN);
signal(SIGINT,&_sighandlerQuit); signal(SIGINT,&_sighandlerQuit);
signal(SIGTERM,&_sighandlerQuit); signal(SIGTERM,&_sighandlerQuit);
signal(SIGQUIT,&_sighandlerQuit); signal(SIGQUIT,&_sighandlerQuit);
signal(SIGINT,&_sighandlerQuit);
/* Ensure that there are no inherited file descriptors open from a previous /* Ensure that there are no inherited file descriptors open from a previous
* incarnation. This is a hack to ensure that GitHub issue #61 or variants * incarnation. This is a hack to ensure that GitHub issue #61 or variants

View File

@ -32,6 +32,8 @@
#include <condition_variable> #include <condition_variable>
#include <chrono> #include <chrono>
#include "Thread.hpp"
namespace ZeroTier { namespace ZeroTier {
/** /**
@ -52,11 +54,27 @@ public:
c.notify_one(); c.notify_one();
} }
inline void postLimit(T t,const unsigned long limit)
{
	// Bounded-capacity enqueue: blocks the producer until the queue has
	// drained below 'limit' (consumers signal 'gc' after each pop) or the
	// queue has been stopped (r == false).
	std::unique_lock<std::mutex> lock(m);
	gc.wait(lock,[this,limit]() { return ((q.size() < limit)||(!r)); });
	if (q.size() < limit) {
		q.push(t);
		c.notify_one();
	}
	// If we fell through because the queue was stopped, the item is
	// silently dropped (same contract as the unbounded post() after stop()).
}
inline void stop(void) inline void stop(void)
{ {
std::lock_guard<std::mutex> lock(m); std::lock_guard<std::mutex> lock(m);
r = false; r = false;
c.notify_all(); c.notify_all();
gc.notify_all();
} }
inline bool get(T &value) inline bool get(T &value)
@ -65,10 +83,14 @@ public:
if (!r) return false; if (!r) return false;
while (q.empty()) { while (q.empty()) {
c.wait(lock); c.wait(lock);
if (!r) return false; if (!r) {
gc.notify_all();
return false;
}
} }
value = q.front(); value = q.front();
q.pop(); q.pop();
gc.notify_all();
return true; return true;
} }
@ -98,8 +120,8 @@ public:
private: private:
volatile bool r; volatile bool r;
std::queue<T> q; std::queue<T> q;
std::mutex m; mutable std::mutex m;
std::condition_variable c; mutable std::condition_variable c,gc;
}; };
} // namespace ZeroTier } // namespace ZeroTier

View File

@ -147,7 +147,7 @@ MacEthernetTap::MacEthernetTap(
_agentStdin2 = agentStdin[0]; _agentStdin2 = agentStdin[0];
_agentStdout2 = agentStdout[1]; _agentStdout2 = agentStdout[1];
_agentStderr2 = agentStderr[1]; _agentStderr2 = agentStderr[1];
long apid = (long)vfork(); long apid = (long)fork();
if (apid < 0) { if (apid < 0) {
throw std::runtime_error("fork failed"); throw std::runtime_error("fork failed");
} else if (apid == 0) { } else if (apid == 0) {
@ -155,10 +155,13 @@ MacEthernetTap::MacEthernetTap(
::dup2(agentStdout[1],STDOUT_FILENO); ::dup2(agentStdout[1],STDOUT_FILENO);
::dup2(agentStderr[1],STDERR_FILENO); ::dup2(agentStderr[1],STDERR_FILENO);
::close(agentStdin[0]); ::close(agentStdin[0]);
::close(agentStdin[1]);
::close(agentStdout[0]);
::close(agentStdout[1]); ::close(agentStdout[1]);
::close(agentStderr[0]);
::close(agentStderr[1]); ::close(agentStderr[1]);
::execl(agentPath.c_str(),agentPath.c_str(),devnostr,ethaddr,mtustr,metricstr,(char *)0); ::execl(agentPath.c_str(),agentPath.c_str(),devnostr,ethaddr,mtustr,metricstr,(char *)0);
::exit(-1); ::_exit(-1);
} else { } else {
_agentPid = apid; _agentPid = apid;
} }
@ -284,7 +287,9 @@ void MacEthernetTap::put(const MAC &from,const MAC &to,unsigned int etherType,co
iov[1].iov_len = 15; iov[1].iov_len = 15;
iov[2].iov_base = const_cast<void *>(data); iov[2].iov_base = const_cast<void *>(data);
iov[2].iov_len = len; iov[2].iov_len = len;
_putLock.lock();
writev(_agentStdin,iov,3); writev(_agentStdin,iov,3);
_putLock.unlock();
} }
} }
@ -356,8 +361,8 @@ void MacEthernetTap::threadMain()
const int nfds = std::max(std::max(_shutdownSignalPipe[0],_agentStdout),_agentStderr) + 1; const int nfds = std::max(std::max(_shutdownSignalPipe[0],_agentStdout),_agentStderr) + 1;
long agentReadPtr = 0; long agentReadPtr = 0;
fcntl(_agentStdout,F_SETFL,O_NONBLOCK); fcntl(_agentStdout,F_SETFL,fcntl(_agentStdout,F_GETFL)|O_NONBLOCK);
fcntl(_agentStderr,F_SETFL,O_NONBLOCK); fcntl(_agentStderr,F_SETFL,fcntl(_agentStderr,F_GETFL)|O_NONBLOCK);
FD_ZERO(&readfds); FD_ZERO(&readfds);
FD_ZERO(&nullfds); FD_ZERO(&nullfds);
@ -393,8 +398,6 @@ void MacEthernetTap::threadMain()
break; break;
} }
} }
} else {
break;
} }
} }
if (FD_ISSET(_agentStderr,&readfds)) { if (FD_ISSET(_agentStderr,&readfds)) {

View File

@ -38,6 +38,7 @@
#include "../node/MAC.hpp" #include "../node/MAC.hpp"
#include "../node/InetAddress.hpp" #include "../node/InetAddress.hpp"
#include "../node/MulticastGroup.hpp" #include "../node/MulticastGroup.hpp"
#include "../node/Mutex.hpp"
#include "Thread.hpp" #include "Thread.hpp"
@ -80,6 +81,7 @@ private:
std::string _homePath; std::string _homePath;
std::string _dev; std::string _dev;
std::vector<MulticastGroup> _multicastGroups; std::vector<MulticastGroup> _multicastGroups;
Mutex _putLock;
unsigned int _mtu; unsigned int _mtu;
unsigned int _metric; unsigned int _metric;
int _shutdownSignalPipe[2]; int _shutdownSignalPipe[2];

View File

@ -175,7 +175,7 @@ static int run(const char *path,...)
} else if (pid == 0) { } else if (pid == 0) {
dup2(STDERR_FILENO,STDOUT_FILENO); dup2(STDERR_FILENO,STDOUT_FILENO);
execv(args[0],args); execv(args[0],args);
exit(-1); _exit(-1);
} }
int rv = 0; int rv = 0;
waitpid(pid,&rv,0); waitpid(pid,&rv,0);
@ -322,10 +322,6 @@ int main(int argc,char **argv)
return ZT_MACETHERNETTAPAGENT_EXIT_CODE_UNABLE_TO_CREATE; return ZT_MACETHERNETTAPAGENT_EXIT_CODE_UNABLE_TO_CREATE;
} }
fcntl(STDIN_FILENO,F_SETFL,fcntl(STDIN_FILENO,F_GETFL)|O_NONBLOCK);
fcntl(s_ndrvfd,F_SETFL,fcntl(s_ndrvfd,F_GETFL)|O_NONBLOCK);
fcntl(s_bpffd,F_SETFL,fcntl(s_bpffd,F_GETFL)|O_NONBLOCK);
fprintf(stderr,"I %s %s %d.%d.%d.%d\n",s_deviceName,s_peerDeviceName,ZEROTIER_ONE_VERSION_MAJOR,ZEROTIER_ONE_VERSION_MINOR,ZEROTIER_ONE_VERSION_REVISION,ZEROTIER_ONE_VERSION_BUILD); fprintf(stderr,"I %s %s %d.%d.%d.%d\n",s_deviceName,s_peerDeviceName,ZEROTIER_ONE_VERSION_MAJOR,ZEROTIER_ONE_VERSION_MINOR,ZEROTIER_ONE_VERSION_REVISION,ZEROTIER_ONE_VERSION_BUILD);
FD_ZERO(&rfds); FD_ZERO(&rfds);
@ -358,8 +354,6 @@ int main(int argc,char **argv)
} }
p += BPF_WORDALIGN(h->bh_hdrlen + h->bh_caplen); p += BPF_WORDALIGN(h->bh_hdrlen + h->bh_caplen);
} }
} else {
return ZT_MACETHERNETTAPAGENT_EXIT_CODE_READ_ERROR;
} }
} }
@ -381,6 +375,7 @@ int main(int argc,char **argv)
} }
} }
break; break;
case ZT_MACETHERNETTAPAGENT_STDIN_CMD_IFCONFIG: { case ZT_MACETHERNETTAPAGENT_STDIN_CMD_IFCONFIG: {
char *args[16]; char *args[16];
args[0] = P_IFCONFIG; args[0] = P_IFCONFIG;
@ -404,18 +399,19 @@ int main(int argc,char **argv)
} }
args[argNo] = (char *)0; args[argNo] = (char *)0;
if (argNo > 2) { if (argNo > 2) {
pid_t pid = vfork(); pid_t pid = fork();
if (pid < 0) { if (pid < 0) {
return -1; return -1;
} else if (pid == 0) { } else if (pid == 0) {
dup2(STDERR_FILENO,STDOUT_FILENO); dup2(STDERR_FILENO,STDOUT_FILENO);
execv(args[0],args); execv(args[0],args);
exit(-1); _exit(-1);
} }
int rv = 0; int rv = 0;
waitpid(pid,&rv,0); waitpid(pid,&rv,0);
} }
} break; } break;
case ZT_MACETHERNETTAPAGENT_STDIN_CMD_EXIT: case ZT_MACETHERNETTAPAGENT_STDIN_CMD_EXIT:
return ZT_MACETHERNETTAPAGENT_EXIT_CODE_SUCCESS; return ZT_MACETHERNETTAPAGENT_EXIT_CODE_SUCCESS;
} }
@ -430,8 +426,6 @@ int main(int argc,char **argv)
break; break;
} }
} }
} else {
return ZT_MACETHERNETTAPAGENT_EXIT_CODE_READ_ERROR;
} }
} }
} }

View File

@ -60,6 +60,7 @@
#include "../osdep/PortMapper.hpp" #include "../osdep/PortMapper.hpp"
#include "../osdep/Binder.hpp" #include "../osdep/Binder.hpp"
#include "../osdep/ManagedRoute.hpp" #include "../osdep/ManagedRoute.hpp"
#include "../osdep/BlockingQueue.hpp"
#include "OneService.hpp" #include "OneService.hpp"
#include "SoftwareUpdater.hpp" #include "SoftwareUpdater.hpp"
@ -174,9 +175,6 @@ namespace ZeroTier { typedef BSDEthernetTap EthernetTap; }
// TCP activity timeout // TCP activity timeout
#define ZT_TCP_ACTIVITY_TIMEOUT 60000 #define ZT_TCP_ACTIVITY_TIMEOUT 60000
// Number of receive path threads to start
#define ZT_INCOMING_PACKET_THREAD_POOL_SIZE 8
#if ZT_VAULT_SUPPORT #if ZT_VAULT_SUPPORT
size_t curlResponseWrite(void *ptr, size_t size, size_t nmemb, std::string *data) size_t curlResponseWrite(void *ptr, size_t size, size_t nmemb, std::string *data)
{ {
@ -440,6 +438,15 @@ struct TcpConnection
Mutex writeq_m; Mutex writeq_m;
}; };
// A raw wire packet captured in phyOnDatagram() and handed to the incoming
// packet worker threads via _incomingPacketQueue. Instances are recycled
// through _incomingPacketMemoryPool to avoid a heap allocation per packet.
struct OneServiceIncomingPacket
{
uint64_t now; // receive time, taken from OSUtils::now() at capture
int64_t sock; // originating PhySocket pointer, cast to int64_t
struct sockaddr_storage from; // source address of the datagram
unsigned int size; // number of valid bytes in data[]
uint8_t data[ZT_MAX_MTU]; // packet payload (fixed maximum-MTU buffer)
};
class OneServiceImpl : public OneService class OneServiceImpl : public OneService
{ {
public: public:
@ -465,17 +472,11 @@ public:
unsigned int _tertiaryPort; unsigned int _tertiaryPort;
volatile unsigned int _udpPortPickerCounter; volatile unsigned int _udpPortPickerCounter;
#ifdef ZT_INCOMING_PACKET_THREAD_POOL_SIZE unsigned long _incomingPacketConcurrency;
struct { std::vector<OneServiceIncomingPacket *> _incomingPacketMemoryPool;
uint8_t data[2048]; BlockingQueue<OneServiceIncomingPacket *> _incomingPacketQueue;
std::thread thr; std::vector<std::thread> _incomingPacketThreads;
int64_t sock; Mutex _incomingPacketMemoryPoolLock,_incomingPacketThreadsLock;
struct sockaddr_storage from;
int size;
std::condition_variable cond;
std::mutex lock;
} _incomingPacketWorker[ZT_INCOMING_PACKET_THREAD_POOL_SIZE];
#endif
// Local configuration and memo-ized information from it // Local configuration and memo-ized information from it
json _localConfig; json _localConfig;
@ -606,23 +607,21 @@ public:
_ports[1] = 0; _ports[1] = 0;
_ports[2] = 0; _ports[2] = 0;
#ifdef ZT_INCOMING_PACKET_THREAD_POOL_SIZE _incomingPacketConcurrency = std::max((unsigned long)1,std::min((unsigned long)16,(unsigned long)std::thread::hardware_concurrency()));
for(unsigned int tn=0;tn<ZT_INCOMING_PACKET_THREAD_POOL_SIZE;++tn) { for(long t=0;t<_incomingPacketConcurrency;++t) {
_incomingPacketWorker[tn].thr = std::thread([this,tn]() { _incomingPacketThreads.push_back(std::thread([this]() {
std::unique_lock<std::mutex> l(_incomingPacketWorker[tn].lock); OneServiceIncomingPacket *pkt = nullptr;
for(;;) { for(;;) {
_incomingPacketWorker[tn].cond.wait(l); if (!_incomingPacketQueue.get(pkt))
if (_incomingPacketWorker[tn].size < 0) {
break; break;
} else if (_incomingPacketWorker[tn].size > 0) { if (!pkt)
const ZT_ResultCode rc = _node->processWirePacket( break;
(void *)0,
OSUtils::now(), const ZT_ResultCode rc = _node->processWirePacket(nullptr,pkt->now,pkt->sock,&(pkt->from),pkt->data,pkt->size,&_nextBackgroundTaskDeadline);
_incomingPacketWorker[tn].sock, {
&(_incomingPacketWorker[tn].from), Mutex::Lock l(_incomingPacketMemoryPoolLock);
_incomingPacketWorker[tn].data, _incomingPacketMemoryPool.push_back(pkt);
(unsigned int)_incomingPacketWorker[tn].size, }
&_nextBackgroundTaskDeadline);
if (ZT_ResultCode_isFatal(rc)) { if (ZT_ResultCode_isFatal(rc)) {
char tmp[256]; char tmp[256];
OSUtils::ztsnprintf(tmp,sizeof(tmp),"fatal error code from processWirePacket: %d",(int)rc); OSUtils::ztsnprintf(tmp,sizeof(tmp),"fatal error code from processWirePacket: %d",(int)rc);
@ -633,10 +632,8 @@ public:
break; break;
} }
} }
}));
} }
});
}
#endif
#if ZT_VAULT_SUPPORT #if ZT_VAULT_SUPPORT
curl_global_init(CURL_GLOBAL_DEFAULT); curl_global_init(CURL_GLOBAL_DEFAULT);
@ -645,24 +642,27 @@ public:
virtual ~OneServiceImpl() virtual ~OneServiceImpl()
{ {
#ifdef ZT_INCOMING_PACKET_THREAD_POOL_SIZE _incomingPacketQueue.stop();
for(unsigned int tn=0;tn<ZT_INCOMING_PACKET_THREAD_POOL_SIZE;++tn) { _incomingPacketThreadsLock.lock();
_incomingPacketWorker[tn].lock.lock(); for(auto t=_incomingPacketThreads.begin();t!=_incomingPacketThreads.end();++t)
_incomingPacketWorker[tn].size = -1; t->join();
_incomingPacketWorker[tn].lock.unlock(); _incomingPacketThreadsLock.unlock();
_incomingPacketWorker[tn].cond.notify_all();
}
for(unsigned int tn=0;tn<ZT_INCOMING_PACKET_THREAD_POOL_SIZE;++tn) {
_incomingPacketWorker[tn].thr.join();
}
#endif
_binder.closeAll(_phy); _binder.closeAll(_phy);
_phy.close(_localControlSocket4); _phy.close(_localControlSocket4);
_phy.close(_localControlSocket6); _phy.close(_localControlSocket6);
#if ZT_VAULT_SUPPORT #if ZT_VAULT_SUPPORT
curl_global_cleanup(); curl_global_cleanup();
#endif #endif
_incomingPacketMemoryPoolLock.lock();
while (!_incomingPacketMemoryPool.empty()) {
delete _incomingPacketMemoryPool.back();
_incomingPacketMemoryPool.pop_back();
}
_incomingPacketMemoryPoolLock.unlock();
#ifdef ZT_USE_MINIUPNPC #ifdef ZT_USE_MINIUPNPC
delete _portMapper; delete _portMapper;
#endif #endif
@ -1900,39 +1900,27 @@ public:
inline void phyOnDatagram(PhySocket *sock,void **uptr,const struct sockaddr *localAddr,const struct sockaddr *from,void *data,unsigned long len) inline void phyOnDatagram(PhySocket *sock,void **uptr,const struct sockaddr *localAddr,const struct sockaddr *from,void *data,unsigned long len)
{ {
const uint64_t now = OSUtils::now();
if ((len >= 16)&&(reinterpret_cast<const InetAddress *>(from)->ipScope() == InetAddress::IP_SCOPE_GLOBAL)) if ((len >= 16)&&(reinterpret_cast<const InetAddress *>(from)->ipScope() == InetAddress::IP_SCOPE_GLOBAL))
_lastDirectReceiveFromGlobal = OSUtils::now(); _lastDirectReceiveFromGlobal = now;
#ifdef ZT_INCOMING_PACKET_THREAD_POOL_SIZE
unsigned long cksum = 0; OneServiceIncomingPacket *pkt;
for(unsigned int i=0;i<sizeof(struct sockaddr_storage);++i) { _incomingPacketMemoryPoolLock.lock();
cksum += ((uint8_t *)from)[i]; if (_incomingPacketMemoryPool.empty()) {
pkt = new OneServiceIncomingPacket;
} else {
pkt = _incomingPacketMemoryPool.back();
_incomingPacketMemoryPool.pop_back();
} }
const unsigned long tn = cksum % ZT_INCOMING_PACKET_THREAD_POOL_SIZE; _incomingPacketMemoryPoolLock.unlock();
_incomingPacketWorker[tn].lock.lock();
memcpy(_incomingPacketWorker[tn].data,data,len); pkt->now = now;
_incomingPacketWorker[tn].sock = reinterpret_cast<int64_t>(sock); pkt->sock = reinterpret_cast<int64_t>(sock);
memcpy(&_incomingPacketWorker[tn].from,from,sizeof(struct sockaddr_storage)); ZT_FAST_MEMCPY(&(pkt->from),from,sizeof(struct sockaddr_storage));
_incomingPacketWorker[tn].size = (int)len; pkt->size = (unsigned int)len;
_incomingPacketWorker[tn].lock.unlock(); ZT_FAST_MEMCPY(pkt->data,data,len);
_incomingPacketWorker[tn].cond.notify_all();
#else _incomingPacketQueue.postLimit(pkt,16 * _incomingPacketConcurrency);
const ZT_ResultCode rc = _node->processWirePacket(
(void *)0,
OSUtils::now(),
reinterpret_cast<int64_t>(sock),
reinterpret_cast<const struct sockaddr_storage *>(from), // Phy<> uses sockaddr_storage, so it'll always be that big
data,
len,
&_nextBackgroundTaskDeadline);
if (ZT_ResultCode_isFatal(rc)) {
char tmp[256];
OSUtils::ztsnprintf(tmp,sizeof(tmp),"fatal error code from processWirePacket: %d",(int)rc);
Mutex::Lock _l(_termReason_m);
_termReason = ONE_UNRECOVERABLE_ERROR;
_fatalErrorMessage = tmp;
this->terminate();
}
#endif
} }
inline void phyOnTcpConnect(PhySocket *sock,void **uptr,bool success) inline void phyOnTcpConnect(PhySocket *sock,void **uptr,bool success)