// ZeroTierOne/node/Mutex.hpp
/*
 * Copyright (c)2019 ZeroTier, Inc.
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file in the project's root directory.
 *
 * Change Date: 2023-01-01
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2.0 of the Apache License.
 */
/****/
#ifndef ZT_MUTEX_HPP
#define ZT_MUTEX_HPP
#include "Constants.hpp"
#ifdef __UNIX_LIKE__
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdlib.h>
namespace ZeroTier {
2018-12-05 00:15:46 +00:00
#if defined(__GNUC__) && (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
2019-08-28 14:34:24 +00:00
// Inline ticket lock with yield for x64 systems, provides much better performance when there is no contention.
2018-01-27 02:34:56 +00:00
class Mutex
{
public:
ZT_ALWAYS_INLINE Mutex() : nextTicket(0),nowServing(0) {}
2019-08-23 19:34:45 +00:00
ZT_ALWAYS_INLINE void lock() const
{
const uint16_t myTicket = __sync_fetch_and_add(&(const_cast<Mutex *>(this)->nextTicket),1);
while (nowServing != myTicket) {
2019-08-28 14:34:24 +00:00
pthread_yield_np();
__asm__ __volatile__("rep;nop"::);
__asm__ __volatile__("":::"memory");
}
}
2019-08-23 19:34:45 +00:00
ZT_ALWAYS_INLINE void unlock() const { ++(const_cast<Mutex *>(this)->nowServing); }
2018-01-27 02:34:56 +00:00
class Lock
{
public:
2019-08-23 19:34:45 +00:00
ZT_ALWAYS_INLINE Lock(Mutex &m) : _m(&m) { m.lock(); }
ZT_ALWAYS_INLINE Lock(const Mutex &m) : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
private:
Mutex *const _m;
};
private:
inline Mutex(const Mutex &) {}
2018-01-27 02:34:56 +00:00
const Mutex &operator=(const Mutex &) { return *this; }
uint16_t nextTicket;
uint16_t nowServing;
};
#else
// libpthread based mutex lock
2018-01-27 02:34:56 +00:00
class Mutex
{
public:
ZT_ALWAYS_INLINE Mutex() { pthread_mutex_init(&_mh,(const pthread_mutexattr_t *)0); }
ZT_ALWAYS_INLINE ~Mutex() { pthread_mutex_destroy(&_mh); }
ZT_ALWAYS_INLINE void lock() const { pthread_mutex_lock(&((const_cast <Mutex *> (this))->_mh)); }
ZT_ALWAYS_INLINE void unlock() const { pthread_mutex_unlock(&((const_cast <Mutex *> (this))->_mh)); }
2018-01-27 02:34:56 +00:00
class Lock
{
public:
ZT_ALWAYS_INLINE Lock(Mutex &m) : _m(&m) { m.lock(); }
ZT_ALWAYS_INLINE Lock(const Mutex &m) : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
private:
Mutex *const _m;
};
private:
inline Mutex(const Mutex &) {}
2018-01-27 02:34:56 +00:00
const Mutex &operator=(const Mutex &) { return *this; }
pthread_mutex_t _mh;
};
#endif
} // namespace ZeroTier
#endif // Apple / Linux
#ifdef __WINDOWS__
#include <stdlib.h>
#include <Windows.h>
namespace ZeroTier {
// Windows critical section based lock
2018-01-27 02:34:56 +00:00
class Mutex
{
public:
ZT_ALWAYS_INLINE Mutex() { InitializeCriticalSection(&_cs); }
ZT_ALWAYS_INLINE ~Mutex() { DeleteCriticalSection(&_cs); }
ZT_ALWAYS_INLINE void lock() { EnterCriticalSection(&_cs); }
ZT_ALWAYS_INLINE void unlock() { LeaveCriticalSection(&_cs); }
ZT_ALWAYS_INLINE void lock() const { (const_cast <Mutex *> (this))->lock(); }
ZT_ALWAYS_INLINE void unlock() const { (const_cast <Mutex *> (this))->unlock(); }
2018-01-27 02:34:56 +00:00
class Lock
{
public:
ZT_ALWAYS_INLINE Lock(Mutex &m) : _m(&m) { m.lock(); }
ZT_ALWAYS_INLINE Lock(const Mutex &m) : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
ZT_ALWAYS_INLINE ~Lock() { _m->unlock(); }
private:
Mutex *const _m;
};
private:
inline Mutex(const Mutex &) {}
2018-01-27 02:34:56 +00:00
const Mutex &operator=(const Mutex &) { return *this; }
CRITICAL_SECTION _cs;
};
} // namespace ZeroTier
#endif // _WIN32
#endif