corda/src/x86.h

/* Copyright (c) 2008-2010, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software.  See license.txt for
   details. */
#ifndef X86_H
#define X86_H
#include "types.h"
#include "common.h"
#ifdef _MSC_VER
# include "windows.h"
# pragma push_macro("assert")
# include "intrin.h"
# pragma pop_macro("assert")
# undef interface
#endif
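
/* The *_REGISTER macros below pull the instruction pointer, frame
   pointer, stack pointer, and thread register (ebx/rbx, the register
   generated code reserves for the VM thread) out of the ucontext
   passed to a signal handler.  The ucontext layout differs between
   Darwin (UNIX03 vs. pre-UNIX03) and Linux, hence the nested
   conditionals. */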
#ifdef ARCH_x86_32
# ifdef __APPLE__
# if __DARWIN_UNIX03 && defined(_STRUCT_X86_EXCEPTION_STATE32)
# define IP_REGISTER(context) (context->uc_mcontext->__ss.__eip)
# define BASE_REGISTER(context) (context->uc_mcontext->__ss.__ebp)
# define STACK_REGISTER(context) (context->uc_mcontext->__ss.__esp)
# define THREAD_REGISTER(context) (context->uc_mcontext->__ss.__ebx)
# else
# define IP_REGISTER(context) (context->uc_mcontext->ss.eip)
# define BASE_REGISTER(context) (context->uc_mcontext->ss.ebp)
# define STACK_REGISTER(context) (context->uc_mcontext->ss.esp)
# define THREAD_REGISTER(context) (context->uc_mcontext->ss.ebx)
# endif
# else
# define IP_REGISTER(context) (context->uc_mcontext.gregs[REG_EIP])
# define BASE_REGISTER(context) (context->uc_mcontext.gregs[REG_EBP])
# define STACK_REGISTER(context) (context->uc_mcontext.gregs[REG_ESP])
# define THREAD_REGISTER(context) (context->uc_mcontext.gregs[REG_EBX])
# endif
extern "C" uint64_t
2007-10-24 17:24:19 +00:00
vmNativeCall(void* function, void* stack, unsigned stackSize,
unsigned returnType);
2007-10-23 01:00:57 +00:00
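
/* On ia32 the cdecl convention passes every argument on the stack,
   so dynamicCall simply forwards the packed argument array to
   vmNativeCall; the argument type information is not needed here. */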
namespace vm {

inline uint64_t
dynamicCall(void* function, uintptr_t* arguments, uint8_t*,
            unsigned, unsigned argumentsSize, unsigned returnType)
{
  return vmNativeCall(function, arguments, argumentsSize, returnType);
}

} // namespace vm
#elif defined ARCH_x86_64
# ifdef __APPLE__
# if __DARWIN_UNIX03 && defined(_STRUCT_X86_EXCEPTION_STATE32)
# define IP_REGISTER(context) (context->uc_mcontext->__ss.__rip)
# define BASE_REGISTER(context) (context->uc_mcontext->__ss.__rbp)
# define STACK_REGISTER(context) (context->uc_mcontext->__ss.__rsp)
# define THREAD_REGISTER(context) (context->uc_mcontext->__ss.__rbx)
# else
# define IP_REGISTER(context) (context->uc_mcontext->ss.rip)
# define BASE_REGISTER(context) (context->uc_mcontext->ss.rbp)
# define STACK_REGISTER(context) (context->uc_mcontext->ss.rsp)
# define THREAD_REGISTER(context) (context->uc_mcontext->ss.rbx)
# endif
# else
# define IP_REGISTER(context) (context->uc_mcontext.gregs[REG_RIP])
# define BASE_REGISTER(context) (context->uc_mcontext.gregs[REG_RBP])
# define STACK_REGISTER(context) (context->uc_mcontext.gregs[REG_RSP])
# define THREAD_REGISTER(context) (context->uc_mcontext.gregs[REG_RBX])
# endif
extern "C" uint64_t
# ifdef PLATFORM_WINDOWS
vmNativeCall(void* function, void* stack, unsigned stackSize,
unsigned returnType);
# else
2007-10-24 17:24:19 +00:00
vmNativeCall(void* function, void* stack, unsigned stackSize,
void* gprTable, void* sseTable, unsigned returnType);
# endif
2007-10-23 01:00:57 +00:00
namespace vm {

# ifdef PLATFORM_WINDOWS
/* The Win64 variant forwards the packed argument array and count;
   assigning the first four arguments to registers is left to the
   vmNativeCall assembly stub. */
inline uint64_t
dynamicCall(void* function, uint64_t* arguments, UNUSED uint8_t* argumentTypes,
            unsigned argumentCount, unsigned, unsigned returnType)
{
  return vmNativeCall(function, arguments, argumentCount, returnType);
}
# else
/* System V AMD64 calling convention: the first six integer or
   pointer arguments go in general-purpose registers and the first
   eight floating-point arguments in SSE registers; anything left
   over is passed on the stack.  dynamicCall does that classification
   here and hands the resulting tables to the vmNativeCall assembly
   stub. */
inline uint64_t
dynamicCall(void* function, uintptr_t* arguments, uint8_t* argumentTypes,
            unsigned argumentCount, unsigned, unsigned returnType)
{
  const unsigned GprCount = 6;
  uint64_t gprTable[GprCount];
  unsigned gprIndex = 0;

  const unsigned SseCount = 8;
  uint64_t sseTable[SseCount];
  unsigned sseIndex = 0;

  uint64_t stack[argumentCount];
  unsigned stackIndex = 0;

  for (unsigned i = 0; i < argumentCount; ++i) {
    switch (argumentTypes[i]) {
    case FLOAT_TYPE:
    case DOUBLE_TYPE: {
      if (sseIndex < SseCount) {
        sseTable[sseIndex++] = arguments[i];
      } else {
        stack[stackIndex++] = arguments[i];
      }
    } break;

    default: {
      if (gprIndex < GprCount) {
        gprTable[gprIndex++] = arguments[i];
      } else {
        stack[stackIndex++] = arguments[i];
      }
    } break;
    }
  }

  return vmNativeCall(function, stack, stackIndex * BytesPerWord,
                      (gprIndex ? gprTable : 0),
                      (sseIndex ? sseTable : 0), returnType);
}
# endif
} // namespace vm
#else
# error unsupported architecture
#endif
namespace vm {
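
/* Emit a software breakpoint (int 3), e.g. for debugging generated
   code. */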
inline void
trap()
{
#ifdef _MSC_VER
  __asm int 3
#else
  asm("int3");
#endif
}
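
/* x86 provides total store ordering: loads are not reordered with
   other loads and stores are not reordered with other stores, so
   most of the barriers below only need to stop the compiler from
   reordering.  Only a store followed by a load can be observed out
   of order, which is why storeLoadMemoryBarrier issues a real
   fence. */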
inline void
programOrderMemoryBarrier()
{
  compileTimeMemoryBarrier();
}
inline void
storeStoreMemoryBarrier()
{
  programOrderMemoryBarrier();
}
inline void
storeLoadMemoryBarrier()
{
#ifdef _MSC_VER
  MemoryBarrier();
#elif defined ARCH_x86_32
  __asm__ __volatile__("lock; addl $0,0(%%esp)": : :"memory");
#elif defined ARCH_x86_64
  __asm__ __volatile__("mfence": : :"memory");
#endif // ARCH_x86_64
}
inline void
loadMemoryBarrier()
{
  programOrderMemoryBarrier();
}
/* x86 keeps instruction fetches coherent with data writes, so only a
   compiler barrier is needed after code has been generated or
   patched. */
inline void
syncInstructionCache(const void*, unsigned)
{
  programOrderMemoryBarrier();
}
#ifdef USE_ATOMIC_OPERATIONS
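/* Atomically replace *p with new_ if it still contains old; returns
   true on success.  MSVC uses the Interlocked intrinsics, GCC 4.1
   and later the __sync builtins, and older GCC falls back to inline
   lock; cmpxchg.  A typical caller retries in a loop, e.g.:

     uintptr_t old;
     do {
       old = *p;
     } while (!atomicCompareAndSwap(p, old, old + 1));
*/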
inline bool
atomicCompareAndSwap32(uint32_t* p, uint32_t old, uint32_t new_)
{
#ifdef _MSC_VER
  return old == InterlockedCompareExchange
    (reinterpret_cast<LONG*>(p), new_, old);
#elif (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1))
  return __sync_bool_compare_and_swap(p, old, new_);
#else
  uint8_t result;

  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
                       : "=m"(*p), "=q"(result)
                       : "r"(new_), "a"(old), "m"(*p)
                       : "memory");

  return result != 0;
#endif
}
inline bool
atomicCompareAndSwap64(uint64_t* p, uint64_t old, uint64_t new_)
{
#ifdef _MSC_VER
  return old == InterlockedCompareExchange64
    (reinterpret_cast<LONGLONG*>(p), new_, old);
#elif (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1))
  return __sync_bool_compare_and_swap(p, old, new_);
#elif defined ARCH_x86_32
  /* 32-bit x86 has no 64-bit cmpxchg, so use cmpxchg8b with the
     operands split across edx:eax and ecx:ebx. */
  uint8_t result;

  __asm__ __volatile__("lock; cmpxchg8b %0; setz %1"
                       : "=m"(*p), "=q"(result)
                       : "a"(static_cast<uint32_t>(old)),
                         "d"(static_cast<uint32_t>(old >> 32)),
                         "b"(static_cast<uint32_t>(new_)),
                         "c"(static_cast<uint32_t>(new_ >> 32)),
                         "m"(*p)
                       : "memory");

  return result != 0;
#else
  uint8_t result;

  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
                       : "=m"(*p), "=q"(result)
                       : "r"(new_), "a"(old), "m"(*p)
                       : "memory");

  return result != 0;
#endif
}
inline bool
atomicCompareAndSwap(uintptr_t* p, uintptr_t old, uintptr_t new_)
{
#ifdef ARCH_x86_32
  return atomicCompareAndSwap32(reinterpret_cast<uint32_t*>(p), old, new_);
#elif defined ARCH_x86_64
  return atomicCompareAndSwap64(reinterpret_cast<uint64_t*>(p), old, new_);
#endif // ARCH_x86_64
}
#endif // USE_ATOMIC_OPERATIONS
} // namespace vm
#endif//X86_H