/* Copyright (c) 2008-2011, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#ifndef ARM_H
#define ARM_H

#include "types.h"
#include "common.h"

#ifdef __APPLE__
# include "libkern/OSAtomic.h"
# include "libkern/OSCacheControl.h"
# include "mach/mach_types.h"
# include "mach/arm/thread_act.h"
# include "mach/arm/thread_status.h"

# define THREAD_STATE ARM_THREAD_STATE
# define THREAD_STATE_TYPE arm_thread_state_t
# define THREAD_STATE_COUNT ARM_THREAD_STATE_COUNT

# if __DARWIN_UNIX03 && defined(_STRUCT_ARM_EXCEPTION_STATE)
#  define FIELD(x) __##x
# else
#  define FIELD(x) x
# endif

# define THREAD_STATE_IP(state) ((state).FIELD(pc))
# define THREAD_STATE_STACK(state) ((state).FIELD(sp))
# define THREAD_STATE_THREAD(state) ((state).FIELD(r[8]))
# define THREAD_STATE_LINK(state) ((state).FIELD(lr))

# define IP_REGISTER(context) \
  THREAD_STATE_IP(context->uc_mcontext->FIELD(ss))
# define STACK_REGISTER(context) \
  THREAD_STATE_STACK(context->uc_mcontext->FIELD(ss))
# define THREAD_REGISTER(context) \
  THREAD_STATE_THREAD(context->uc_mcontext->FIELD(ss))
# define LINK_REGISTER(context) \
  THREAD_STATE_LINK(context->uc_mcontext->FIELD(ss))

#elif (defined __QNX__)
# include "arm/smpxchg.h"
# include "sys/mman.h"

# define IP_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_PC])
# define STACK_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_SP])
# define THREAD_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_IP])
# define LINK_REGISTER(context) (context->uc_mcontext.cpu.gpr[ARM_REG_LR])

#else
# define IP_REGISTER(context) (context->uc_mcontext.arm_pc)
# define STACK_REGISTER(context) (context->uc_mcontext.arm_sp)
# define THREAD_REGISTER(context) (context->uc_mcontext.arm_ip)
# define LINK_REGISTER(context) (context->uc_mcontext.arm_lr)
#endif
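
// The IP/STACK/THREAD/LINK_REGISTER macros above pull the saved program
// counter, stack pointer, thread register (r[8] on Darwin, ip/r12 on QNX and
// Linux), and link register out of the ucontext delivered to a signal handler.
// A hedged usage sketch (the handler name and casts are illustrative, not part
// of this header):
//
//   void handleSignal(int, siginfo_t*, void* rawContext) {
//     ucontext_t* context = static_cast<ucontext_t*>(rawContext);
//     void* ip = reinterpret_cast<void*>(IP_REGISTER(context));
//     void* sp = reinterpret_cast<void*>(STACK_REGISTER(context));
//     // map ip back to the code being executed and unwind from sp
//   }
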
#define VA_LIST(x) (&(x))
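
// vmNativeCall is implemented in the VM's ARM assembly, not in this header.
// Judging from the call site in dynamicCall below, it reserves stackTotal
// bytes of stack, copies memoryCount bytes from memoryTable onto it, loads
// gprTable into the core argument registers and vfpTable into the VFP argument
// registers, calls function, and returns the result as a uint64_t interpreted
// according to returnType.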
extern "C" uint64_t
vmNativeCall(void* function, unsigned stackTotal, void* memoryTable,
             unsigned memoryCount, void* gprTable, void* vfpTable,
             unsigned returnType);

namespace vm {

inline void
trap()
{
  asm("bkpt");
}

inline void
memoryBarrier()
{
  asm("nop");
}
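
// memoryBarrier() emits only a nop, and the storeStore, storeLoad, and load
// barrier variants below all delegate to it, so none of these helpers issues
// an explicit dmb/dsb instruction.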

inline void
storeStoreMemoryBarrier()
{
  memoryBarrier();
}

inline void
storeLoadMemoryBarrier()
{
  memoryBarrier();
}

inline void
loadMemoryBarrier()
{
  memoryBarrier();
}

inline void
syncInstructionCache(const void* start, unsigned size)
{
#ifdef __APPLE__
  sys_icache_invalidate(const_cast<void*>(start), size);
#elif (defined __QNX__)
  msync(const_cast<void*>(start), size, MS_INVALIDATE_ICACHE);
#else
  __clear_cache
    (const_cast<void*>(start),
     const_cast<uint8_t*>(static_cast<const uint8_t*>(start) + size));
#endif
}
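
// syncInstructionCache makes code that was just written through the data cache
// visible to instruction fetch before it is executed.  A hedged usage sketch
// (the buffer and length are illustrative):
//
//   // after the JIT finishes writing machine code into `code`
//   syncInstructionCache(code, length);
//   // it is now safe to branch into `code`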
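
// On Linux, __kernel_cmpxchg is the kernel-provided "kuser helper"
// compare-and-swap routine mapped at the fixed address 0xffff0fc0; it returns
// zero when the swap succeeds, which is why the fallback branch of
// atomicCompareAndSwap32 below treats a zero result as success.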
#ifndef __APPLE__
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
# define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
#endif

inline bool
atomicCompareAndSwap32(uint32_t* p, uint32_t old, uint32_t new_)
{
#ifdef __APPLE__
  return OSAtomicCompareAndSwap32(old, new_, reinterpret_cast<int32_t*>(p));
#elif (defined __QNX__)
  return old == _smp_cmpxchg(p, old, new_);
#else
  int r = __kernel_cmpxchg
    (static_cast<int>(old), static_cast<int>(new_), reinterpret_cast<int*>(p));
  return (!r ? true : false);
#endif
}

inline bool
atomicCompareAndSwap(uintptr_t* p, uintptr_t old, uintptr_t new_)
{
  return atomicCompareAndSwap32(reinterpret_cast<uint32_t*>(p), old, new_);
}
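
// A hedged usage sketch (the counter pointer is illustrative): retry until the
// word at `counter` is advanced past the value we last observed.
//
//   uintptr_t observed;
//   do {
//     observed = *counter;
//   } while (not atomicCompareAndSwap(counter, observed, observed + 1));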
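
// dynamicCall marshals an argument list described by argumentTypes into the
// ARM calling convention: the first four words go into a core-register table
// (r0-r3), floating-point values go into a VFP table when the hard-float ABI
// (__ARM_PCS_VFP) is in use, and whatever does not fit spills into a stack
// block, after which everything is handed to vmNativeCall.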
inline uint64_t
dynamicCall(void* function, uintptr_t* arguments, uint8_t* argumentTypes,
            unsigned argumentCount, unsigned argumentsSize UNUSED,
            unsigned returnType)
{
#ifdef __APPLE__
  const unsigned Alignment = 1;
#else
  const unsigned Alignment = 2;
#endif
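
  // Alignment is the number of argument words to which a 64-bit value must be
  // aligned: AAPCS requires such values to start in an even-numbered register
  // and on an 8-byte stack boundary, while Apple's ARM ABI (Alignment == 1
  // here) packs them without that padding.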

  const unsigned GprCount = 4;
  uintptr_t gprTable[GprCount];
  unsigned gprIndex = 0;

  const unsigned VfpCount = 16;
  uintptr_t vfpTable[VfpCount];
  unsigned vfpIndex = 0;
  unsigned vfpBackfillIndex UNUSED = 0;

  // larger than argumentsSize to leave room for alignment padding
  uintptr_t stack[(argumentCount * 8) / BytesPerWord];
  unsigned stackIndex = 0;
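
  // Walk the arguments: ati indexes argumentTypes (one entry per logical
  // argument) while ai indexes machine words in `arguments`, advancing by two
  // words for each 64-bit value on a 32-bit target.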
  unsigned ai = 0;
  for (unsigned ati = 0; ati < argumentCount; ++ ati) {
    switch (argumentTypes[ati]) {
    case DOUBLE_TYPE:
#if defined(__ARM_PCS_VFP)
      {
        if (vfpIndex + Alignment <= VfpCount) {
          if (vfpIndex % Alignment) {
            vfpBackfillIndex = vfpIndex;
            ++ vfpIndex;
          }

          memcpy(vfpTable + vfpIndex, arguments + ai, 8);
          vfpIndex += 8 / BytesPerWord;
        } else {
          vfpIndex = VfpCount;
          if (stackIndex % Alignment) {
            ++ stackIndex;
          }

          memcpy(stack + stackIndex, arguments + ai, 8);
          stackIndex += 8 / BytesPerWord;
        }
        ai += 8 / BytesPerWord;
      } break;

    case FLOAT_TYPE:
      if (vfpBackfillIndex) {
        vfpTable[vfpBackfillIndex] = arguments[ai];
        vfpBackfillIndex = 0;
      } else if (vfpIndex < VfpCount) {
        vfpTable[vfpIndex++] = arguments[ai];
      } else {
        stack[stackIndex++] = arguments[ai];
      }
      ++ ai;
      break;
#endif
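
    // Without the hard-float ABI the block above is compiled out: DOUBLE_TYPE
    // falls through to the INT64_TYPE case and FLOAT_TYPE is handled by the
    // default case, so floating-point values travel in core registers or on
    // the stack like integers.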

    case INT64_TYPE: {
      if (gprIndex + Alignment <= GprCount) { // pass argument in register(s)
        if (Alignment == 1
            and BytesPerWord < 8
            and gprIndex + Alignment == GprCount)
        {
          gprTable[gprIndex++] = arguments[ai];
          stack[stackIndex++] = arguments[ai + 1];
        } else {
          if (gprIndex % Alignment) {
            ++ gprIndex;
          }

          memcpy(gprTable + gprIndex, arguments + ai, 8);
          gprIndex += 8 / BytesPerWord;
        }
      } else { // pass argument on stack
        gprIndex = GprCount;
        if (stackIndex % Alignment) {
          ++ stackIndex;
        }

        memcpy(stack + stackIndex, arguments + ai, 8);
        stackIndex += 8 / BytesPerWord;
      }
      ai += 8 / BytesPerWord;
    } break;

    default: {
      if (gprIndex < GprCount) {
        gprTable[gprIndex++] = arguments[ai];
      } else {
        stack[stackIndex++] = arguments[ai];
      }
      ++ ai;
    } break;
    }
  }

  if (gprIndex < GprCount) { // pad since assembly loads all GPRs
    memset(gprTable + gprIndex, 0, (GprCount - gprIndex) * 4);
    gprIndex = GprCount;
  }

  if (vfpIndex < VfpCount) {
    memset(vfpTable + vfpIndex, 0, (VfpCount - vfpIndex) * 4);
    vfpIndex = VfpCount;
  }
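
  // Round the outgoing stack block up to a multiple of 8 bytes so the stack
  // pointer stays 8-byte aligned across the call, as AAPCS requires at a
  // public interface, then let vmNativeCall copy the block, load the register
  // tables, and invoke the target.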
  unsigned stackSize = stackIndex * BytesPerWord + ((stackIndex & 1) << 2);
  return vmNativeCall
    (function, stackSize, stack, stackIndex * BytesPerWord,
     (gprIndex ? gprTable : 0),
     (vfpIndex ? vfpTable : 0), returnType);
}
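
// A hedged usage sketch (the function pointer is illustrative and the type
// constants are assumed to come from the VM headers included above; the
// argumentsSize value is unused by this implementation):
//
//   uintptr_t arguments[2];
//   int64_t value = 42;
//   memcpy(arguments, &value, 8);
//   uint8_t argumentTypes[] = { INT64_TYPE };
//   uint64_t raw = dynamicCall(reinterpret_cast<void*>(someNativeFunction),
//                              arguments, argumentTypes, 1, 2, INT64_TYPE);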

} // namespace vm

#endif // ARM_H