corda/src/compile.cpp

/* Copyright (c) 2008-2010, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */
#include "machine.h"
2007-12-09 22:45:43 +00:00
#include "util.h"
#include "vector.h"
#include "process.h"
2008-02-11 17:21:41 +00:00
#include "assembler.h"
2007-12-09 22:45:43 +00:00
#include "compiler.h"
2008-06-04 22:21:27 +00:00
#include "arch.h"
using namespace vm;
extern "C" uint64_t
vmInvoke(void* thread, void* function, void* arguments,
unsigned argumentFootprint, unsigned frameSize, unsigned returnType);
fix Thread.getStackTrace race conditions Implementing Thread.getStackTrace is tricky. A thread may interrupt another thread at any time to grab a stack trace, including while the latter is executing Java code, JNI code, helper thunks, VM code, or while transitioning between any of these. To create a stack trace we use several context fields associated with the target thread, including snapshots of the instruction pointer, stack pointer, and frame pointer. These fields must be current, accurate, and consistent with each other in order to get a reliable trace. Otherwise, we risk crashing the VM by trying to walk garbage stack frames or by misinterpreting the size and/or content of legitimate frames. This commit addresses sensitive transition points such as entering the helper thunks which bridge the transitions from Java to native code (where we must save the stack and frame registers for use from native code) and stack unwinding (where we must atomically update the thread context fields to indicate which frame we are unwinding to). When grabbing a trace for another thread, we determine what kind of code we caught the thread executing in and use that information to choose the thread context values with which to begin the trace. See MyProcessor::getStackTrace::Visitor::visit for details. In order to atomically update the thread context fields, we do the following: 1. Create a temporary "transition" object to serve as a staging area and populate it with the new field values. 2. Update a transition pointer in the thread object to point to the object created above. As long as this pointer is non-null, interrupting threads will use the context values in the staging object instead of those in the thread object. 3. Update the fields in the thread object. 4. Clear the transition pointer in the thread object. We use a memory barrier between each of these steps to ensure they are made visible to other threads in program order. See MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
extern "C" void
vmInvoke_returnAddress();
extern "C" void
vmInvoke_safeStack();
2009-05-23 22:15:06 +00:00
extern "C" void
vmJumpAndInvoke(void* thread, void* function, void* base, void* stack,
unsigned argumentFootprint, uintptr_t* arguments,
unsigned frameSize);
2009-05-05 01:04:17 +00:00
2007-10-04 03:19:39 +00:00
extern "C" void
vmCall();
namespace {
namespace local {

const bool DebugCompile = false;
const bool DebugNatives = false;
const bool DebugCallTable = false;
const bool DebugMethodTree = false;
const bool DebugFrameMaps = false;
const bool DebugIntrinsics = false;

const bool CheckArrayBounds = true;

#ifdef AVIAN_CONTINUATIONS
const bool Continuations = true;
#else
const bool Continuations = false;
#endif

const unsigned MaxNativeCallFootprint = 4;

const unsigned InitialZoneCapacityInBytes = 64 * 1024;

const unsigned ExecutableAreaSizeInBytes = 16 * 1024 * 1024;

enum Root {
  CallTable,
  MethodTree,
  MethodTreeSentinal,
  ObjectPools,
  StaticTableArray,
  VirtualThunks,
  ReceiveMethod,
  WindMethod,
  RewindMethod
};

const unsigned RootCount = RewindMethod + 1;
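
// Returns true when ip falls inside the window of vmInvoke between
// vmInvoke_returnAddress and vmInvoke_safeStack, i.e. where the Java
// stack has not yet been made safe to walk.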
inline bool
isVmInvokeUnsafeStack(void* ip)
{
  return reinterpret_cast<uintptr_t>(ip)
    >= reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_returnAddress))
    and reinterpret_cast<uintptr_t>(ip)
    < reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_safeStack));
}

class MyThread: public Thread {
 public:
  class CallTrace {
   public:
    CallTrace(MyThread* t, object method):
      t(t),
      base(t->base),
      stack(t->stack),
      continuation(t->continuation),
      nativeMethod((methodFlags(t, method) & ACC_NATIVE) ? method : 0),
      targetMethod(0),
      originalMethod(method),
      next(t->trace)
    {
      doTransition(t, 0, 0, 0, 0, this);
    }

    ~CallTrace() {
      assert(t, t->stack == 0);

      doTransition(t, 0, stack, base, continuation, next);
    }

    MyThread* t;
    void* base;
    void* stack;
    object continuation;
    object nativeMethod;
    object targetMethod;
    object originalMethod;
    CallTrace* next;
  };

  class Context {
   public:
    class MyProtector: public Thread::Protector {
     public:
      MyProtector(MyThread* t, Context* context):
        Protector(t), context(context)
      { }

      virtual void visit(Heap::Visitor* v) {
        v->visit(&(context->continuation));
      }

      Context* context;
    };

    Context(MyThread* t, void* ip, void* stack, void* base,
            object continuation, CallTrace* trace):
      ip(ip),
      stack(stack),
      base(base),
      continuation(continuation),
      trace(trace),
      protector(t, this)
    { }

    void* ip;
    void* stack;
    void* base;
    object continuation;
    CallTrace* trace;
    MyProtector protector;
  };

  class TraceContext: public Context {
   public:
    TraceContext(MyThread* t, void* ip, void* stack, void* base,
                 object continuation, CallTrace* trace):
      Context(t, ip, stack, base, continuation, trace),
      t(t),
      next(t->traceContext)
    {
      t->traceContext = this;
    }

    TraceContext(MyThread* t):
      Context(t, t->ip, t->stack, t->base, t->continuation, t->trace),
      t(t),
      next(t->traceContext)
    {
      t->traceContext = this;
    }

    ~TraceContext() {
      t->traceContext = next;
    }

    MyThread* t;
    TraceContext* next;
  };
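
  // Transition protocol: stage the new values in a temporary Context,
  // publish it via t->transition so that an interrupting thread reads
  // the staged copy, then update the thread's own fields, and finally
  // clear t->transition; a barrier between each step keeps the updates
  // visible in program order.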
  static void doTransition(MyThread* t, void* ip, void* stack, void* base,
                           object continuation, MyThread::CallTrace* trace)
  {
    // in this function, we "atomically" update the thread context
    // fields in such a way to ensure that another thread may
    // interrupt us at any time and still get a consistent, accurate
    // stack trace. See MyProcessor::getStackTrace for details.

    assert(t, t->transition == 0);

    Context c(t, ip, stack, base, continuation, trace);

    compileTimeMemoryBarrier();

    t->transition = &c;

    compileTimeMemoryBarrier();

    t->ip = ip;
    t->base = base;
    t->stack = stack;
    t->continuation = continuation;
    t->trace = trace;

    compileTimeMemoryBarrier();

    t->transition = 0;
  }

  MyThread(Machine* m, object javaThread, MyThread* parent,
           bool useNativeFeatures):
    Thread(m, javaThread, parent),
    ip(0),
    base(0),
    stack(0),
    continuation(0),
    exceptionStackAdjustment(0),
    exceptionOffset(0),
    exceptionHandler(0),
    tailAddress(0),
    virtualCallTarget(0),
    virtualCallIndex(0),
    trace(0),
    reference(0),
    arch(parent
         ? parent->arch
         : makeArchitecture(m->system, useNativeFeatures)),
    transition(0),
    traceContext(0),
    stackLimit(0)
  {
    arch->acquire();
  }

  void* ip;
  void* base;
  void* stack;
  object continuation;
  uintptr_t exceptionStackAdjustment;
  uintptr_t exceptionOffset;
  void* exceptionHandler;
  void* tailAddress;
  void* virtualCallTarget;
  uintptr_t virtualCallIndex;
  CallTrace* trace;
  Reference* reference;
  Assembler::Architecture* arch;
  Context* transition;
  TraceContext* traceContext;
  uintptr_t stackLimit;
};

void
transition(MyThread* t, void* ip, void* stack, void* base, object continuation,
           MyThread::CallTrace* trace)
{
  MyThread::doTransition(t, ip, stack, base, continuation, trace);
}

unsigned
parameterOffset(MyThread* t, object method)
{
  return methodParameterFootprint(t, method)
    + t->arch->frameFooterSize()
    + t->arch->frameReturnAddressSize() - 1;
}
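
// Reads the receiver ("this") argument of the current call directly
// from the stack; it sits just past the frame footer and return
// address.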
object
resolveThisPointer(MyThread* t, void* stack)
{
  return reinterpret_cast<object*>(stack)
    [t->arch->frameFooterSize() + t->arch->frameReturnAddressSize()];
}

object
findMethod(Thread* t, object method, object instance)
{
  if ((methodFlags(t, method) & ACC_STATIC) == 0) {
    if (classFlags(t, methodClass(t, method)) & ACC_INTERFACE) {
      return findInterfaceMethod(t, method, objectClass(t, instance));
    } else if (methodVirtual(t, method)) {
      return findVirtualMethod(t, method, objectClass(t, instance));
    }
  }
  return method;
}
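
// Resolves the concrete method to invoke for a virtual or interface
// call, first resolving the receiver's class if it is still marked
// with BootstrapFlag.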
object
resolveTarget(MyThread* t, void* stack, object method)
{
  object class_ = objectClass(t, resolveThisPointer(t, stack));

  if (classVmFlags(t, class_) & BootstrapFlag) {
    PROTECT(t, method);
    PROTECT(t, class_);

    resolveSystemClass(t, root(t, Machine::BootLoader), className(t, class_));
    if (UNLIKELY(t->exception)) return 0;
  }

  if (classFlags(t, methodClass(t, method)) & ACC_INTERFACE) {
    return findInterfaceMethod(t, method, class_);
  } else {
    return findVirtualMethod(t, method, class_);
  }
}

object
resolveTarget(MyThread* t, object class_, unsigned index)
{
  if (classVmFlags(t, class_) & BootstrapFlag) {
    PROTECT(t, class_);

    resolveSystemClass(t, root(t, Machine::BootLoader), className(t, class_));
    if (UNLIKELY(t->exception)) return 0;
  }

  return arrayBody(t, classVirtualTable(t, class_), index);
}
object&
root(Thread* t, Root root);
void
setRoot(Thread* t, Root root, object value);
2007-10-03 00:22:48 +00:00
2008-11-23 23:58:01 +00:00
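
// The word immediately preceding a method's compiled code records the
// size of that code; compiledSize reads it back.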
unsigned
compiledSize(intptr_t address)
{
  return reinterpret_cast<uintptr_t*>(address)[-1];
}

intptr_t
methodCompiled(Thread* t, object method)
{
  return codeCompiled(t, methodCode(t, method));
}

intptr_t
compareIpToMethodBounds(Thread* t, intptr_t ip, object method)
{
  intptr_t start = methodCompiled(t, method);

  if (DebugMethodTree) {
    fprintf(stderr, "find %p in (%p,%p)\n",
            reinterpret_cast<void*>(ip),
            reinterpret_cast<void*>(start),
            reinterpret_cast<void*>(start + compiledSize(start)));
  }

  if (ip < start) {
    return -1;
  } else if (ip < start + static_cast<intptr_t>
             (compiledSize(start) + BytesPerWord))
  {
    return 0;
  } else {
    return 1;
  }
}

object
methodForIp(MyThread* t, void* ip)
{
  if (DebugMethodTree) {
    fprintf(stderr, "query for method containing %p\n", ip);
  }

  // we must use a version of the method tree at least as recent as the
  // compiled form of the method containing the specified address (see
  // compile(MyThread*, Allocator*, BootContext*, object)):
  loadMemoryBarrier();

  return treeQuery(t, root(t, MethodTree), reinterpret_cast<intptr_t>(ip),
                   root(t, MethodTreeSentinal), compareIpToMethodBounds);
}
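
// Walks a thread's stack as a sequence of Java frames, native frames
// recorded via CallTrace, and continuation frames, driven by the state
// machine in valid()/next().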
class MyStackWalker: public Processor::StackWalker {
 public:
  enum State {
    Start,
    Next,
    Trace,
    Continuation,
    Method,
    NativeMethod,
    Finish
  };

  class MyProtector: public Thread::Protector {
   public:
    MyProtector(MyStackWalker* walker):
      Protector(walker->t), walker(walker)
    { }

    virtual void visit(Heap::Visitor* v) {
      v->visit(&(walker->method_));
      v->visit(&(walker->continuation));
    }

    MyStackWalker* walker;
  };

  MyStackWalker(MyThread* t):
    t(t),
    state(Start),
    method_(0),
    protector(this)
  {
    if (t->traceContext) {
      ip_ = t->traceContext->ip;
      base = t->traceContext->base;
      stack = t->traceContext->stack;
      trace = t->traceContext->trace;
      continuation = t->traceContext->continuation;
    } else {
      ip_ = 0;
      base = t->base;
      stack = t->stack;
      trace = t->trace;
      continuation = t->continuation;
    }
  }

  MyStackWalker(MyStackWalker* w):
    t(w->t),
    state(w->state),
    ip_(w->ip_),
    base(w->base),
    stack(w->stack),
    trace(w->trace),
    method_(w->method_),
    continuation(w->continuation),
    protector(this)
  { }

  virtual void walk(Processor::StackVisitor* v) {
    for (MyStackWalker it(this); it.valid();) {
      MyStackWalker walker(&it);
      if (not v->visit(&walker)) {
        break;
      }
      it.next();
    }
  }

  bool valid() {
    while (true) {
      // fprintf(stderr, "state: %d\n", state);
      switch (state) {
      case Start:
        if (ip_ == 0) {
          ip_ = t->arch->frameIp(stack);
        }

        if (trace and trace->nativeMethod) {
          method_ = trace->nativeMethod;
          state = NativeMethod;
        } else {
          state = Next;
        }
        break;

      case Next:
        if (stack) {
          method_ = methodForIp(t, ip_);
          if (method_) {
            state = Method;
          } else if (continuation) {
            method_ = continuationMethod(t, continuation);
            state = Continuation;
          } else {
            state = Trace;
          }
        } else {
          state = Trace;
        }
        break;

      case Trace: {
        if (trace) {
          continuation = trace->continuation;
          stack = trace->stack;
          base = trace->base;
          ip_ = t->arch->frameIp(stack);
          trace = trace->next;
          state = Start;
        } else {
          state = Finish;
        }
      } break;

      case Continuation:
      case Method:
      case NativeMethod:
        return true;

      case Finish:
        return false;

      default:
        abort(t);
      }
    }
  }

  void next() {
    switch (state) {
    case Continuation:
      continuation = continuationNext(t, continuation);
      break;

    case Method:
      t->arch->nextFrame(&stack, &base);
      ip_ = t->arch->frameIp(stack);
      break;

    case NativeMethod:
      break;

    default:
      abort(t);
    }

    state = Next;
  }

  virtual object method() {
    // fprintf(stderr, "method %s.%s\n", &byteArrayBody
    //         (t, className(t, methodClass(t, method_)), 0),
    //         &byteArrayBody(t, methodName(t, method_), 0));
    return method_;
  }

  virtual int ip() {
    switch (state) {
    case Continuation:
      return reinterpret_cast<intptr_t>(continuationAddress(t, continuation))
        - methodCompiled(t, continuationMethod(t, continuation));

    case Method:
      return reinterpret_cast<intptr_t>(ip_) - methodCompiled(t, method_);

    case NativeMethod:
      return 0;

    default:
      abort(t);
    }
  }

  virtual unsigned count() {
    unsigned count = 0;

    for (MyStackWalker walker(this); walker.valid();) {
      walker.next();
      ++ count;
    }

    return count;
  }

  MyThread* t;
  State state;
  void* ip_;
  void* base;
  void* stack;
  MyThread::CallTrace* trace;
  object method_;
  object continuation;
  MyProtector protector;
};
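
// Frame sizing helpers: localSize reserves an extra slot for
// synchronized, non-static methods (presumably so the monitor object
// stays addressable for the duration of the call), and
// alignedFrameSize adds the operand stack and the worst-case native
// call footprint on top of the non-parameter locals.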
unsigned
localSize(MyThread* t, object method)
{
  unsigned size = codeMaxLocals(t, methodCode(t, method));
  if ((methodFlags(t, method) & (ACC_SYNCHRONIZED | ACC_STATIC))
      == ACC_SYNCHRONIZED)
  {
    ++ size;
  }
  return size;
}

unsigned
alignedFrameSize(MyThread* t, object method)
{
  return t->arch->alignFrameSize
    (localSize(t, method)
     - methodParameterFootprint(t, method)
     + codeMaxStack(t, methodCode(t, method))
     + t->arch->frameFootprint(MaxNativeCallFootprint));
}
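
// Maps a Java local variable index to a frame-relative slot offset:
// parameters live in the caller's frame beyond the header and footer,
// while the remaining locals live within the current frame.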
int
localOffset(MyThread* t, int v, object method)
{
  int parameterFootprint = methodParameterFootprint(t, method);
  int frameSize = alignedFrameSize(t, method);

  int offset = ((v < parameterFootprint) ?
                (frameSize
                 + parameterFootprint
                 + t->arch->frameFooterSize()
                 + t->arch->frameHeaderSize()
                 - v - 1) :
                (frameSize
                 + parameterFootprint
                 - v - 1));

  assert(t, offset >= 0);
  return offset;
}

int
localOffsetFromStack(MyThread* t, int index, object method)
{
  return localOffset(t, index, method)
    + t->arch->frameReturnAddressSize();
}

object*
localObject(MyThread* t, void* stack, object method, unsigned index)
{
  return static_cast<object*>(stack) + localOffsetFromStack(t, index, method);
}

int
stackOffsetFromFrame(MyThread* t, object method)
{
  return alignedFrameSize(t, method) + t->arch->frameHeaderSize();
}

void*
stackForFrame(MyThread* t, void* frame, object method)
{
  return static_cast<void**>(frame) - stackOffsetFromFrame(t, method);
}
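
// An entry in a compiled method's object pool: a Promise whose value
// is an address filled in once the method and its pool have been
// placed in memory; address == 0 means "not yet resolved".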
class PoolElement: public Promise {
 public:
  PoolElement(Thread* t, object target, PoolElement* next):
    t(t), target(target), address(0), next(next)
  { }

  virtual int64_t value() {
    assert(t, resolved());
    return address;
  }

  virtual bool resolved() {
    return address != 0;
  }

  Thread* t;
  object target;
  intptr_t address;
  PoolElement* next;
};
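
// Bookkeeping for bytecode subroutines (jsr/ret): each Subroutine
// records the call sites that reach it, while SubroutineCall,
// SubroutinePath, and SubroutineTrace below track the distinct paths
// through nested subroutines for the GC root maps.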
class Context;
class SubroutineCall;

class Subroutine {
 public:
  Subroutine(unsigned ip, unsigned logIndex, Subroutine* listNext,
             Subroutine* stackNext):
    listNext(listNext),
    stackNext(stackNext),
    calls(0),
    handle(0),
    ip(ip),
    logIndex(logIndex),
    stackIndex(0),
    callCount(0),
    tableIndex(0),
    visited(false)
  { }

  Subroutine* listNext;
  Subroutine* stackNext;
  SubroutineCall* calls;
  Compiler::Subroutine* handle;
  unsigned ip;
  unsigned logIndex;
  unsigned stackIndex;
  unsigned callCount;
  unsigned tableIndex;
  bool visited;
};

class SubroutinePath;

class SubroutineCall {
 public:
  SubroutineCall(Subroutine* subroutine, Promise* returnAddress):
    subroutine(subroutine),
    returnAddress(returnAddress),
    paths(0),
    next(subroutine->calls)
  {
    subroutine->calls = this;
    ++ subroutine->callCount;
  }

  Subroutine* subroutine;
  Promise* returnAddress;
  SubroutinePath* paths;
  SubroutineCall* next;
};

class SubroutinePath {
 public:
  SubroutinePath(SubroutineCall* call, SubroutinePath* stackNext,
                 uintptr_t* rootTable):
    call(call),
    stackNext(stackNext),
    listNext(call->paths),
    rootTable(rootTable)
  {
    call->paths = this;
  }

  SubroutineCall* call;
  SubroutinePath* stackNext;
  SubroutinePath* listNext;
  uintptr_t* rootTable;
};

void
print(SubroutinePath* path)
{
  if (path) {
    fprintf(stderr, " (");
    while (true) {
      fprintf(stderr, "%p", path->call->returnAddress->resolved() ?
              reinterpret_cast<void*>(path->call->returnAddress->value()) : 0);
      path = path->stackNext;
      if (path) {
        fprintf(stderr, ", ");
      } else {
        break;
      }
    }
    fprintf(stderr, ")");
  }
}

class SubroutineTrace {
 public:
  SubroutineTrace(SubroutinePath* path, SubroutineTrace* next,
                  unsigned mapSize):
    path(path),
    next(next),
    watch(false)
  {
    memset(map, 0, mapSize * BytesPerWord);
  }

  SubroutinePath* path;
  SubroutineTrace* next;
  bool watch;
  uintptr_t map[0];
};
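
// A TraceElement records a GC point in the generated code (typically a
// call site): the machine address, the target method if known, call
// flags, and a map of which frame slots hold object references there.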
2008-02-11 17:21:41 +00:00
class TraceElement: public TraceHandler {
2007-12-09 22:45:43 +00:00
public:
static const unsigned VirtualCall = 1 << 0;
static const unsigned TailCall = 1 << 1;
static const unsigned LongCall = 1 << 2;
fix stack frame mapping code for exception handlers Previously, the stack frame mapping code (responsible for statically calculating the map of GC roots for a method's stack frame during JIT compilation) would assume that the map of GC roots on entry to an exception handler is the same as on entry to the "try" block which the handler is attached to. Technically, this is true, but the algorithm we use does not consider whether a local variable is still "live" (i.e. will be read later) when calculating the map - only whether we can expect to find a reference there via normal (non-exceptional) control flow. This can backfire if, within a "try" block, the stack location which held an object reference on entry to the block gets overwritten with a non-reference (i.e. a primitive). If an exception is later thrown from such a block, we might end up trying to treat that non-reference as a reference during GC, which will crash the VM. The ideal way to fix this is to calculate the true interval for which each value is live and use that to produce the stack frame maps. This would provide the added benefit of ensuring that the garbage collector does not visit references which, although still present on the stack, will not be used again. However, this commit uses the less invasive strategy of ANDing together the root maps at each GC point within a "try" block and using the result as the map on entry to the corresponding exception handler(s). This should give us safe, if not optimal, results. Later on, we can refine it as described above.
2010-02-05 01:03:32 +00:00
TraceElement(Context* context, unsigned ip, object target, unsigned flags,
TraceElement* next, unsigned mapSize):
context(context),
address(0),
next(next),
subroutineTrace(0),
target(target),
fix stack frame mapping code for exception handlers Previously, the stack frame mapping code (responsible for statically calculating the map of GC roots for a method's stack frame during JIT compilation) would assume that the map of GC roots on entry to an exception handler is the same as on entry to the "try" block which the handler is attached to. Technically, this is true, but the algorithm we use does not consider whether a local variable is still "live" (i.e. will be read later) when calculating the map - only whether we can expect to find a reference there via normal (non-exceptional) control flow. This can backfire if, within a "try" block, the stack location which held an object reference on entry to the block gets overwritten with a non-reference (i.e. a primitive). If an exception is later thrown from such a block, we might end up trying to treat that non-reference as a reference during GC, which will crash the VM. The ideal way to fix this is to calculate the true interval for which each value is live and use that to produce the stack frame maps. This would provide the added benefit of ensuring that the garbage collector does not visit references which, although still present on the stack, will not be used again. However, this commit uses the less invasive strategy of ANDing together the root maps at each GC point within a "try" block and using the result as the map on entry to the corresponding exception handler(s). This should give us safe, if not optimal, results. Later on, we can refine it as described above.
2010-02-05 01:03:32 +00:00
ip(ip),
subroutineTraceCount(0),
argumentIndex(0),
flags(flags),
watch(false)
{
memset(map, 0, mapSize * BytesPerWord);
}
virtual void handleTrace(Promise* address, unsigned argumentIndex) {
if (this->address == 0) {
this->address = address;
this->argumentIndex = argumentIndex;
}
}
Context* context;
Promise* address;
TraceElement* next;
SubroutineTrace* subroutineTrace;
object target;
unsigned ip;
unsigned subroutineTraceCount;
unsigned argumentIndex;
unsigned flags;
bool watch;
uintptr_t map[0];
};
class TraceElementPromise: public Promise {
public:
TraceElementPromise(System* s, TraceElement* trace): s(s), trace(trace) { }
virtual int64_t value() {
assert(s, resolved());
return trace->address->value();
}
virtual bool resolved() {
return trace->address != 0 and trace->address->resolved();
}
System* s;
TraceElement* trace;
};
enum Event {
PushContextEvent,
PopContextEvent,
IpEvent,
MarkEvent,
ClearEvent,
PushExceptionHandlerEvent,
TraceEvent,
PushSubroutineEvent,
PopSubroutineEvent
};
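// Note: a frame's GC map has one bit per local variable slot plus one bit
// per operand stack slot, so its size in bits is the method's local count
// plus its maximum operand stack depth.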
unsigned
frameMapSizeInBits(MyThread* t, object method)
{
return localSize(t, method) + codeMaxStack(t, methodCode(t, method));
}
unsigned
frameMapSizeInWords(MyThread* t, object method)
{
return ceiling(frameMapSizeInBits(t, method), BitsPerWord);
}
uint16_t*
makeVisitTable(MyThread* t, Zone* zone, object method)
{
unsigned size = codeLength(t, methodCode(t, method)) * 2;
uint16_t* table = static_cast<uint16_t*>(zone->allocate(size));
memset(table, 0, size);
return table;
}
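// The root table holds one frame map per bytecode instruction and is
// initialized with every bit set (0xFF), presumably so the maps seen along
// each control-flow path can simply be ANDed into it as they are computed.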
uintptr_t*
makeRootTable(MyThread* t, Zone* zone, object method)
{
unsigned size = frameMapSizeInWords(t, method)
* codeLength(t, methodCode(t, method))
* BytesPerWord;
uintptr_t* table = static_cast<uintptr_t*>(zone->allocate(size));
memset(table, 0xFF, size);
return table;
}
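// The Thunk enum is generated from thunks.cpp via the THUNK x-macro: each
// THUNK(name) entry there becomes a nameThunk constant here, and
// gcIfNecessaryThunk is expected to be the last one.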
enum Thunk {
#define THUNK(s) s##Thunk,
#include "thunks.cpp"
#undef THUNK
};
const unsigned ThunkCount = gcIfNecessaryThunk + 1;
intptr_t
getThunk(MyThread* t, Thunk thunk);
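// BootContext appears to collect the state needed when compiling methods
// ahead of time: the constants and call sites recorded so far, plus a list
// of address promises to be resolved once the code is placed.  Its
// protector keeps the two object lists visible to the garbage collector.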
class BootContext {
public:
class MyProtector: public Thread::Protector {
public:
MyProtector(Thread* t, BootContext* c): Protector(t), c(c) { }
virtual void visit(Heap::Visitor* v) {
v->visit(&(c->constants));
v->visit(&(c->calls));
}
BootContext* c;
};
BootContext(Thread* t, object constants, object calls,
DelayedPromise* addresses, Zone* zone):
protector(t, this), constants(constants), calls(calls),
addresses(addresses), addressSentinal(addresses), zone(zone)
{ }
MyProtector protector;
object constants;
object calls;
DelayedPromise* addresses;
DelayedPromise* addressSentinal;
Zone* zone;
};
class Context {
public:
class MyProtector: public Thread::Protector {
public:
MyProtector(Context* c): Protector(c->thread), c(c) { }
virtual void visit(Heap::Visitor* v) {
v->visit(&(c->method));
for (PoolElement* p = c->objectPool; p; p = p->next) {
v->visit(&(p->target));
}
for (TraceElement* p = c->traceLog; p; p = p->next) {
v->visit(&(p->target));
}
}
Context* c;
};
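// MyClient tells the compiler where to find the runtime thunks used for
// operations without inline implementations (floating-point arithmetic and
// comparison, integer division, etc.); thunks flagged via threadParameter
// also receive the thread pointer as an argument.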
class MyClient: public Compiler::Client {
public:
MyClient(MyThread* t): t(t) { }
virtual intptr_t getThunk(UnaryOperation, unsigned) {
abort(t);
}
virtual intptr_t getThunk(BinaryOperation op, unsigned size,
unsigned resultSize)
{
if (size == 8) {
switch(op) {
case Absolute:
assert(t, resultSize == 8);
return local::getThunk(t, absoluteLongThunk);
case FloatNegate:
assert(t, resultSize == 8);
return local::getThunk(t, negateDoubleThunk);
case FloatSquareRoot:
assert(t, resultSize == 8);
return local::getThunk(t, squareRootDoubleThunk);
case Float2Float:
assert(t, resultSize == 4);
return local::getThunk(t, doubleToFloatThunk);
case Float2Int:
if (resultSize == 8) {
return local::getThunk(t, doubleToLongThunk);
} else {
assert(t, resultSize == 4);
return local::getThunk(t, doubleToIntThunk);
}
case Int2Float:
if (resultSize == 8) {
return local::getThunk(t, longToDoubleThunk);
} else {
assert(t, resultSize == 4);
return local::getThunk(t, longToFloatThunk);
}
default: abort(t);
}
} else {
assert(t, size == 4);
switch(op) {
case Absolute:
assert(t, resultSize == 4);
return local::getThunk(t, absoluteIntThunk);
case FloatNegate:
assert(t, resultSize == 4);
return local::getThunk(t, negateFloatThunk);
case FloatAbsolute:
assert(t, resultSize == 4);
return local::getThunk(t, absoluteFloatThunk);
case Float2Float:
assert(t, resultSize == 8);
return local::getThunk(t, floatToDoubleThunk);
case Float2Int:
if (resultSize == 4) {
return local::getThunk(t, floatToIntThunk);
} else {
assert(t, resultSize == 8);
return local::getThunk(t, floatToLongThunk);
}
case Int2Float:
if (resultSize == 4) {
return local::getThunk(t, intToFloatThunk);
} else {
assert(t, resultSize == 8);
return local::getThunk(t, intToDoubleThunk);
}
default: abort(t);
}
}
}
virtual intptr_t getThunk(TernaryOperation op, unsigned size, unsigned,
bool* threadParameter)
{
*threadParameter = false;
if (size == 8) {
switch (op) {
case Divide:
*threadParameter = true;
return local::getThunk(t, divideLongThunk);
case Remainder:
*threadParameter = true;
return local::getThunk(t, moduloLongThunk);
case FloatAdd:
return local::getThunk(t, addDoubleThunk);
case FloatSubtract:
return local::getThunk(t, subtractDoubleThunk);
case FloatMultiply:
return local::getThunk(t, multiplyDoubleThunk);
case FloatDivide:
return local::getThunk(t, divideDoubleThunk);
case FloatRemainder:
return local::getThunk(t, moduloDoubleThunk);
case JumpIfFloatEqual:
case JumpIfFloatNotEqual:
case JumpIfFloatLess:
case JumpIfFloatGreater:
case JumpIfFloatLessOrEqual:
case JumpIfFloatGreaterOrUnordered:
case JumpIfFloatGreaterOrEqualOrUnordered:
return local::getThunk(t, compareDoublesGThunk);
case JumpIfFloatGreaterOrEqual:
case JumpIfFloatLessOrUnordered:
case JumpIfFloatLessOrEqualOrUnordered:
return local::getThunk(t, compareDoublesLThunk);
default: abort(t);
}
} else {
assert(t, size == 4);
switch (op) {
case Divide:
*threadParameter = true;
return local::getThunk(t, divideIntThunk);
case Remainder:
*threadParameter = true;
return local::getThunk(t, moduloIntThunk);
case FloatAdd:
return local::getThunk(t, addFloatThunk);
case FloatSubtract:
return local::getThunk(t, subtractFloatThunk);
case FloatMultiply:
return local::getThunk(t, multiplyFloatThunk);
case FloatDivide:
return local::getThunk(t, divideFloatThunk);
case FloatRemainder:
return local::getThunk(t, moduloFloatThunk);
case JumpIfFloatEqual:
case JumpIfFloatNotEqual:
case JumpIfFloatLess:
case JumpIfFloatGreater:
case JumpIfFloatLessOrEqual:
case JumpIfFloatGreaterOrUnordered:
case JumpIfFloatGreaterOrEqualOrUnordered:
return local::getThunk(t, compareFloatsGThunk);
case JumpIfFloatGreaterOrEqual:
case JumpIfFloatLessOrUnordered:
case JumpIfFloatLessOrEqualOrUnordered:
return local::getThunk(t, compareFloatsLThunk);
default: abort(t);
}
}
}
MyThread* t;
};
Context(MyThread* t, BootContext* bootContext, object method):
thread(t),
zone(t->m->system, t->m->heap, InitialZoneCapacityInBytes),
assembler(makeAssembler(t->m->system, t->m->heap, &zone, t->arch)),
client(t),
compiler(makeCompiler(t->m->system, assembler, &zone, &client)),
method(method),
bootContext(bootContext),
objectPool(0),
subroutines(0),
traceLog(0),
visitTable(makeVisitTable(t, &zone, method)),
rootTable(makeRootTable(t, &zone, method)),
subroutineTable(0),
objectPoolCount(0),
traceLogCount(0),
dirtyRoots(false),
leaf(true),
eventLog(t->m->system, t->m->heap, 1024),
protector(this)
{ }
Context(MyThread* t):
thread(t),
zone(t->m->system, t->m->heap, InitialZoneCapacityInBytes),
assembler(makeAssembler(t->m->system, t->m->heap, &zone, t->arch)),
client(t),
compiler(0),
method(0),
bootContext(0),
objectPool(0),
subroutines(0),
traceLog(0),
visitTable(0),
rootTable(0),
subroutineTable(0),
objectPoolCount(0),
traceLogCount(0),
dirtyRoots(false),
leaf(true),
eventLog(t->m->system, t->m->heap, 0),
protector(this)
{ }
~Context() {
if (compiler) compiler->dispose();
assembler->dispose();
}
MyThread* thread;
Zone zone;
Assembler* assembler;
MyClient client;
Compiler* compiler;
object method;
BootContext* bootContext;
PoolElement* objectPool;
Subroutine* subroutines;
TraceElement* traceLog;
uint16_t* visitTable;
uintptr_t* rootTable;
Subroutine** subroutineTable;
unsigned objectPoolCount;
unsigned traceLogCount;
bool dirtyRoots;
bool leaf;
Vector eventLog;
MyProtector protector;
};
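// Parameter slots are laid out in the opposite order from the JVM's local
// numbering, so indices that fall within the parameter footprint are
// mirrored here; all other locals pass through unchanged.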
unsigned
translateLocalIndex(Context* context, unsigned footprint, unsigned index)
{
unsigned parameterFootprint = methodParameterFootprint
(context->thread, context->method);
if (index < parameterFootprint) {
return parameterFootprint - index - footprint;
} else {
return index;
}
}
Compiler::Operand*
loadLocal(Context* context, unsigned footprint, unsigned index)
{
return context->compiler->loadLocal
(footprint, translateLocalIndex(context, footprint, index));
}
void
storeLocal(Context* context, unsigned footprint, Compiler::Operand* value,
unsigned index)
{
context->compiler->storeLocal
(footprint, value, translateLocalIndex(context, footprint, index));
}
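// Frame tracks, at compile time, which operand stack slots currently hold
// object references (Integer/Long/Object in stackMap) and records
// Mark/Clear/Ip events in the context's event log so that a GC frame map
// can be reconstructed for every instruction later on.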
class Frame {
public:
enum StackType {
Integer,
Long,
Object
};
Frame(Context* context, uint8_t* stackMap):
context(context),
t(context->thread),
c(context->compiler),
subroutine(0),
stackMap(stackMap),
ip(0),
sp(localSize()),
level(0)
{
memset(stackMap, 0, codeMaxStack(t, methodCode(t, context->method)));
}
Frame(Frame* f, uint8_t* stackMap):
context(f->context),
t(context->thread),
c(context->compiler),
subroutine(f->subroutine),
stackMap(stackMap),
ip(f->ip),
sp(f->sp),
level(f->level + 1)
{
memcpy(stackMap, f->stackMap, codeMaxStack
(t, methodCode(t, context->method)));
if (level > 1) {
context->eventLog.append(PushContextEvent);
}
}
~Frame() {
if (t->exception == 0) {
if (level > 1) {
context->eventLog.append(PopContextEvent);
}
}
}
Compiler::Operand* append(object o) {
if (context->bootContext) {
BootContext* bc = context->bootContext;
Promise* p = new (bc->zone->allocate(sizeof(ListenPromise)))
ListenPromise(t->m->system, bc->zone);
PROTECT(t, o);
object pointer = makePointer(t, p);
bc->constants = makeTriple(t, o, pointer, bc->constants);
return c->promiseConstant(p, Compiler::ObjectType);
} else {
for (PoolElement* e = context->objectPool; e; e = e->next) {
if (o == e->target) {
return c->address(e);
}
}
context->objectPool = new
(context->zone.allocate(sizeof(PoolElement)))
PoolElement(t, o, context->objectPool);
++ context->objectPoolCount;
return c->address(context->objectPool);
}
}
unsigned localSize() {
return local::localSize(t, context->method);
}
unsigned stackSize() {
return codeMaxStack(t, methodCode(t, context->method));
}
unsigned frameSize() {
return localSize() + stackSize();
}
void set(unsigned index, uint8_t type) {
assert(t, index < frameSize());
if (type == Object) {
context->eventLog.append(MarkEvent);
context->eventLog.append2(index);
} else {
context->eventLog.append(ClearEvent);
context->eventLog.append2(index);
}
int si = index - localSize();
if (si >= 0) {
stackMap[si] = type;
}
}
uint8_t get(unsigned index) {
assert(t, index < frameSize());
int si = index - localSize();
assert(t, si >= 0);
return stackMap[si];
}
void pushedInt() {
assert(t, sp + 1 <= frameSize());
set(sp++, Integer);
}
void pushedLong() {
assert(t, sp + 2 <= frameSize());
set(sp++, Long);
set(sp++, Long);
}
void pushedObject() {
assert(t, sp + 1 <= frameSize());
set(sp++, Object);
}
void popped(unsigned count) {
assert(t, sp >= count);
assert(t, sp - count >= localSize());
while (count) {
set(--sp, Integer);
-- count;
}
}
void poppedInt() {
assert(t, sp >= 1);
assert(t, sp - 1 >= localSize());
assert(t, get(sp - 1) == Integer);
-- sp;
}
void poppedLong() {
assert(t, sp >= 2);
assert(t, sp - 2 >= localSize());
assert(t, get(sp - 1) == Long);
assert(t, get(sp - 2) == Long);
sp -= 2;
}
void poppedObject() {
assert(t, sp >= 1);
assert(t, sp - 1 >= localSize());
assert(t, get(sp - 1) == Object);
set(--sp, Integer);
}
void storedInt(unsigned index) {
assert(t, index < localSize());
set(index, Integer);
}
void storedLong(unsigned index) {
assert(t, index + 1 < localSize());
set(index, Long);
set(index + 1, Long);
}
void storedObject(unsigned index) {
assert(t, index < localSize());
set(index, Object);
}
void dupped() {
assert(t, sp + 1 <= frameSize());
assert(t, sp - 1 >= localSize());
set(sp, get(sp - 1));
++ sp;
}
void duppedX1() {
assert(t, sp + 1 <= frameSize());
assert(t, sp - 2 >= localSize());
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp - 1, b2);
set(sp - 2, b1);
set(sp , b1);
++ sp;
}
void duppedX2() {
assert(t, sp + 1 <= frameSize());
assert(t, sp - 3 >= localSize());
uint8_t b3 = get(sp - 3);
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp - 2, b3);
set(sp - 1, b2);
set(sp - 3, b1);
set(sp , b1);
++ sp;
}
void dupped2() {
assert(t, sp + 2 <= frameSize());
assert(t, sp - 2 >= localSize());
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp, b2);
set(sp + 1, b1);
sp += 2;
}
void dupped2X1() {
assert(t, sp + 2 <= frameSize());
assert(t, sp - 3 >= localSize());
uint8_t b3 = get(sp - 3);
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp - 1, b3);
set(sp - 3, b2);
set(sp , b2);
set(sp - 2, b1);
set(sp + 1, b1);
sp += 2;
}
void dupped2X2() {
assert(t, sp + 2 <= frameSize());
assert(t, sp - 4 >= localSize());
uint8_t b4 = get(sp - 4);
uint8_t b3 = get(sp - 3);
uint8_t b2 = get(sp - 2);
uint8_t b1 = get(sp - 1);
set(sp - 2, b4);
set(sp - 1, b3);
set(sp - 4, b2);
set(sp , b2);
set(sp - 3, b1);
set(sp + 1, b1);
sp += 2;
}
void swapped() {
assert(t, sp - 2 >= localSize());
uint8_t saved = get(sp - 1);
set(sp - 1, get(sp - 2));
set(sp - 2, saved);
}
Promise* addressPromise(Promise* p) {
BootContext* bc = context->bootContext;
if (bc) {
bc->addresses = new (bc->zone->allocate(sizeof(DelayedPromise)))
DelayedPromise(t->m->system, bc->zone, p, bc->addresses);
return bc->addresses;
} else {
return p;
}
}
Compiler::Operand* addressOperand(Promise* p) {
return c->promiseConstant(addressPromise(p), Compiler::AddressType);
}
Compiler::Operand* machineIp(unsigned logicalIp) {
return c->promiseConstant(c->machineIp(logicalIp), Compiler::AddressType);
}
void visitLogicalIp(unsigned ip) {
c->visitLogicalIp(ip);
context->eventLog.append(IpEvent);
context->eventLog.append2(ip);
}
void startLogicalIp(unsigned ip) {
if (subroutine) {
context->subroutineTable[ip] = subroutine;
}
c->startLogicalIp(ip);
context->eventLog.append(IpEvent);
context->eventLog.append2(ip);
this->ip = ip;
}
void pushQuiet(unsigned footprint, Compiler::Operand* o) {
c->push(footprint, o);
}
void pushLongQuiet(Compiler::Operand* o) {
pushQuiet(2, o);
}
Compiler::Operand* popQuiet(unsigned footprint) {
return c->pop(footprint);
}
Compiler::Operand* popLongQuiet() {
Compiler::Operand* r = popQuiet(2);
return r;
}
void pushInt(Compiler::Operand* o) {
pushQuiet(1, o);
pushedInt();
}
void pushAddress(Compiler::Operand* o) {
pushQuiet(1, o);
pushedInt();
}
void pushObject(Compiler::Operand* o) {
pushQuiet(1, o);
pushedObject();
}
void pushObject() {
c->pushed();
pushedObject();
}
void pushLong(Compiler::Operand* o) {
pushLongQuiet(o);
pushedLong();
}
void pop(unsigned count) {
popped(count);
c->popped(count);
}
Compiler::Operand* popInt() {
poppedInt();
return popQuiet(1);
}
Compiler::Operand* popLong() {
poppedLong();
return popLongQuiet();
}
Compiler::Operand* popObject() {
poppedObject();
return popQuiet(1);
}
void loadInt(unsigned index) {
assert(t, index < localSize());
pushInt(loadLocal(context, 1, index));
}
void loadLong(unsigned index) {
assert(t, index < static_cast<unsigned>(localSize() - 1));
pushLong(loadLocal(context, 2, index));
}
void loadObject(unsigned index) {
assert(t, index < localSize());
pushObject(loadLocal(context, 1, index));
}
void storeInt(unsigned index) {
storeLocal(context, 1, popInt(), index);
storedInt(translateLocalIndex(context, 1, index));
}
void storeLong(unsigned index) {
storeLocal(context, 2, popLong(), index);
storedLong(translateLocalIndex(context, 2, index));
}
void storeObject(unsigned index) {
storeLocal(context, 1, popObject(), index);
storedObject(translateLocalIndex(context, 1, index));
}
void storeObjectOrAddress(unsigned index) {
storeLocal(context, 1, popQuiet(1), index);
assert(t, sp >= 1);
assert(t, sp - 1 >= localSize());
if (get(sp - 1) == Object) {
storedObject(translateLocalIndex(context, 1, index));
} else {
storedInt(translateLocalIndex(context, 1, index));
}
popped(1);
}
void dup() {
pushQuiet(1, c->peek(1, 0));
dupped();
}
void dupX1() {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
pushQuiet(1, s0);
pushQuiet(1, s1);
pushQuiet(1, s0);
duppedX1();
}
void dupX2() {
Compiler::Operand* s0 = popQuiet(1);
if (get(sp - 2) == Long) {
Compiler::Operand* s1 = popLongQuiet();
pushQuiet(1, s0);
pushLongQuiet(s1);
pushQuiet(1, s0);
} else {
Compiler::Operand* s1 = popQuiet(1);
Compiler::Operand* s2 = popQuiet(1);
pushQuiet(1, s0);
pushQuiet(1, s2);
pushQuiet(1, s1);
pushQuiet(1, s0);
}
duppedX2();
}
void dup2() {
if (get(sp - 1) == Long) {
pushLongQuiet(c->peek(2, 0));
} else {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
pushQuiet(1, s1);
pushQuiet(1, s0);
pushQuiet(1, s1);
pushQuiet(1, s0);
}
dupped2();
}
void dup2X1() {
if (get(sp - 1) == Long) {
Compiler::Operand* s0 = popLongQuiet();
Compiler::Operand* s1 = popQuiet(1);
pushLongQuiet(s0);
pushQuiet(1, s1);
pushLongQuiet(s0);
} else {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
Compiler::Operand* s2 = popQuiet(1);
pushQuiet(1, s1);
pushQuiet(1, s0);
pushQuiet(1, s2);
pushQuiet(1, s1);
pushQuiet(1, s0);
}
dupped2X1();
}
void dup2X2() {
if (get(sp - 1) == Long) {
Compiler::Operand* s0 = popLongQuiet();
if (get(sp - 3) == Long) {
Compiler::Operand* s1 = popLongQuiet();
pushLongQuiet(s0);
pushLongQuiet(s1);
pushLongQuiet(s0);
} else {
Compiler::Operand* s1 = popQuiet(1);
Compiler::Operand* s2 = popQuiet(1);
pushLongQuiet(s0);
pushQuiet(1, s2);
pushQuiet(1, s1);
pushLongQuiet(s0);
}
} else {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
Compiler::Operand* s2 = popQuiet(1);
Compiler::Operand* s3 = popQuiet(1);
pushQuiet(1, s1);
pushQuiet(1, s0);
pushQuiet(1, s3);
pushQuiet(1, s2);
pushQuiet(1, s1);
pushQuiet(1, s0);
}
dupped2X2();
}
void swap() {
Compiler::Operand* s0 = popQuiet(1);
Compiler::Operand* s1 = popQuiet(1);
pushQuiet(1, s0);
pushQuiet(1, s1);
swapped();
}
TraceElement* trace(object target, unsigned flags) {
unsigned mapSize = frameMapSizeInWords(t, context->method);
TraceElement* e = context->traceLog = new
(context->zone.allocate(sizeof(TraceElement) + (mapSize * BytesPerWord)))
TraceElement(context, ip, target, flags, context->traceLog, mapSize);
++ context->traceLogCount;
context->eventLog.append(TraceEvent);
context->eventLog.appendAddress(e);
return e;
}
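// JSR/RET support: startSubroutine pushes the return address, finds or
// creates the Subroutine record for the target ip, and logs a
// PushSubroutineEvent whose "next index" slot is patched by endSubroutine
// once the subroutine body has been compiled.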
unsigned startSubroutine(unsigned ip, Promise* returnAddress) {
pushAddress(addressOperand(returnAddress));
Subroutine* subroutine = 0;
for (Subroutine* s = context->subroutines; s; s = s->listNext) {
if (s->ip == ip) {
subroutine = s;
break;
}
}
if (subroutine == 0) {
context->subroutines = subroutine = new
(context->zone.allocate(sizeof(Subroutine)))
Subroutine(ip, context->eventLog.length() + 1 + BytesPerWord + 2,
context->subroutines, this->subroutine);
if (context->subroutineTable == 0) {
unsigned size = codeLength(t, methodCode(t, context->method))
* sizeof(Subroutine*);
context->subroutineTable = static_cast<Subroutine**>
(context->zone.allocate(size));
memset(context->subroutineTable, 0, size);
}
}
subroutine->handle = c->startSubroutine();
this->subroutine = subroutine;
SubroutineCall* call = new
(context->zone.allocate(sizeof(SubroutineCall)))
SubroutineCall(subroutine, returnAddress);
context->eventLog.append(PushSubroutineEvent);
context->eventLog.appendAddress(call);
unsigned nextIndexIndex = context->eventLog.length();
context->eventLog.append2(0);
c->saveLocals();
return nextIndexIndex;
}
void returnFromSubroutine(unsigned returnAddressLocal) {
c->returnFromSubroutine
(subroutine->handle, loadLocal(context, 1, returnAddressLocal));
subroutine->stackIndex = localOffsetFromStack
(t, translateLocalIndex(context, 1, returnAddressLocal),
context->method);
}
void endSubroutine(unsigned nextIndexIndex) {
c->linkSubroutine(subroutine->handle);
poppedInt();
context->eventLog.append(PopSubroutineEvent);
context->eventLog.set2(nextIndexIndex, context->eventLog.length());
subroutine = subroutine->stackNext;
}
Context* context;
MyThread* t;
Compiler* c;
Subroutine* subroutine;
uint8_t* stackMap;
unsigned ip;
unsigned sp;
unsigned level;
};
unsigned
savedTargetIndex(MyThread* t, object method)
{
return codeMaxLocals(t, methodCode(t, method));
}
object
findCallNode(MyThread* t, void* address);
void
insertCallNode(MyThread* t, object node);
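// The compiled exception handler table stores an int array of
// (start, end, handler) code offsets in element 0, followed by one catch
// type per handler; a null catch type matches any exception.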
void*
findExceptionHandler(Thread* t, object method, void* ip)
{
if (t->exception) {
object table = codeExceptionHandlerTable(t, methodCode(t, method));
if (table) {
object index = arrayBody(t, table, 0);
uint8_t* compiled = reinterpret_cast<uint8_t*>
(methodCompiled(t, method));
for (unsigned i = 0; i < arrayLength(t, table) - 1; ++i) {
unsigned start = intArrayBody(t, index, i * 3);
unsigned end = intArrayBody(t, index, (i * 3) + 1);
unsigned key = difference(ip, compiled) - 1;
if (key >= start and key < end) {
object catchType = arrayBody(t, table, i + 1);
if (catchType == 0 or instanceOf(t, catchType, t->exception)) {
return compiled + intArrayBody(t, index, (i * 3) + 2);
}
}
}
}
}
return 0;
}
void
releaseLock(MyThread* t, object method, void* stack)
{
if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
object lock;
if (methodFlags(t, method) & ACC_STATIC) {
lock = methodClass(t, method);
} else {
lock = *localObject
(t, stackForFrame(t, stack, method), method,
savedTargetIndex(t, method));
}
release(t, lock);
}
}
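// Walk the stack, and the continuation chain if Continuations are enabled,
// starting from the frame that threw, until a handler for t->exception is
// found; synchronized methods that are unwound past have their monitors
// released, and the resulting ip/base/stack describe where execution
// should resume.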
void
findUnwindTarget(MyThread* t, void** targetIp, void** targetBase,
void** targetStack, object* targetContinuation)
{
void* ip;
void* base;
void* stack;
object continuation;
if (t->traceContext) {
ip = t->traceContext->ip;
base = t->traceContext->base;
stack = t->traceContext->stack;
continuation = t->traceContext->continuation;
} else {
ip = 0;
base = t->base;
stack = t->stack;
continuation = t->continuation;
}
if (ip == 0) {
ip = t->arch->frameIp(stack);
}
object target = t->trace->targetMethod;
*targetIp = 0;
while (*targetIp == 0) {
object method = methodForIp(t, ip);
if (method) {
void* handler = findExceptionHandler(t, method, ip);
if (handler) {
*targetIp = handler;
*targetBase = base;
t->arch->nextFrame(&stack, &base);
void** sp = static_cast<void**>(stackForFrame(t, stack, method))
+ t->arch->frameReturnAddressSize();
*targetStack = sp;
*targetContinuation = continuation;
sp[localOffset(t, localSize(t, method), method)] = t->exception;
t->exception = 0;
} else {
t->arch->nextFrame(&stack, &base);
ip = t->arch->frameIp(stack);
if (t->exception) {
releaseLock(t, method, stack);
}
target = method;
}
} else {
*targetIp = ip;
*targetBase = base;
*targetStack = static_cast<void**>(stack)
+ t->arch->frameReturnAddressSize();
*targetContinuation = continuation;
while (Continuations and *targetContinuation) {
object c = *targetContinuation;
object method = continuationMethod(t, c);
void* handler = findExceptionHandler
(t, method, continuationAddress(t, c));
if (handler) {
t->exceptionHandler = handler;
t->exceptionStackAdjustment
= (stackOffsetFromFrame(t, method)
- ((continuationFramePointerOffset(t, c) / BytesPerWord)
- t->arch->framePointerOffset()
+ t->arch->frameReturnAddressSize())) * BytesPerWord;
t->exceptionOffset
= localOffset(t, localSize(t, method), method) * BytesPerWord;
break;
} else if (t->exception) {
releaseLock(t, method,
reinterpret_cast<uint8_t*>(c)
+ ContinuationBody
+ continuationReturnAddressOffset(t, c)
- t->arch->returnAddressOffset());
}
*targetContinuation = continuationNext(t, c);
}
}
}
}
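// Capture the current Java stack as a chain of continuation objects: each
// frame's contents are copied into a continuation record and the records
// are linked through ContinuationNext, with the head of the new chain
// returned to the caller.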
object
makeCurrentContinuation(MyThread* t, void** targetIp, void** targetBase,
void** targetStack)
{
void* ip = t->arch->frameIp(t->stack);
void* base = t->base;
void* stack = t->stack;
object context = t->continuation
? continuationContext(t, t->continuation)
: makeContinuationContext(t, 0, 0, 0, 0, t->trace->originalMethod);
PROTECT(t, context);
object target = t->trace->targetMethod;
PROTECT(t, target);
object first = 0;
PROTECT(t, first);
object last = 0;
PROTECT(t, last);
*targetIp = 0;
while (*targetIp == 0) {
object method = methodForIp(t, ip);
if (method) {
PROTECT(t, method);
void** top = static_cast<void**>(stack)
+ t->arch->frameReturnAddressSize()
+ t->arch->frameFooterSize();
unsigned argumentFootprint
= t->arch->argumentFootprint(methodParameterFootprint(t, target));
unsigned alignment = t->arch->stackAlignmentInWords();
if (TailCalls and argumentFootprint > alignment) {
top += argumentFootprint - alignment;
}
t->arch->nextFrame(&stack, &base);
void** bottom = static_cast<void**>(stack)
+ t->arch->frameReturnAddressSize();
unsigned frameSize = bottom - top;
unsigned totalSize = frameSize
+ t->arch->frameFooterSize()
+ t->arch->argumentFootprint(methodParameterFootprint(t, method));
object c = makeContinuation
(t, 0, context, method, ip,
((frameSize
+ t->arch->frameFooterSize()
+ t->arch->returnAddressOffset()
- t->arch->frameReturnAddressSize()) * BytesPerWord),
((frameSize
+ t->arch->frameFooterSize()
+ t->arch->framePointerOffset()
- t->arch->frameReturnAddressSize()) * BytesPerWord),
totalSize);
memcpy(&continuationBody(t, c, 0), top, totalSize * BytesPerWord);
if (last) {
set(t, last, ContinuationNext, c);
} else {
first = c;
}
last = c;
ip = t->arch->frameIp(stack);
target = method;
} else {
*targetIp = ip;
*targetBase = base;
*targetStack = static_cast<void**>(stack)
+ t->arch->frameReturnAddressSize();
}
}
expect(t, last);
set(t, last, ContinuationNext, t->continuation);
return first;
}
void NO_RETURN
unwind(MyThread* t)
{
void* ip;
void* base;
void* stack;
fix Thread.getStackTrace race conditions Implementing Thread.getStackTrace is tricky. A thread may interrupt another thread at any time to grab a stack trace, including while the latter is executing Java code, JNI code, helper thunks, VM code, or while transitioning between any of these. To create a stack trace we use several context fields associated with the target thread, including snapshots of the instruction pointer, stack pointer, and frame pointer. These fields must be current, accurate, and consistent with each other in order to get a reliable trace. Otherwise, we risk crashing the VM by trying to walk garbage stack frames or by misinterpreting the size and/or content of legitimate frames. This commit addresses sensitive transition points such as entering the helper thunks which bridge the transitions from Java to native code (where we must save the stack and frame registers for use from native code) and stack unwinding (where we must atomically update the thread context fields to indicate which frame we are unwinding to). When grabbing a trace for another thread, we determine what kind of code we caught the thread executing in and use that information to choose the thread context values with which to begin the trace. See MyProcessor::getStackTrace::Visitor::visit for details. In order to atomically update the thread context fields, we do the following: 1. Create a temporary "transition" object to serve as a staging area and populate it with the new field values. 2. Update a transition pointer in the thread object to point to the object created above. As long as this pointer is non-null, interrupting threads will use the context values in the staging object instead of those in the thread object. 3. Update the fields in the thread object. 4. Clear the transition pointer in the thread object. We use a memory barrier between each of these steps to ensure they are made visible to other threads in program order. See MyThread::doTransition for details.
object continuation;
findUnwindTarget(t, &ip, &base, &stack, &continuation);
transition(t, ip, stack, base, continuation, t->trace);
vmJump(ip, base, stack, t, 0, 0);
}
uintptr_t
defaultThunk(MyThread* t);
uintptr_t
nativeThunk(MyThread* t);
uintptr_t
bootNativeThunk(MyThread* t);
uintptr_t
aioobThunk(MyThread* t);
uintptr_t
stackOverflowThunk(MyThread* t);
uintptr_t
virtualThunk(MyThread* t, unsigned index);
bool
unresolved(MyThread* t, uintptr_t methodAddress);
uintptr_t
methodAddress(Thread* t, object method)
{
if (methodFlags(t, method) & ACC_NATIVE) {
return nativeThunk(static_cast<MyThread*>(t));
} else {
return methodCompiled(t, method);
}
}
void
tryInitClass(MyThread* t, object class_)
{
initClass(t, class_);
if (UNLIKELY(t->exception)) unwind(t);
}
FixedAllocator*
codeAllocator(MyThread* t);
void
compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
object method);
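// runtime helper for invokeinterface: resolve the target method against the
// instance's class, compiling it on demand, and return its entry address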
int64_t
findInterfaceMethodFromInstance(MyThread* t, object method, object instance)
{
if (instance) {
object target = findInterfaceMethod(t, method, objectClass(t, instance));
if (unresolved(t, methodAddress(t, target))) {
PROTECT(t, target);
compile(t, codeAllocator(t), 0, target);
}
if (UNLIKELY(t->exception)) {
unwind(t);
} else {
if (methodFlags(t, target) & ACC_NATIVE) {
t->trace->nativeMethod = target;
}
return methodAddress(t, target);
}
} else {
t->exception = makeThrowable(t, Machine::NullPointerExceptionType);
unwind(t);
}
}
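// helpers for the fcmp/dcmp bytecodes; the G and L variants differ only in
// the value produced when either operand is NaN (1 for G, -1 for L), and the
// second operand is passed first because of the order operands are popped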
int64_t
compareDoublesG(uint64_t bi, uint64_t ai)
{
double a = bitsToDouble(ai);
double b = bitsToDouble(bi);
if (a < b) {
return -1;
} else if (a > b) {
return 1;
} else if (a == b) {
return 0;
} else {
return 1;
}
}
int64_t
compareDoublesL(uint64_t bi, uint64_t ai)
{
double a = bitsToDouble(ai);
double b = bitsToDouble(bi);
if (a < b) {
return -1;
} else if (a > b) {
return 1;
} else if (a == b) {
return 0;
} else {
return -1;
}
}
int64_t
compareFloatsG(uint32_t bi, uint32_t ai)
{
float a = bitsToFloat(ai);
float b = bitsToFloat(bi);
if (a < b) {
return -1;
} else if (a > b) {
return 1;
} else if (a == b) {
return 0;
} else {
return 1;
}
}
int64_t
compareFloatsL(uint32_t bi, uint32_t ai)
{
float a = bitsToFloat(ai);
float b = bitsToFloat(bi);
if (a < b) {
return -1;
} else if (a > b) {
return 1;
} else if (a == b) {
return 0;
} else {
return -1;
}
}
int64_t
compareLongs(uint64_t b, uint64_t a)
{
if (a < b) {
return -1;
} else if (a > b) {
return 1;
} else {
return 0;
}
}
uint64_t
addDouble(uint64_t b, uint64_t a)
{
return doubleToBits(bitsToDouble(a) + bitsToDouble(b));
}
uint64_t
subtractDouble(uint64_t b, uint64_t a)
{
return doubleToBits(bitsToDouble(a) - bitsToDouble(b));
}
uint64_t
multiplyDouble(uint64_t b, uint64_t a)
{
return doubleToBits(bitsToDouble(a) * bitsToDouble(b));
}
uint64_t
divideDouble(uint64_t b, uint64_t a)
{
return doubleToBits(bitsToDouble(a) / bitsToDouble(b));
}
uint64_t
moduloDouble(uint64_t b, uint64_t a)
{
return doubleToBits(fmod(bitsToDouble(a), bitsToDouble(b)));
}
uint64_t
negateDouble(uint64_t a)
{
return doubleToBits(- bitsToDouble(a));
}
uint64_t
squareRootDouble(uint64_t a)
{
return doubleToBits(sqrt(bitsToDouble(a)));
}
uint64_t
doubleToFloat(int64_t a)
{
return floatToBits(static_cast<float>(bitsToDouble(a)));
}
int64_t
doubleToInt(int64_t a)
{
return static_cast<int32_t>(bitsToDouble(a));
}
int64_t
doubleToLong(int64_t a)
{
return static_cast<int64_t>(bitsToDouble(a));
}
uint64_t
addFloat(uint32_t b, uint32_t a)
{
return floatToBits(bitsToFloat(a) + bitsToFloat(b));
}
uint64_t
subtractFloat(uint32_t b, uint32_t a)
{
return floatToBits(bitsToFloat(a) - bitsToFloat(b));
}
uint64_t
multiplyFloat(uint32_t b, uint32_t a)
{
return floatToBits(bitsToFloat(a) * bitsToFloat(b));
}
uint64_t
divideFloat(uint32_t b, uint32_t a)
{
return floatToBits(bitsToFloat(a) / bitsToFloat(b));
}
uint64_t
moduloFloat(uint32_t b, uint32_t a)
{
return floatToBits(fmod(bitsToFloat(a), bitsToFloat(b)));
}
uint64_t
negateFloat(uint32_t a)
{
return floatToBits(- bitsToFloat(a));
}
uint64_t
absoluteFloat(uint32_t a)
{
return floatToBits(fabsf(bitsToFloat(a)));
}
int64_t
absoluteLong(int64_t a)
{
return a > 0 ? a : -a;
}
int64_t
absoluteInt(int32_t a)
{
return a > 0 ? a : -a;
}
int64_t
divideLong(MyThread* t, int64_t b, int64_t a)
{
if (LIKELY(b)) {
return a / b;
} else {
t->exception = makeThrowable(t, Machine::ArithmeticExceptionType);
unwind(t);
}
}
int64_t
divideInt(MyThread* t, int32_t b, int32_t a)
{
if (LIKELY(b)) {
return a / b;
} else {
t->exception = makeThrowable(t, Machine::ArithmeticExceptionType);
unwind(t);
}
}
int64_t
moduloLong(MyThread* t, int64_t b, int64_t a)
{
if (LIKELY(b)) {
return a % b;
} else {
t->exception = makeThrowable(t, Machine::ArithmeticExceptionType);
unwind(t);
}
}
int64_t
moduloInt(MyThread* t, int32_t b, int32_t a)
{
if (LIKELY(b)) {
return a % b;
} else {
t->exception = makeThrowable(t, Machine::ArithmeticExceptionType);
unwind(t);
}
}
uint64_t
floatToDouble(int32_t a)
{
return doubleToBits(static_cast<double>(bitsToFloat(a)));
}
int64_t
floatToInt(int32_t a)
{
return static_cast<int32_t>(bitsToFloat(a));
}
int64_t
floatToLong(int32_t a)
{
return static_cast<int64_t>(bitsToFloat(a));
}
uint64_t
intToDouble(int32_t a)
{
return doubleToBits(static_cast<double>(a));
}
uint64_t
intToFloat(int32_t a)
{
return floatToBits(static_cast<float>(a));
}
uint64_t
longToDouble(int64_t a)
{
return doubleToBits(static_cast<double>(a));
}
uint64_t
longToFloat(int64_t a)
{
return floatToBits(static_cast<float>(a));
}
uint64_t
makeBlankObjectArray(MyThread* t, object class_, int32_t length)
{
if (length >= 0) {
return reinterpret_cast<uint64_t>(makeObjectArray(t, class_, length));
} else {
object message = makeString(t, "%d", length);
t->exception = makeThrowable
(t, Machine::NegativeArraySizeExceptionType, message);
unwind(t);
}
}
uint64_t
makeBlankArray(MyThread* t, unsigned type, int32_t length)
{
if (length >= 0) {
object (*constructor)(Thread*, uintptr_t);
switch (type) {
case T_BOOLEAN:
constructor = makeBooleanArray;
break;
case T_CHAR:
constructor = makeCharArray;
break;
case T_FLOAT:
constructor = makeFloatArray;
break;
case T_DOUBLE:
constructor = makeDoubleArray;
break;
case T_BYTE:
constructor = makeByteArray;
break;
case T_SHORT:
constructor = makeShortArray;
break;
case T_INT:
constructor = makeIntArray;
break;
case T_LONG:
constructor = makeLongArray;
break;
default: abort(t);
}
return reinterpret_cast<uintptr_t>(constructor(t, length));
} else {
object message = makeString(t, "%d", length);
t->exception = makeThrowable
(t, Machine::NegativeArraySizeExceptionType, message);
unwind(t);
}
}
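// binary search of the sorted (key, address) pair table generated for
// lookupswitch, returning default_ if the key is not present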
uint64_t
lookUpAddress(int32_t key, uintptr_t* start, int32_t count,
uintptr_t default_)
{
int32_t bottom = 0;
int32_t top = count;
for (int32_t span = top - bottom; span; span = top - bottom) {
int32_t middle = bottom + (span / 2);
uintptr_t* p = start + (middle * 2);
int32_t k = *p;
if (key < k) {
top = middle;
} else if (key > k) {
bottom = middle + 1;
} else {
return p[1];
}
}
return default_;
}
void
setMaybeNull(MyThread* t, object o, unsigned offset, object value)
{
if (LIKELY(o)) {
set(t, o, offset, value);
} else {
t->exception = makeThrowable(t, Machine::NullPointerExceptionType);
unwind(t);
}
}
void
acquireMonitorForObject(MyThread* t, object o)
{
if (LIKELY(o)) {
acquire(t, o);
} else {
t->exception = makeThrowable(t, Machine::NullPointerExceptionType);
unwind(t);
}
}
void
releaseMonitorForObject(MyThread* t, object o)
{
if (LIKELY(o)) {
release(t, o);
} else {
t->exception = makeThrowable(t, Machine::NullPointerExceptionType);
unwind(t);
}
}
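// build the array for multianewarray: the dimension counts are read from the
// stack in reverse order and validated before the nested arrays are populated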
object
makeMultidimensionalArray2(MyThread* t, object class_, uintptr_t* countStack,
int32_t dimensions)
{
PROTECT(t, class_);
RUNTIME_ARRAY(int32_t, counts, dimensions);
for (int i = dimensions - 1; i >= 0; --i) {
RUNTIME_ARRAY_BODY(counts)[i] = countStack[dimensions - i - 1];
if (UNLIKELY(RUNTIME_ARRAY_BODY(counts)[i] < 0)) {
object message = makeString(t, "%d", RUNTIME_ARRAY_BODY(counts)[i]);
t->exception = makeThrowable
(t, Machine::NegativeArraySizeExceptionType, message);
return 0;
}
}
object array = makeArray(t, RUNTIME_ARRAY_BODY(counts)[0]);
setObjectClass(t, array, class_);
PROTECT(t, array);
populateMultiArray(t, array, RUNTIME_ARRAY_BODY(counts), 0, dimensions);
return array;
}
uint64_t
makeMultidimensionalArray(MyThread* t, object class_, int32_t dimensions,
int32_t offset)
{
object r = makeMultidimensionalArray2
(t, class_, static_cast<uintptr_t*>(t->stack) + offset, dimensions);
if (UNLIKELY(t->exception)) {
unwind(t);
} else {
return reinterpret_cast<uintptr_t>(r);
}
}
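// estimate how much memory recording a stack trace for the current thread
// will require by walking and counting its frames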
unsigned
traceSize(Thread* t)
{
class Counter: public Processor::StackVisitor {
public:
Counter(): count(0) { }
virtual bool visit(Processor::StackWalker*) {
++ count;
return true;
}
unsigned count;
} counter;
t->m->processor->walkStack(t, &counter);
return FixedSizeOfArray + (counter.count * ArrayElementSizeOfArray)
+ (counter.count * FixedSizeOfTraceElement);
}
void NO_RETURN
throwArrayIndexOutOfBounds(MyThread* t)
{
if (ensure(t, FixedSizeOfArrayIndexOutOfBoundsException + traceSize(t))) {
atomicOr(&(t->flags), Thread::TracingFlag);
t->exception = makeThrowable
(t, Machine::ArrayIndexOutOfBoundsExceptionType);
atomicAnd(&(t->flags), ~Thread::TracingFlag);
} else {
// not enough memory available for a new exception and stack trace
// -- use a preallocated instance instead
t->exception = root(t, Machine::ArrayIndexOutOfBoundsException);
}
unwind(t);
}
void NO_RETURN
throwStackOverflow(MyThread* t)
{
t->exception = makeThrowable(t, Machine::StackOverflowErrorType);
unwind(t);
}
void NO_RETURN
throw_(MyThread* t, object o)
{
if (LIKELY(o)) {
t->exception = o;
} else {
t->exception = makeThrowable(t, Machine::NullPointerExceptionType);
}
// printTrace(t, t->exception);
unwind(t);
}
void
checkCast(MyThread* t, object class_, object o)
{
if (UNLIKELY(o and not isAssignableFrom(t, class_, objectClass(t, o)))) {
object message = makeString
(t, "%s as %s",
&byteArrayBody(t, className(t, objectClass(t, o)), 0),
&byteArrayBody(t, className(t, class_), 0));
t->exception = makeThrowable(t, Machine::ClassCastExceptionType, message);
unwind(t);
}
}
uint64_t
instanceOf64(Thread* t, object class_, object o)
{
return instanceOf(t, class_, o);
}
uint64_t
makeNewGeneral64(Thread* t, object class_)
{
return reinterpret_cast<uintptr_t>(makeNewGeneral(t, class_));
}
uint64_t
makeNew64(Thread* t, object class_)
{
return reinterpret_cast<uintptr_t>(makeNew(t, class_));
}
uint64_t
getJClass64(Thread* t, object class_)
{
return reinterpret_cast<uintptr_t>(getJClass(t, class_));
}
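// if this thread was forced to allocate from its backup heap, run a minor
// collection so those objects can be moved to the normal heap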
void
gcIfNecessary(MyThread* t)
{
stress(t);
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
collect(t, Heap::MinorCollection);
}
}
unsigned
resultSize(MyThread* t, unsigned code)
{
switch (code) {
case ByteField:
case BooleanField:
case CharField:
case ShortField:
case FloatField:
case IntField:
return 4;
case ObjectField:
return BytesPerWord;
case LongField:
case DoubleField:
return 8;
case VoidField:
return 0;
default:
abort(t);
}
}
void
pushReturnValue(MyThread* t, Frame* frame, unsigned code,
Compiler::Operand* result)
{
switch (code) {
case ByteField:
case BooleanField:
case CharField:
case ShortField:
case FloatField:
case IntField:
return frame->pushInt(result);
case ObjectField:
return frame->pushObject(result);
case LongField:
case DoubleField:
return frame->pushLong(result);
default:
abort(t);
}
}
Compiler::OperandType
operandTypeForFieldCode(Thread* t, unsigned code)
{
switch (code) {
case ByteField:
case BooleanField:
case CharField:
case ShortField:
case IntField:
case LongField:
return Compiler::IntegerType;
case ObjectField:
return Compiler::ObjectType;
case FloatField:
case DoubleField:
return Compiler::FloatType;
case VoidField:
return Compiler::VoidType;
default:
abort(t);
}
}
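// true if the target address may lie beyond the reach of the architecture's
// largest immediate jump from within the code region, in which case a
// long-form call or jump must be emitted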
bool
useLongJump(MyThread* t, uintptr_t target)
{
uintptr_t reach = t->arch->maximumImmediateJump();
FixedAllocator* a = codeAllocator(t);
uintptr_t start = reinterpret_cast<uintptr_t>(a->base);
uintptr_t end = reinterpret_cast<uintptr_t>(a->base) + a->capacity;
assert(t, end - start < reach);
return (target > end and (target - start) > reach)
    or (target < start and (end - target) > reach);
}
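// emit a direct (statically bound) call to target; depending on whether the
// target is already compiled, requires class initialization, or is being
// tail called, the call may be routed through the default or native thunk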
Compiler::Operand*
compileDirectInvoke(MyThread* t, Frame* frame, object target, bool tailCall,
bool useThunk, unsigned rSize, Promise* addressPromise)
{
Compiler* c = frame->c;
unsigned flags = (TailCalls and tailCall ? Compiler::TailJump : 0);
unsigned traceFlags;
if (addressPromise == 0 and useLongJump(t, methodAddress(t, target))) {
flags |= Compiler::LongJumpOrCall;
traceFlags = TraceElement::LongCall;
} else {
traceFlags = 0;
}
if (useThunk
or (TailCalls and tailCall and (methodFlags(t, target) & ACC_NATIVE)))
{
flags |= Compiler::Aligned;
if (TailCalls and tailCall) {
traceFlags |= TraceElement::TailCall;
TraceElement* trace = frame->trace(target, traceFlags);
Promise* returnAddressPromise = new
(frame->context->zone.allocate(sizeof(TraceElementPromise)))
TraceElementPromise(t->m->system, trace);
Compiler::Operand* result = c->stackCall
(c->promiseConstant(returnAddressPromise, Compiler::AddressType),
flags,
trace,
rSize,
operandTypeForFieldCode(t, methodReturnCode(t, target)),
methodParameterFootprint(t, target));
c->store
(BytesPerWord, frame->addressOperand(returnAddressPromise),
BytesPerWord, c->memory
(c->register_(t->arch->thread()), Compiler::AddressType,
difference(&(t->tailAddress), t)));
c->exit
(c->constant
((methodFlags(t, target) & ACC_NATIVE)
? nativeThunk(t) : defaultThunk(t),
Compiler::AddressType));
return result;
} else {
return c->stackCall
(c->constant(defaultThunk(t), Compiler::AddressType),
flags,
frame->trace(target, traceFlags),
rSize,
operandTypeForFieldCode(t, methodReturnCode(t, target)),
methodParameterFootprint(t, target));
}
} else {
Compiler::Operand* address =
(addressPromise
? c->promiseConstant(addressPromise, Compiler::AddressType)
: c->constant(methodAddress(t, target), Compiler::AddressType));
return c->stackCall
(address,
flags,
tailCall ? 0 : frame->trace
((methodFlags(t, target) & ACC_NATIVE) ? target : 0, 0),
rSize,
operandTypeForFieldCode(t, methodReturnCode(t, target)),
methodParameterFootprint(t, target));
}
}
bool
compileDirectInvoke(MyThread* t, Frame* frame, object target, bool tailCall)
{
unsigned rSize = resultSize(t, methodReturnCode(t, target));
Compiler::Operand* result = 0;
if (emptyMethod(t, target)) {
tailCall = false;
} else {
BootContext* bc = frame->context->bootContext;
if (bc) {
if ((methodClass(t, target) == methodClass(t, frame->context->method)
or (not classNeedsInit(t, methodClass(t, target))))
and (not (TailCalls and tailCall
and (methodFlags(t, target) & ACC_NATIVE))))
{
Promise* p = new (bc->zone->allocate(sizeof(ListenPromise)))
ListenPromise(t->m->system, bc->zone);
PROTECT(t, target);
object pointer = makePointer(t, p);
bc->calls = makeTriple(t, target, pointer, bc->calls);
result = compileDirectInvoke
(t, frame, target, tailCall, false, rSize, p);
} else {
result = compileDirectInvoke
(t, frame, target, tailCall, true, rSize, 0);
}
} else if (unresolved(t, methodAddress(t, target))
or classNeedsInit(t, methodClass(t, target)))
{
result = compileDirectInvoke
(t, frame, target, tailCall, true, rSize, 0);
} else {
result = compileDirectInvoke
(t, frame, target, tailCall, false, rSize, 0);
}
}
frame->pop(methodParameterFootprint(t, target));
if (rSize) {
pushReturnValue(t, frame, methodReturnCode(t, target), result);
}
return tailCall;
}
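// for synchronized methods, emit a call to acquire or release the monitor:
// the class object for static methods, otherwise the saved 'this' pointer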
void
handleMonitorEvent(MyThread* t, Frame* frame, intptr_t function)
{
Compiler* c = frame->c;
object method = frame->context->method;
if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
Compiler::Operand* lock;
if (methodFlags(t, method) & ACC_STATIC) {
PROTECT(t, method);
lock = frame->append(methodClass(t, method));
} else {
lock = loadLocal(frame->context, 1, savedTargetIndex(t, method));
}
c->call(c->constant(function, Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
2, c->register_(t->arch->thread()), lock);
}
}
void
handleEntrance(MyThread* t, Frame* frame)
{
object method = frame->context->method;
if ((methodFlags(t, method) & (ACC_SYNCHRONIZED | ACC_STATIC))
== ACC_SYNCHRONIZED)
{
// save 'this' pointer in case it is overwritten.
unsigned index = savedTargetIndex(t, method);
storeLocal(frame->context, 1, loadLocal(frame->context, 1, 0), index);
frame->set(index, Frame::Object);
}
handleMonitorEvent
(t, frame, getThunk(t, acquireMonitorForObjectThunk));
}
void
handleExit(MyThread* t, Frame* frame)
{
handleMonitorEvent
(t, frame, getThunk(t, releaseMonitorForObjectThunk));
}
bool
inTryBlock(MyThread* t, object code, unsigned ip)
{
object table = codeExceptionHandlerTable(t, code);
if (table) {
unsigned length = exceptionHandlerTableLength(t, table);
for (unsigned i = 0; i < length; ++i) {
ExceptionHandler* eh = exceptionHandlerTableBody(t, table, i);
if (ip >= exceptionHandlerStart(eh)
and ip < exceptionHandlerEnd(eh))
{
return true;
}
}
}
return false;
}
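// constructors of classes with final fields need a store barrier before
// returning so other threads cannot observe those fields' default values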
bool
needsReturnBarrier(MyThread* t, object method)
{
return (methodFlags(t, method) & ConstructorFlag)
and (classVmFlags(t, methodClass(t, method)) & HasFinalMemberFlag);
}
bool
returnsNext(MyThread* t, object code, unsigned ip)
{
switch (codeBody(t, code, ip)) {
case return_:
case areturn:
case ireturn:
case freturn:
case lreturn:
case dreturn:
return true;
case goto_: {
uint32_t offset = codeReadInt16(t, code, ++ip);
uint32_t newIp = (ip - 3) + offset;
assert(t, newIp < codeLength(t, code));
return returnsNext(t, code, newIp);
}
case goto_w: {
uint32_t offset = codeReadInt32(t, code, ++ip);
uint32_t newIp = (ip - 5) + offset;
assert(t, newIp < codeLength(t, code));
return returnsNext(t, code, newIp);
}
default:
return false;
}
}
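// a call may be compiled as a tail call only if the caller is not
// synchronized, the call site is not covered by an exception handler, no
// return barrier is needed, the return types are compatible, and the call
// is immediately followed by a return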
bool
isTailCall(MyThread* t, object code, unsigned ip, object caller, object callee)
{
return TailCalls
and ((methodFlags(t, caller) & ACC_SYNCHRONIZED) == 0)
and (not inTryBlock(t, code, ip - 1))
and (not needsReturnBarrier(t, caller))
and (methodReturnCode(t, caller) == VoidField
or methodReturnCode(t, caller) == methodReturnCode(t, callee))
and returnsNext(t, code, ip);
}
void
compile(MyThread* t, Frame* initialFrame, unsigned ip,
int exceptionHandlerStart = -1);
void
saveStateAndCompile(MyThread* t, Frame* initialFrame, unsigned ip)
{
Compiler::State* state = initialFrame->c->saveState();
compile(t, initialFrame, ip);
initialFrame->c->restoreState(state);
}
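// peephole optimization: when a compare bytecode is immediately followed by
// a conditional branch on its result, fuse the pair into a single
// conditional jump instead of materializing the -1/0/1 value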
bool
integerBranch(MyThread* t, Frame* frame, object code, unsigned& ip,
unsigned size, Compiler::Operand* a, Compiler::Operand* b)
{
if (ip + 3 > codeLength(t, code)) {
return false;
}
Compiler* c = frame->c;
unsigned instruction = codeBody(t, code, ip++);
uint32_t offset = codeReadInt16(t, code, ip);
uint32_t newIp = (ip - 3) + offset;
assert(t, newIp < codeLength(t, code));
Compiler::Operand* target = frame->machineIp(newIp);
switch (instruction) {
case ifeq:
c->jumpIfEqual(size, a, b, target);
break;
case ifne:
c->jumpIfNotEqual(size, a, b, target);
break;
case ifgt:
c->jumpIfGreater(size, a, b, target);
break;
case ifge:
c->jumpIfGreaterOrEqual(size, a, b, target);
break;
case iflt:
c->jumpIfLess(size, a, b, target);
break;
case ifle:
c->jumpIfLessOrEqual(size, a, b, target);
break;
default:
ip -= 3;
return false;
}
saveStateAndCompile(t, frame, newIp);
return t->exception == 0;
}
bool
floatBranch(MyThread* t, Frame* frame, object code, unsigned& ip,
unsigned size, bool lessIfUnordered, Compiler::Operand* a,
Compiler::Operand* b)
{
if (ip + 3 > codeLength(t, code)) {
return false;
}
Compiler* c = frame->c;
unsigned instruction = codeBody(t, code, ip++);
uint32_t offset = codeReadInt16(t, code, ip);
uint32_t newIp = (ip - 3) + offset;
assert(t, newIp < codeLength(t, code));
Compiler::Operand* target = frame->machineIp(newIp);
switch (instruction) {
case ifeq:
c->jumpIfFloatEqual(size, a, b, target);
break;
case ifne:
c->jumpIfFloatNotEqual(size, a, b, target);
break;
case ifgt:
if (lessIfUnordered) {
c->jumpIfFloatGreater(size, a, b, target);
} else {
c->jumpIfFloatGreaterOrUnordered(size, a, b, target);
}
break;
case ifge:
if (lessIfUnordered) {
c->jumpIfFloatGreaterOrEqual(size, a, b, target);
} else {
c->jumpIfFloatGreaterOrEqualOrUnordered(size, a, b, target);
}
break;
case iflt:
if (lessIfUnordered) {
c->jumpIfFloatLessOrUnordered(size, a, b, target);
} else {
c->jumpIfFloatLess(size, a, b, target);
}
break;
case ifle:
if (lessIfUnordered) {
c->jumpIfFloatLessOrEqualOrUnordered(size, a, b, target);
} else {
c->jumpIfFloatLessOrEqual(size, a, b, target);
}
break;
default:
ip -= 3;
return false;
}
saveStateAndCompile(t, frame, newIp);
return t->exception == 0;
}
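// replace recognized java/lang/Math calls (sqrt, abs) with compiler
// intrinsics rather than emitting an out-of-line call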
bool
intrinsic(MyThread* t, Frame* frame, object target)
{
#define MATCH(name, constant) \
(byteArrayLength(t, name) == sizeof(constant) \
and ::strcmp(reinterpret_cast<char*>(&byteArrayBody(t, name, 0)), \
constant) == 0)
object className = vm::className(t, methodClass(t, target));
if (UNLIKELY(MATCH(className, "java/lang/Math"))) {
Compiler* c = frame->c;
if (MATCH(methodName(t, target), "sqrt")
and MATCH(methodSpec(t, target), "(D)D"))
{
frame->pushLong(c->fsqrt(8, frame->popLong()));
return true;
} else if (MATCH(methodName(t, target), "abs")) {
if (MATCH(methodSpec(t, target), "(I)I")) {
frame->pushInt(c->abs(4, frame->popInt()));
return true;
} else if (MATCH(methodSpec(t, target), "(J)J")) {
frame->pushLong(c->abs(8, frame->popLong()));
return true;
} else if (MATCH(methodSpec(t, target), "(F)F")) {
frame->pushInt(c->fabs(4, frame->popInt()));
return true;
}
}
}
return false;
}
void
compile(MyThread* t, Frame* initialFrame, unsigned ip,
int exceptionHandlerStart)
{
RUNTIME_ARRAY(uint8_t, stackMap,
codeMaxStack(t, methodCode(t, initialFrame->context->method)));
Frame myFrame(initialFrame, RUNTIME_ARRAY_BODY(stackMap));
Frame* frame = &myFrame;
Compiler* c = frame->c;
Context* context = frame->context;
object code = methodCode(t, context->method);
PROTECT(t, code);
while (ip < codeLength(t, code)) {
if (context->visitTable[ip] ++) {
// we've already visited this part of the code
frame->visitLogicalIp(ip);
return;
}
frame->startLogicalIp(ip);
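    // if this block is the entry point of an exception handler, initialize
    // the locals from the handler's logical ip, account for the thrown
    // exception on the stack, and give the garbage collector a chance to run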
if (exceptionHandlerStart >= 0) {
c->initLocalsFromLogicalIp(exceptionHandlerStart);
exceptionHandlerStart = -1;
frame->pushObject();
c->call
(c->constant(getThunk(t, gcIfNecessaryThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
1, c->register_(t->arch->thread()));
}
// fprintf(stderr, "ip: %d map: %ld\n", ip, *(frame->map));
unsigned instruction = codeBody(t, code, ip++);
switch (instruction) {
case aaload:
case baload:
case caload:
case daload:
case faload:
case iaload:
case laload:
case saload: {
Compiler::Operand* index = frame->popInt();
Compiler::Operand* array = frame->popObject();
if (inTryBlock(t, code, ip - 1)) {
c->saveLocals();
frame->trace(0, 0);
}
if (CheckArrayBounds) {
c->checkBounds(array, ArrayLength, index, aioobThunk(t));
}
switch (instruction) {
case aaload:
frame->pushObject
(c->load
(BytesPerWord, BytesPerWord, c->memory
(array, Compiler::ObjectType, ArrayBody, index, BytesPerWord),
BytesPerWord));
break;
case faload:
frame->pushInt
(c->load
(4, 4, c->memory
(array, Compiler::FloatType, ArrayBody, index, 4), BytesPerWord));
break;
case iaload:
frame->pushInt
(c->load
(4, 4, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 4),
BytesPerWord));
break;
case baload:
frame->pushInt
(c->load
(1, 1, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 1),
BytesPerWord));
break;
case caload:
frame->pushInt
(c->loadz
(2, 2, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 2),
BytesPerWord));
break;
case daload:
frame->pushLong
(c->load
(8, 8, c->memory
(array, Compiler::FloatType, ArrayBody, index, 8), 8));
break;
case laload:
frame->pushLong
(c->load
(8, 8, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 8), 8));
break;
case saload:
frame->pushInt
(c->load
(2, 2, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 2),
BytesPerWord));
break;
}
} break;
case aastore:
case bastore:
case castore:
case dastore:
case fastore:
case iastore:
case lastore:
case sastore: {
Compiler::Operand* value;
if (instruction == dastore or instruction == lastore) {
value = frame->popLong();
} else if (instruction == aastore) {
value = frame->popObject();
} else {
value = frame->popInt();
}
Compiler::Operand* index = frame->popInt();
Compiler::Operand* array = frame->popObject();
if (inTryBlock(t, code, ip - 1)) {
c->saveLocals();
frame->trace(0, 0);
}
if (CheckArrayBounds) {
c->checkBounds(array, ArrayLength, index, aioobThunk(t));
}
switch (instruction) {
case aastore: {
c->call
(c->constant(getThunk(t, setMaybeNullThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
4, c->register_(t->arch->thread()), array,
c->add
(4, c->constant(ArrayBody, Compiler::IntegerType),
c->shl
(4, c->constant(log(BytesPerWord), Compiler::IntegerType), index)),
value);
} break;
case fastore:
c->store
(BytesPerWord, value, 4, c->memory
(array, Compiler::FloatType, ArrayBody, index, 4));
break;
case iastore:
c->store
(BytesPerWord, value, 4, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 4));
break;
case bastore:
c->store
(BytesPerWord, value, 1, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 1));
break;
case castore:
case sastore:
c->store
(BytesPerWord, value, 2, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 2));
break;
case dastore:
c->store
(8, value, 8, c->memory
(array, Compiler::FloatType, ArrayBody, index, 8));
break;
case lastore:
c->store
(8, value, 8, c->memory
(array, Compiler::IntegerType, ArrayBody, index, 8));
break;
}
} break;
case aconst_null:
frame->pushObject(c->constant(0, Compiler::ObjectType));
break;
case aload:
frame->loadObject(codeBody(t, code, ip++));
break;
case aload_0:
frame->loadObject(0);
break;
case aload_1:
frame->loadObject(1);
break;
case aload_2:
frame->loadObject(2);
break;
case aload_3:
frame->loadObject(3);
break;
case anewarray: {
uint16_t index = codeReadInt16(t, code, ip);
object class_ = resolveClassInPool(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
Compiler::Operand* length = frame->popInt();
frame->pushObject
(c->call
(c->constant
(getThunk(t, makeBlankObjectArrayThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
BytesPerWord,
Compiler::ObjectType,
3, c->register_(t->arch->thread()), frame->append(class_), length));
} break;
case areturn: {
handleExit(t, frame);
c->return_(BytesPerWord, frame->popObject());
} return;
case arraylength: {
frame->pushInt
(c->load
(BytesPerWord, BytesPerWord,
c->memory
(frame->popObject(), Compiler::IntegerType, ArrayLength, 0, 1),
BytesPerWord));
} break;
case astore:
frame->storeObjectOrAddress(codeBody(t, code, ip++));
break;
case astore_0:
frame->storeObjectOrAddress(0);
break;
case astore_1:
frame->storeObjectOrAddress(1);
break;
case astore_2:
frame->storeObjectOrAddress(2);
break;
case astore_3:
frame->storeObjectOrAddress(3);
break;
case athrow: {
Compiler::Operand* target = frame->popObject();
c->call
(c->constant(getThunk(t, throw_Thunk), Compiler::AddressType),
Compiler::NoReturn,
frame->trace(0, 0),
0,
Compiler::VoidType,
2, c->register_(t->arch->thread()), target);
} return;
case bipush:
frame->pushInt
(c->constant
(static_cast<int8_t>(codeBody(t, code, ip++)),
Compiler::IntegerType));
break;
case checkcast: {
uint16_t index = codeReadInt16(t, code, ip);
object class_ = resolveClassInPool(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
Compiler::Operand* instance = c->peek(1, 0);
c->call
(c->constant(getThunk(t, checkCastThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
3, c->register_(t->arch->thread()), frame->append(class_), instance);
} break;
case d2f: {
frame->pushInt(c->f2f(8, 4, frame->popLong()));
} break;
case d2i: {
frame->pushInt(c->f2i(8, 4, frame->popLong()));
} break;
case d2l: {
frame->pushLong(c->f2i(8, 8, frame->popLong()));
} break;
case dadd: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->fadd(8, a, b));
} break;
case dcmpg: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
if (not floatBranch(t, frame, code, ip, 8, false, a, b)) {
if (UNLIKELY(t->exception)) return;
frame->pushInt
(c->call
(c->constant
(getThunk(t, compareDoublesGThunk), Compiler::AddressType),
0, 0, 4, Compiler::IntegerType, 4,
static_cast<Compiler::Operand*>(0), a,
static_cast<Compiler::Operand*>(0), b));
}
} break;
case dcmpl: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
if (not floatBranch(t, frame, code, ip, 8, true, a, b)) {
if (UNLIKELY(t->exception)) return;
frame->pushInt
(c->call
(c->constant
(getThunk(t, compareDoublesLThunk), Compiler::AddressType),
0, 0, 4, Compiler::IntegerType, 4,
static_cast<Compiler::Operand*>(0), a,
static_cast<Compiler::Operand*>(0), b));
}
} break;
case dconst_0:
frame->pushLong(c->constant(doubleToBits(0.0), Compiler::FloatType));
break;
case dconst_1:
frame->pushLong(c->constant(doubleToBits(1.0), Compiler::FloatType));
break;
case ddiv: {
2008-02-11 17:21:41 +00:00
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->fdiv(8, a, b));
} break;
case dmul: {
2008-02-11 17:21:41 +00:00
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->fmul(8, a, b));
} break;
case dneg: {
frame->pushLong(c->fneg(8, frame->popLong()));
} break;
case vm::drem: {
2008-02-11 17:21:41 +00:00
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->frem(8, a, b));
} break;
case dsub: {
2008-02-11 17:21:41 +00:00
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->fsub(8, a, b));
} break;
case dup:
frame->dup();
break;
case dup_x1:
frame->dupX1();
break;
case dup_x2:
frame->dupX2();
break;
case dup2:
frame->dup2();
break;
case dup2_x1:
frame->dup2X1();
break;
case dup2_x2:
frame->dup2X2();
break;
case f2d: {
frame->pushLong(c->f2f(4, 8, frame->popInt()));
} break;
case f2i: {
frame->pushInt(c->f2i(4, 4, frame->popInt()));
} break;
case f2l: {
frame->pushLong(c->f2i(4, 8, frame->popInt()));
} break;
case fadd: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->fadd(4, a, b));
} break;
case fcmpg: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
if (not floatBranch(t, frame, code, ip, 4, false, a, b)) {
if (UNLIKELY(t->exception)) return;
frame->pushInt
(c->call
(c->constant
(getThunk(t, compareFloatsGThunk), Compiler::AddressType),
0, 0, 4, Compiler::IntegerType, 2, a, b));
}
} break;
case fcmpl: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
if (not floatBranch(t, frame, code, ip, 4, true, a, b)) {
if (UNLIKELY(t->exception)) return;
frame->pushInt
(c->call
(c->constant
(getThunk(t, compareFloatsLThunk), Compiler::AddressType),
0, 0, 4, Compiler::IntegerType, 2, a, b));
}
} break;
case fconst_0:
frame->pushInt(c->constant(floatToBits(0.0), Compiler::FloatType));
break;
case fconst_1:
frame->pushInt(c->constant(floatToBits(1.0), Compiler::FloatType));
break;
case fconst_2:
frame->pushInt(c->constant(floatToBits(2.0), Compiler::FloatType));
break;
case fdiv: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->fdiv(4, a, b));
} break;
case fmul: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->fmul(4, a, b));
} break;
case fneg: {
frame->pushInt(c->fneg(4, frame->popInt()));
} break;
case vm::frem: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->frem(4, a, b));
} break;
case fsub: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->fsub(4, a, b));
} break;
case getfield:
case getstatic: {
uint16_t index = codeReadInt16(t, code, ip);
object field = resolveField(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
if ((fieldFlags(t, field) & ACC_VOLATILE)
and BytesPerWord == 4
and (fieldCode(t, field) == DoubleField
or fieldCode(t, field) == LongField))
{
PROTECT(t, field);
c->call
(c->constant
(getThunk(t, acquireMonitorForObjectThunk), Compiler::AddressType),
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
c->register_(t->arch->thread()),
frame->append(field));
}
Compiler::Operand* table;
if (instruction == getstatic) {
assert(t, fieldFlags(t, field) & ACC_STATIC);
PROTECT(t, field);
if (fieldClass(t, field) != methodClass(t, context->method)
and classNeedsInit(t, fieldClass(t, field)))
{
c->call
(c->constant
(getThunk(t, tryInitClassThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
2, c->register_(t->arch->thread()),
frame->append(fieldClass(t, field)));
}
table = frame->append(classStaticTable(t, fieldClass(t, field)));
} else {
assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
table = frame->popObject();
if (inTryBlock(t, code, ip - 3)) {
c->saveLocals();
frame->trace(0, 0);
}
}
switch (fieldCode(t, field)) {
case ByteField:
case BooleanField:
frame->pushInt
(c->load
(1, 1, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1),
BytesPerWord));
break;
case CharField:
frame->pushInt
(c->loadz
(2, 2, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1),
BytesPerWord));
break;
case ShortField:
frame->pushInt
(c->load
(2, 2, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1),
BytesPerWord));
break;
case FloatField:
frame->pushInt
(c->load
(4, 4, c->memory
(table, Compiler::FloatType, fieldOffset(t, field), 0, 1),
BytesPerWord));
break;
case IntField:
frame->pushInt
(c->load
(4, 4, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1),
BytesPerWord));
break;
case DoubleField:
frame->pushLong
(c->load
(8, 8, c->memory
(table, Compiler::FloatType, fieldOffset(t, field), 0, 1), 8));
break;
case LongField:
frame->pushLong
(c->load
(8, 8, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1), 8));
break;
case ObjectField:
frame->pushObject
(c->load
(BytesPerWord, BytesPerWord,
c->memory
(table, Compiler::ObjectType, fieldOffset(t, field), 0, 1),
BytesPerWord));
break;
default:
abort(t);
}
if (fieldFlags(t, field) & ACC_VOLATILE) {
if (BytesPerWord == 4
and (fieldCode(t, field) == DoubleField
or fieldCode(t, field) == LongField))
{
c->call
(c->constant
(getThunk(t, releaseMonitorForObjectThunk),
Compiler::AddressType),
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
c->register_(t->arch->thread()),
frame->append(field));
} else {
c->loadBarrier();
}
}
} break;
case goto_: {
uint32_t offset = codeReadInt16(t, code, ip);
uint32_t newIp = (ip - 3) + offset;
assert(t, newIp < codeLength(t, code));
c->jmp(frame->machineIp(newIp));
ip = newIp;
} break;
case goto_w: {
uint32_t offset = codeReadInt32(t, code, ip);
uint32_t newIp = (ip - 5) + offset;
assert(t, newIp < codeLength(t, code));
c->jmp(frame->machineIp(newIp));
ip = newIp;
} break;
case i2b: {
frame->pushInt(c->load(BytesPerWord, 1, frame->popInt(), BytesPerWord));
} break;
case i2c: {
frame->pushInt(c->loadz(BytesPerWord, 2, frame->popInt(), BytesPerWord));
} break;
case i2d: {
frame->pushLong(c->i2f(4, 8, frame->popInt()));
} break;
case i2f: {
frame->pushInt(c->i2f(4, 4, frame->popInt()));
} break;
case i2l:
frame->pushLong(c->load(BytesPerWord, 4, frame->popInt(), 8));
break;
case i2s: {
frame->pushInt(c->load(BytesPerWord, 2, frame->popInt(), BytesPerWord));
} break;
case iadd: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->add(4, a, b));
} break;
case iand: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->and_(4, a, b));
} break;
case iconst_m1:
frame->pushInt(c->constant(-1, Compiler::IntegerType));
break;
case iconst_0:
frame->pushInt(c->constant(0, Compiler::IntegerType));
break;
case iconst_1:
frame->pushInt(c->constant(1, Compiler::IntegerType));
break;
case iconst_2:
frame->pushInt(c->constant(2, Compiler::IntegerType));
break;
case iconst_3:
frame->pushInt(c->constant(3, Compiler::IntegerType));
break;
case iconst_4:
frame->pushInt(c->constant(4, Compiler::IntegerType));
break;
case iconst_5:
frame->pushInt(c->constant(5, Compiler::IntegerType));
break;
case idiv: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
if (inTryBlock(t, code, ip - 1)) {
c->saveLocals();
frame->trace(0, 0);
}
frame->pushInt(c->div(4, a, b));
} break;
case if_acmpeq:
case if_acmpne: {
uint32_t offset = codeReadInt16(t, code, ip);
uint32_t newIp = (ip - 3) + offset;
assert(t, newIp < codeLength(t, code));
Compiler::Operand* a = frame->popObject();
Compiler::Operand* b = frame->popObject();
Compiler::Operand* target = frame->machineIp(newIp);
if (instruction == if_acmpeq) {
c->jumpIfEqual(BytesPerWord, a, b, target);
} else {
c->jumpIfNotEqual(BytesPerWord, a, b, target);
}
saveStateAndCompile(t, frame, newIp);
if (UNLIKELY(t->exception)) return;
} break;
case if_icmpeq:
case if_icmpne:
case if_icmpgt:
case if_icmpge:
case if_icmplt:
case if_icmple: {
uint32_t offset = codeReadInt16(t, code, ip);
uint32_t newIp = (ip - 3) + offset;
assert(t, newIp < codeLength(t, code));
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
Compiler::Operand* target = frame->machineIp(newIp);
switch (instruction) {
case if_icmpeq:
c->jumpIfEqual(4, a, b, target);
2007-12-09 22:45:43 +00:00
break;
case if_icmpne:
c->jumpIfNotEqual(4, a, b, target);
break;
case if_icmpgt:
c->jumpIfGreater(4, a, b, target);
break;
case if_icmpge:
c->jumpIfGreaterOrEqual(4, a, b, target);
break;
case if_icmplt:
c->jumpIfLess(4, a, b, target);
break;
case if_icmple:
c->jumpIfLessOrEqual(4, a, b, target);
break;
default:
abort(t);
}
saveStateAndCompile(t, frame, newIp);
if (UNLIKELY(t->exception)) return;
} break;
case ifeq:
case ifne:
case ifgt:
case ifge:
case iflt:
case ifle: {
uint32_t offset = codeReadInt16(t, code, ip);
uint32_t newIp = (ip - 3) + offset;
assert(t, newIp < codeLength(t, code));
Compiler::Operand* target = frame->machineIp(newIp);
Compiler::Operand* a = c->constant(0, Compiler::IntegerType);
Compiler::Operand* b = frame->popInt();
switch (instruction) {
case ifeq:
c->jumpIfEqual(4, a, b, target);
break;
case ifne:
c->jumpIfNotEqual(4, a, b, target);
break;
case ifgt:
c->jumpIfGreater(4, a, b, target);
break;
case ifge:
c->jumpIfGreaterOrEqual(4, a, b, target);
break;
case iflt:
c->jumpIfLess(4, a, b, target);
break;
case ifle:
c->jumpIfLessOrEqual(4, a, b, target);
break;
default:
abort(t);
}
saveStateAndCompile(t, frame, newIp);
if (UNLIKELY(t->exception)) return;
} break;
case ifnull:
case ifnonnull: {
uint32_t offset = codeReadInt16(t, code, ip);
uint32_t newIp = (ip - 3) + offset;
assert(t, newIp < codeLength(t, code));
Compiler::Operand* a = c->constant(0, Compiler::ObjectType);
Compiler::Operand* b = frame->popObject();
Compiler::Operand* target = frame->machineIp(newIp);
if (instruction == ifnull) {
c->jumpIfEqual(BytesPerWord, a, b, target);
} else {
c->jumpIfNotEqual(BytesPerWord, a, b, target);
}
saveStateAndCompile(t, frame, newIp);
if (UNLIKELY(t->exception)) return;
} break;
case iinc: {
uint8_t index = codeBody(t, code, ip++);
int8_t count = codeBody(t, code, ip++);
storeLocal
(context, 1,
c->add
(4, c->constant(count, Compiler::IntegerType),
loadLocal(context, 1, index)),
index);
} break;
case iload:
case fload:
frame->loadInt(codeBody(t, code, ip++));
break;
case iload_0:
case fload_0:
frame->loadInt(0);
break;
case iload_1:
case fload_1:
frame->loadInt(1);
break;
case iload_2:
case fload_2:
frame->loadInt(2);
break;
case iload_3:
case fload_3:
frame->loadInt(3);
break;
case imul: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->mul(4, a, b));
} break;
case ineg: {
frame->pushInt(c->neg(4, frame->popInt()));
} break;
case instanceof: {
uint16_t index = codeReadInt16(t, code, ip);
object class_ = resolveClassInPool(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
frame->pushInt
(c->call
(c->constant(getThunk(t, instanceOf64Thunk), Compiler::AddressType),
0, 0, 4, Compiler::IntegerType,
3, c->register_(t->arch->thread()), frame->append(class_),
frame->popObject()));
} break;
case invokeinterface: {
context->leaf = false;
uint16_t index = codeReadInt16(t, code, ip);
ip += 2;
object target = resolveMethod(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);
unsigned parameterFootprint = methodParameterFootprint(t, target);
unsigned instance = parameterFootprint - 1;
int returnCode = methodReturnCode(t, target);
unsigned rSize = resultSize(t, returnCode);
Compiler::Operand* result = c->stackCall
(c->call
(c->constant
(getThunk(t, findInterfaceMethodFromInstanceThunk),
Compiler::AddressType),
0,
frame->trace(0, 0),
BytesPerWord,
Compiler::AddressType,
3, c->register_(t->arch->thread()), frame->append(target),
c->peek(1, instance)),
0,
frame->trace(0, 0),
rSize,
operandTypeForFieldCode(t, returnCode),
parameterFootprint);
frame->pop(parameterFootprint);
if (rSize) {
pushReturnValue(t, frame, returnCode, result);
}
} break;
case invokespecial: {
context->leaf = false;
uint16_t index = codeReadInt16(t, code, ip);
object target = resolveMethod(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
object class_ = methodClass(t, context->method);
if (isSpecialMethod(t, target, class_)) {
target = findVirtualMethod(t, target, classSuper(t, class_));
}
assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);
bool tailCall = isTailCall(t, code, ip, context->method, target);
compileDirectInvoke(t, frame, target, tailCall);
} break;
case invokestatic: {
context->leaf = false;
uint16_t index = codeReadInt16(t, code, ip);
object target = resolveMethod(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
assert(t, methodFlags(t, target) & ACC_STATIC);
if (not intrinsic(t, frame, target)) {
bool tailCall = isTailCall(t, code, ip, context->method, target);
compileDirectInvoke(t, frame, target, tailCall);
}
} break;
case invokevirtual: {
context->leaf = false;
uint16_t index = codeReadInt16(t, code, ip);
object target = resolveMethod(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);
unsigned parameterFootprint = methodParameterFootprint(t, target);
unsigned offset = ClassVtable + (methodOffset(t, target) * BytesPerWord);
Compiler::Operand* instance = c->peek(1, parameterFootprint - 1);
unsigned rSize = resultSize(t, methodReturnCode(t, target));
bool tailCall = isTailCall(t, code, ip, context->method, target);
Compiler::Operand* result = c->stackCall
(c->memory
(c->and_
(BytesPerWord, c->constant(PointerMask, Compiler::IntegerType),
c->memory(instance, Compiler::ObjectType, 0, 0, 1)),
Compiler::ObjectType, offset, 0, 1),
tailCall ? Compiler::TailJump : 0,
frame->trace(0, 0),
rSize,
operandTypeForFieldCode(t, methodReturnCode(t, target)),
parameterFootprint);
frame->pop(parameterFootprint);
if (rSize) {
pushReturnValue(t, frame, methodReturnCode(t, target), result);
}
} break;
case ior: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->or_(4, a, b));
} break;
case irem: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
if (inTryBlock(t, code, ip - 1)) {
c->saveLocals();
frame->trace(0, 0);
}
frame->pushInt(c->rem(4, a, b));
} break;
case ireturn:
case freturn: {
handleExit(t, frame);
c->return_(4, frame->popInt());
} return;
case ishl: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->shl(4, a, b));
} break;
case ishr: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->shr(4, a, b));
} break;
case istore:
case fstore:
frame->storeInt(codeBody(t, code, ip++));
break;
case istore_0:
case fstore_0:
frame->storeInt(0);
break;
case istore_1:
case fstore_1:
frame->storeInt(1);
break;
case istore_2:
case fstore_2:
frame->storeInt(2);
break;
case istore_3:
case fstore_3:
frame->storeInt(3);
break;
case isub: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->sub(4, a, b));
} break;
case iushr: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->ushr(4, a, b));
} break;
case ixor: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popInt();
frame->pushInt(c->xor_(4, a, b));
} break;
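// jsr/jsr_w: note the subroutine entry point and its return address, jump to
// the subroutine body, compile it, then mark the end of the subroutine so a
// later ret can return past this call site.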
case jsr:
case jsr_w: {
uint32_t thisIp;
uint32_t newIp;
if (instruction == jsr) {
uint32_t offset = codeReadInt16(t, code, ip);
thisIp = ip - 3;
newIp = thisIp + offset;
} else {
uint32_t offset = codeReadInt32(t, code, ip);
thisIp = ip - 5;
newIp = thisIp + offset;
}
assert(t, newIp < codeLength(t, code));
unsigned start = frame->startSubroutine(newIp, c->machineIp(ip));
c->jmp(frame->machineIp(newIp));
saveStateAndCompile(t, frame, newIp);
if (UNLIKELY(t->exception)) return;
frame->endSubroutine(start);
} break;
case l2d: {
frame->pushLong(c->i2f(8, 8, frame->popLong()));
} break;
case l2f: {
frame->pushInt(c->i2f(8, 4, frame->popLong()));
} break;
case l2i:
frame->pushInt(c->load(8, 8, frame->popLong(), BytesPerWord));
break;
case ladd: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->add(8, a, b));
} break;
case land: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->and_(8, a, b));
} break;
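// lcmp: if the result feeds a branch that immediately follows, integerBranch
// fuses the comparison and the branch; otherwise fall back to the
// compareLongs thunk, which yields -1, 0, or 1.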
case lcmp: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
if (not integerBranch(t, frame, code, ip, 8, a, b)) {
if (UNLIKELY(t->exception)) return;
frame->pushInt
(c->call
(c->constant
(getThunk(t, compareLongsThunk), Compiler::AddressType),
0, 0, 4, Compiler::IntegerType, 4,
static_cast<Compiler::Operand*>(0), a,
static_cast<Compiler::Operand*>(0), b));
}
} break;
case lconst_0:
frame->pushLong(c->constant(0, Compiler::IntegerType));
break;
case lconst_1:
frame->pushLong(c->constant(1, Compiler::IntegerType));
break;
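// ldc/ldc_w: class constants are materialized lazily through the getJClass
// thunk, other object constants become pool references, and everything else
// is pushed as an int or float immediate.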
case ldc:
case ldc_w: {
uint16_t index;
if (instruction == ldc) {
index = codeBody(t, code, ip++);
} else {
index = codeReadInt16(t, code, ip);
}
object pool = codePool(t, code);
if (singletonIsObject(t, pool, index - 1)) {
object v = singletonObject(t, pool, index - 1);
if (objectClass(t, v) == type(t, Machine::ReferenceType)) {
v = resolveClassInPool(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
}
if (objectClass(t, v) == type(t, Machine::ClassType)) {
frame->pushObject
(c->call
(c->constant
(getThunk(t, getJClass64Thunk), Compiler::AddressType),
0,
frame->trace(0, 0),
BytesPerWord,
Compiler::ObjectType,
2, c->register_(t->arch->thread()), frame->append(v)));
} else {
frame->pushObject(frame->append(v));
}
} else {
frame->pushInt
(c->constant
(singletonValue(t, pool, index - 1),
singletonBit(t, pool, poolSize(t, pool), index - 1)
? Compiler::FloatType : Compiler::IntegerType));
}
} break;
case ldc2_w: {
uint16_t index = codeReadInt16(t, code, ip);
object pool = codePool(t, code);
uint64_t v;
memcpy(&v, &singletonValue(t, pool, index - 1), 8);
frame->pushLong
(c->constant
(v, singletonBit(t, pool, poolSize(t, pool), index - 1)
? Compiler::FloatType : Compiler::IntegerType));
} break;
case ldiv_: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
if (inTryBlock(t, code, ip - 1)) {
c->saveLocals();
frame->trace(0, 0);
}
frame->pushLong(c->div(8, a, b));
} break;
case lload:
case dload:
frame->loadLong(codeBody(t, code, ip++));
break;
case lload_0:
case dload_0:
frame->loadLong(0);
break;
case lload_1:
case dload_1:
frame->loadLong(1);
break;
case lload_2:
case dload_2:
frame->loadLong(2);
break;
case lload_3:
case dload_3:
frame->loadLong(3);
break;
case lmul: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->mul(8, a, b));
} break;
case lneg:
frame->pushLong(c->neg(8, frame->popLong()));
break;
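// lookupswitch: emit the key/address pairs into the constant pool and
// dispatch through the lookUpAddress thunk, which searches that table at
// run time; afterwards compile each case target.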
case lookupswitch: {
int32_t base = ip - 1;
ip = (ip + 3) & ~3; // pad to four byte boundary
Compiler::Operand* key = frame->popInt();
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
assert(t, defaultIp < codeLength(t, code));
Compiler::Operand* default_ = frame->addressOperand
(c->machineIp(defaultIp));
int32_t pairCount = codeReadInt32(t, code, ip);
if (pairCount) {
Compiler::Operand* start = 0;
RUNTIME_ARRAY(uint32_t, ipTable, pairCount);
for (int32_t i = 0; i < pairCount; ++i) {
unsigned index = ip + (i * 8);
int32_t key = codeReadInt32(t, code, index);
uint32_t newIp = base + codeReadInt32(t, code, index);
assert(t, newIp < codeLength(t, code));
RUNTIME_ARRAY_BODY(ipTable)[i] = newIp;
Promise* p = c->poolAppend(key);
if (i == 0) {
start = frame->addressOperand(p);
}
c->poolAppendPromise(frame->addressPromise(c->machineIp(newIp)));
}
assert(t, start);
c->jmp
(c->call
(c->constant
(getThunk(t, lookUpAddressThunk), Compiler::AddressType),
0, 0, BytesPerWord, Compiler::AddressType,
4, key, start, c->constant(pairCount, Compiler::IntegerType),
default_));
Compiler::State* state = c->saveState();
for (int32_t i = 0; i < pairCount; ++i) {
compile(t, frame, RUNTIME_ARRAY_BODY(ipTable)[i]);
if (UNLIKELY(t->exception)) return;
c->restoreState(state);
}
} else {
// a switch statement with no cases, apparently
c->jmp(default_);
}
ip = defaultIp;
} break;
case lor: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->or_(8, a, b));
} break;
case lrem: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
if (inTryBlock(t, code, ip - 1)) {
c->saveLocals();
frame->trace(0, 0);
}
frame->pushLong(c->rem(8, a, b));
} break;
case lreturn:
case dreturn: {
handleExit(t, frame);
c->return_(8, frame->popLong());
} return;
case lshl: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->shl(8, a, b));
} break;
case lshr: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->shr(8, a, b));
} break;
case lstore:
case dstore:
frame->storeLong(codeBody(t, code, ip++));
break;
case lstore_0:
case dstore_0:
frame->storeLong(0);
break;
case lstore_1:
case dstore_1:
frame->storeLong(1);
break;
case lstore_2:
case dstore_2:
frame->storeLong(2);
break;
case lstore_3:
case dstore_3:
frame->storeLong(3);
break;
case lsub: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->sub(8, a, b));
} break;
case lushr: {
Compiler::Operand* a = frame->popInt();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->ushr(8, a, b));
} break;
case lxor: {
Compiler::Operand* a = frame->popLong();
Compiler::Operand* b = frame->popLong();
frame->pushLong(c->xor_(8, a, b));
} break;
case monitorenter: {
Compiler::Operand* target = frame->popObject();
c->call
(c->constant
(getThunk(t, acquireMonitorForObjectThunk), Compiler::AddressType),
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
c->register_(t->arch->thread()), target);
} break;
case monitorexit: {
Compiler::Operand* target = frame->popObject();
c->call
(c->constant
(getThunk(t, releaseMonitorForObjectThunk), Compiler::AddressType),
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
c->register_(t->arch->thread()), target);
} break;
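// multianewarray: the dimension counts remain on the operand stack; the
// offset computed below presumably tells the makeMultidimensionalArray
// thunk where they start within the caller's frame.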
case multianewarray: {
uint16_t index = codeReadInt16(t, code, ip);
uint8_t dimensions = codeBody(t, code, ip++);
object class_ = resolveClassInPool(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
PROTECT(t, class_);
unsigned offset
= localOffset
(t, localSize(t, context->method) + c->topOfStack(), context->method)
+ t->arch->frameReturnAddressSize();
Compiler::Operand* result = c->call
(c->constant
(getThunk(t, makeMultidimensionalArrayThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
BytesPerWord,
Compiler::ObjectType,
4, c->register_(t->arch->thread()), frame->append(class_),
c->constant(dimensions, Compiler::IntegerType),
c->constant(offset, Compiler::IntegerType));
frame->pop(dimensions);
frame->pushObject(result);
} break;
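// new: classes with weak-reference semantics or finalizers take the general
// allocation path (makeNewGeneral64); all others use the simpler makeNew64
// thunk.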
case new_: {
uint16_t index = codeReadInt16(t, code, ip);
object class_ = resolveClassInPool(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
if (classVmFlags(t, class_) & (WeakReferenceFlag | HasFinalizerFlag)) {
frame->pushObject
(c->call
(c->constant
(getThunk(t, makeNewGeneral64Thunk), Compiler::AddressType),
0,
frame->trace(0, 0),
BytesPerWord,
Compiler::ObjectType,
2, c->register_(t->arch->thread()), frame->append(class_)));
} else {
frame->pushObject
(c->call
(c->constant(getThunk(t, makeNew64Thunk), Compiler::AddressType),
0,
frame->trace(0, 0),
BytesPerWord,
Compiler::ObjectType,
2, c->register_(t->arch->thread()), frame->append(class_)));
}
} break;
case newarray: {
uint8_t type = codeBody(t, code, ip++);
Compiler::Operand* length = frame->popInt();
frame->pushObject
(c->call
(c->constant(getThunk(t, makeBlankArrayThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
BytesPerWord,
Compiler::ObjectType,
3, c->register_(t->arch->thread()),
c->constant(type, Compiler::IntegerType), length));
} break;
case nop: break;
case pop_:
frame->pop(1);
break;
case pop2:
frame->pop(2);
break;
case putfield:
case putstatic: {
uint16_t index = codeReadInt16(t, code, ip);
object field = resolveField(t, context->method, index - 1);
if (UNLIKELY(t->exception)) return;
object staticTable = 0;
if (instruction == putstatic) {
assert(t, fieldFlags(t, field) & ACC_STATIC);
if (fieldClass(t, field) != methodClass(t, context->method)
and classNeedsInit(t, fieldClass(t, field)))
{
PROTECT(t, field);
c->call
(c->constant
(getThunk(t, tryInitClassThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
2, c->register_(t->arch->thread()),
frame->append(fieldClass(t, field)));
}
staticTable = classStaticTable(t, fieldClass(t, field));
} else {
assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
if (inTryBlock(t, code, ip - 3)) {
c->saveLocals();
frame->trace(0, 0);
}
}
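// volatile field stores: a 64-bit store is not atomic on a 32-bit system,
// so guard it with the field's monitor; otherwise a store-store barrier
// before the store (and a store-load barrier after it) is sufficient.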
if (fieldFlags(t, field) & ACC_VOLATILE) {
if (BytesPerWord == 4
and (fieldCode(t, field) == DoubleField
or fieldCode(t, field) == LongField))
{
PROTECT(t, field);
c->call
(c->constant
(getThunk(t, acquireMonitorForObjectThunk),
Compiler::AddressType),
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
c->register_(t->arch->thread()), frame->append(field));
} else {
c->storeStoreBarrier();
}
}
Compiler::Operand* value;
switch (fieldCode(t, field)) {
case ByteField:
case BooleanField:
case CharField:
case ShortField:
case FloatField:
case IntField: {
value = frame->popInt();
} break;
case DoubleField:
case LongField: {
value = frame->popLong();
} break;
case ObjectField: {
value = frame->popObject();
} break;
default: abort(t);
}
Compiler::Operand* table;
if (instruction == putstatic) {
PROTECT(t, field);
table = frame->append(staticTable);
} else {
table = frame->popObject();
}
switch (fieldCode(t, field)) {
case ByteField:
case BooleanField:
c->store
(BytesPerWord, value, 1, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1));
break;
case CharField:
case ShortField:
c->store
(BytesPerWord, value, 2, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1));
break;
case FloatField:
c->store
(BytesPerWord, value, 4, c->memory
(table, Compiler::FloatType, fieldOffset(t, field), 0, 1));
break;
case IntField:
c->store
(BytesPerWord, value, 4, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1));
break;
case DoubleField:
c->store
(8, value, 8, c->memory
(table, Compiler::FloatType, fieldOffset(t, field), 0, 1));
break;
case LongField:
c->store
(8, value, 8, c->memory
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1));
break;
case ObjectField:
if (instruction == putfield) {
c->call
(c->constant
(getThunk(t, setMaybeNullThunk), Compiler::AddressType),
0,
frame->trace(0, 0),
0,
Compiler::VoidType,
4, c->register_(t->arch->thread()), table,
c->constant(fieldOffset(t, field), Compiler::IntegerType), value);
} else {
c->call
(c->constant(getThunk(t, setThunk), Compiler::AddressType),
0, 0, 0, Compiler::VoidType,
4, c->register_(t->arch->thread()), table,
c->constant(fieldOffset(t, field), Compiler::IntegerType), value);
}
break;
default: abort(t);
}
if (fieldFlags(t, field) & ACC_VOLATILE) {
if (BytesPerWord == 4
and (fieldCode(t, field) == DoubleField
or fieldCode(t, field) == LongField))
{
c->call
(c->constant
(getThunk(t, releaseMonitorForObjectThunk),
Compiler::AddressType),
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
c->register_(t->arch->thread()), frame->append(field));
} else {
c->storeLoadBarrier();
}
}
} break;
case ret: {
unsigned index = codeBody(t, code, ip);
frame->returnFromSubroutine(index);
} return;
case return_:
if (needsReturnBarrier(t, context->method)) {
c->storeStoreBarrier();
}
handleExit(t, frame);
c->return_(0, 0);
return;
case sipush:
frame->pushInt
(c->constant
(static_cast<int16_t>(codeReadInt16(t, code, ip)),
Compiler::IntegerType));
break;
case swap:
frame->swap();
break;
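// tableswitch: bounds-check the key against [bottom, top], branching to the
// default target when it is out of range; otherwise jump through a table of
// code addresses built in the constant pool.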
case tableswitch: {
int32_t base = ip - 1;
ip = (ip + 3) & ~3; // pad to four byte boundary
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
assert(t, defaultIp < codeLength(t, code));
int32_t bottom = codeReadInt32(t, code, ip);
int32_t top = codeReadInt32(t, code, ip);
Compiler::Operand* start = 0;
RUNTIME_ARRAY(uint32_t, ipTable, top - bottom + 1);
for (int32_t i = 0; i < top - bottom + 1; ++i) {
unsigned index = ip + (i * 4);
uint32_t newIp = base + codeReadInt32(t, code, index);
assert(t, newIp < codeLength(t, code));
RUNTIME_ARRAY_BODY(ipTable)[i] = newIp;
Promise* p = c->poolAppendPromise
(frame->addressPromise(c->machineIp(newIp)));
if (i == 0) {
start = frame->addressOperand(p);
}
}
assert(t, start);
Compiler::Operand* key = frame->popInt();
c->jumpIfLess(4, c->constant(bottom, Compiler::IntegerType), key,
frame->machineIp(defaultIp));
c->save(1, key);
saveStateAndCompile(t, frame, defaultIp);
c->jumpIfGreater(4, c->constant(top, Compiler::IntegerType), key,
frame->machineIp(defaultIp));
c->save(1, key);
saveStateAndCompile(t, frame, defaultIp);
Compiler::Operand* normalizedKey
= (bottom
? c->sub(4, c->constant(bottom, Compiler::IntegerType), key) : key);
c->jmp
(c->load
(BytesPerWord, BytesPerWord, c->memory
(start, Compiler::AddressType, 0, normalizedKey, BytesPerWord),
BytesPerWord));
Compiler::State* state = c->saveState();
for (int32_t i = 0; i < top - bottom + 1; ++i) {
compile(t, frame, RUNTIME_ARRAY_BODY(ipTable)[i]);
if (UNLIKELY(t->exception)) return;
c->restoreState(state);
}
ip = defaultIp;
} break;
case wide: {
switch (codeBody(t, code, ip++)) {
case aload: {
frame->loadObject(codeReadInt16(t, code, ip));
} break;
case astore: {
frame->storeObject(codeReadInt16(t, code, ip));
} break;
case iinc: {
uint16_t index = codeReadInt16(t, code, ip);
int16_t count = codeReadInt16(t, code, ip);
storeLocal
(context, 1,
c->add
(4, c->constant(count, Compiler::IntegerType),
loadLocal(context, 1, index)),
index);
} break;
case iload: {
frame->loadInt(codeReadInt16(t, code, ip));
} break;
case istore: {
frame->storeInt(codeReadInt16(t, code, ip));
} break;
case lload: {
frame->loadLong(codeReadInt16(t, code, ip));
} break;
case lstore: {
frame->storeLong(codeReadInt16(t, code, ip));
} break;
case ret: {
unsigned index = codeReadInt16(t, code, ip);
c->jmp(loadLocal(context, 1, index));
frame->returnFromSubroutine(index);
} return;
default: abort(t);
}
} break;
default: abort(t);
}
}
}
FILE* compileLog = 0;
void
logCompile(MyThread* t, const void* code, unsigned size, const char* class_,
const char* name, const char* spec)
{
static bool open = false;
if (not open) {
open = true;
const char* path = findProperty(t, "avian.jit.log");
if (path) {
compileLog = vm::fopen(path, "wb");
} else if (DebugCompile) {
compileLog = stderr;
}
}
if (compileLog) {
fprintf(compileLog, "%p %p %s.%s%s\n",
code, static_cast<const uint8_t*>(code) + size,
class_, name, spec);
}
}
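// Translate the bytecode exception handler table into machine-code offsets
// relative to the start of the compiled method, resolving each handler's
// catch type as we go.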
object
translateExceptionHandlerTable(MyThread* t, Compiler* c, object method,
intptr_t start)
{
object oldTable = codeExceptionHandlerTable(t, methodCode(t, method));
if (oldTable) {
PROTECT(t, method);
PROTECT(t, oldTable);
unsigned length = exceptionHandlerTableLength(t, oldTable);
object newIndex = makeIntArray(t, length * 3);
PROTECT(t, newIndex);
object newTable = makeArray(t, length + 1);
PROTECT(t, newTable);
set(t, newTable, ArrayBody, newIndex);
for (unsigned i = 0; i < length; ++i) {
ExceptionHandler* oldHandler = exceptionHandlerTableBody
(t, oldTable, i);
intArrayBody(t, newIndex, i * 3)
= c->machineIp(exceptionHandlerStart(oldHandler))->value() - start;
intArrayBody(t, newIndex, (i * 3) + 1)
= c->machineIp(exceptionHandlerEnd(oldHandler))->value() - start;
intArrayBody(t, newIndex, (i * 3) + 2)
= c->machineIp(exceptionHandlerIp(oldHandler))->value() - start;
object type;
if (exceptionHandlerCatchType(oldHandler)) {
type = resolveClassInPool
(t, method, exceptionHandlerCatchType(oldHandler) - 1);
if (UNLIKELY(t->exception)) return 0;
} else {
type = 0;
}
set(t, newTable, ArrayBody + ((i + 1) * BytesPerWord), type);
}
return newTable;
} else {
return 0;
}
}
object
translateLineNumberTable(MyThread* t, Compiler* c, object code, intptr_t start)
{
object oldTable = codeLineNumberTable(t, code);
if (oldTable) {
PROTECT(t, code);
PROTECT(t, oldTable);
unsigned length = lineNumberTableLength(t, oldTable);
object newTable = makeLineNumberTable(t, length);
for (unsigned i = 0; i < length; ++i) {
LineNumber* oldLine = lineNumberTableBody(t, oldTable, i);
LineNumber* newLine = lineNumberTableBody(t, newTable, i);
lineNumberIp(newLine)
= c->machineIp(lineNumberIp(oldLine))->value() - start;
lineNumberLine(newLine) = lineNumberLine(oldLine);
}
return newTable;
} else {
return 0;
}
}
void
printSet(uintptr_t m, unsigned limit)
{
if (limit) {
for (unsigned i = 0; i < 16; ++i) {
if ((m >> i) & 1) {
fprintf(stderr, "1");
} else {
fprintf(stderr, "_");
}
}
}
}
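// Compute a conservative root map for entry to an exception handler by
// ANDing together the maps recorded at every trace point inside the
// protected region: a slot counts as a root only if it holds a reference on
// every path through the try block.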
void
calculateTryCatchRoots(Context* context, SubroutinePath* subroutinePath,
uintptr_t* roots, unsigned mapSize, unsigned start,
unsigned end)
{
memset(roots, 0xFF, mapSize * BytesPerWord);
if (DebugFrameMaps) {
fprintf(stderr, "calculate try/catch roots from %d to %d", start, end);
if (subroutinePath) {
fprintf(stderr, " ");
print(subroutinePath);
}
fprintf(stderr, "\n");
}
for (TraceElement* te = context->traceLog; te; te = te->next) {
if (te->ip >= start and te->ip < end) {
uintptr_t* traceRoots = 0;
if (subroutinePath == 0) {
traceRoots = te->map;
te->watch = true;
} else {
for (SubroutineTrace* t = te->subroutineTrace; t; t = t->next) {
if (t->path == subroutinePath) {
traceRoots = t->map;
t->watch = true;
break;
}
}
}
if (traceRoots) {
if (DebugFrameMaps) {
fprintf(stderr, " use roots at ip %3d: ", te->ip);
printSet(*traceRoots, mapSize);
fprintf(stderr, "\n");
}
for (unsigned wi = 0; wi < mapSize; ++wi) {
roots[wi] &= traceRoots[wi];
}
} else {
if (DebugFrameMaps) {
fprintf(stderr, " skip roots at ip %3d\n", te->ip);
}
}
}
}
if (DebugFrameMaps) {
fprintf(stderr, "result roots : ");
printSet(*roots, mapSize);
fprintf(stderr, "\n");
}
}
unsigned
calculateFrameMaps(MyThread* t, Context* context, uintptr_t* originalRoots,
unsigned eventIndex, SubroutinePath* subroutinePath = 0)
{
// for each instruction with more than one predecessor, and for each
// stack position, determine if there exists a path to that
// instruction such that there is not an object pointer left at that
// stack position (i.e. it is uninitialized or contains primitive
// data).
unsigned mapSize = frameMapSizeInWords(t, context->method);
RUNTIME_ARRAY(uintptr_t, roots, mapSize);
if (originalRoots) {
memcpy(RUNTIME_ARRAY_BODY(roots), originalRoots, mapSize * BytesPerWord);
} else {
memset(RUNTIME_ARRAY_BODY(roots), 0, mapSize * BytesPerWord);
}
int32_t ip = -1;
// invariant: for each stack position, roots contains a zero at that
// position if there exists some path to the current instruction
// such that there is definitely not an object pointer at that
// position. Otherwise, roots contains a one at that position,
// meaning either all known paths result in an object pointer at
// that position, or the contents of that position are as yet
// unknown.
unsigned length = context->eventLog.length();
while (eventIndex < length) {
Event e = static_cast<Event>(context->eventLog.get(eventIndex++));
switch (e) {
case PushContextEvent: {
eventIndex = calculateFrameMaps
(t, context, RUNTIME_ARRAY_BODY(roots), eventIndex, subroutinePath);
} break;
case PopContextEvent:
return eventIndex;
case IpEvent: {
ip = context->eventLog.get2(eventIndex);
eventIndex += 2;
if (DebugFrameMaps) {
fprintf(stderr, " roots at ip %3d: ", ip);
printSet(*RUNTIME_ARRAY_BODY(roots), mapSize);
fprintf(stderr, "\n");
}
uintptr_t* tableRoots
= (subroutinePath ? subroutinePath->rootTable : context->rootTable)
+ (ip * mapSize);
if (context->visitTable[ip] > 1) {
for (unsigned wi = 0; wi < mapSize; ++wi) {
uintptr_t newRoots = tableRoots[wi] & RUNTIME_ARRAY_BODY(roots)[wi];
if ((eventIndex == length
or context->eventLog.get(eventIndex) == PopContextEvent)
and newRoots != tableRoots[wi])
{
if (DebugFrameMaps) {
fprintf(stderr, "dirty roots!\n");
}
context->dirtyRoots = true;
}
tableRoots[wi] = newRoots;
RUNTIME_ARRAY_BODY(roots)[wi] &= tableRoots[wi];
}
if (DebugFrameMaps) {
fprintf(stderr, " table roots at ip %3d: ", ip);
printSet(*tableRoots, mapSize);
fprintf(stderr, "\n");
}
} else {
memcpy(tableRoots, RUNTIME_ARRAY_BODY(roots), mapSize * BytesPerWord);
}
} break;
case MarkEvent: {
unsigned i = context->eventLog.get2(eventIndex);
eventIndex += 2;
markBit(RUNTIME_ARRAY_BODY(roots), i);
} break;
case ClearEvent: {
unsigned i = context->eventLog.get2(eventIndex);
eventIndex += 2;
clearBit(RUNTIME_ARRAY_BODY(roots), i);
2008-01-07 14:51:07 +00:00
} break;
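// entering an exception handler: intersect the root maps recorded for the
// protected range (once per subroutine path if the range lies inside a
// subroutine) and continue the frame map calculation from that state.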
case PushExceptionHandlerEvent: {
unsigned start = context->eventLog.get2(eventIndex);
eventIndex += 2;
unsigned end = context->eventLog.get2(eventIndex);
eventIndex += 2;
if (context->subroutineTable and context->subroutineTable[start]) {
Subroutine* s = context->subroutineTable[start];
unsigned originalEventIndex = eventIndex;
for (SubroutineCall* c = s->calls; c; c = c->next) {
for (SubroutinePath* p = c->paths; p; p = p->listNext) {
calculateTryCatchRoots
(context, p, RUNTIME_ARRAY_BODY(roots), mapSize, start, end);
eventIndex = calculateFrameMaps
(t, context, RUNTIME_ARRAY_BODY(roots), originalEventIndex, p);
}
}
} else {
calculateTryCatchRoots
(context, 0, RUNTIME_ARRAY_BODY(roots), mapSize, start, end);
eventIndex = calculateFrameMaps
(t, context, RUNTIME_ARRAY_BODY(roots), eventIndex, 0);
}
} break;
case TraceEvent: {
TraceElement* te; context->eventLog.get(eventIndex, &te, BytesPerWord);
if (DebugFrameMaps) {
fprintf(stderr, " trace roots at ip %3d: ", ip);
printSet(*RUNTIME_ARRAY_BODY(roots), mapSize);
if (subroutinePath) {
fprintf(stderr, " ");
print(subroutinePath);
}
fprintf(stderr, "\n");
}
uintptr_t* map;
bool watch;
if (subroutinePath == 0) {
map = te->map;
watch = te->watch;
} else {
SubroutineTrace* trace = 0;
for (SubroutineTrace* t = te->subroutineTrace; t; t = t->next) {
if (t->path == subroutinePath) {
trace = t;
break;
}
}
if (trace == 0) {
te->subroutineTrace = trace = new
(context->zone.allocate
(sizeof(SubroutineTrace) + (mapSize * BytesPerWord)))
SubroutineTrace(subroutinePath, te->subroutineTrace, mapSize);
++ te->subroutineTraceCount;
}
map = trace->map;
watch = trace->watch;
}
for (unsigned wi = 0; wi < mapSize; ++wi) {
uintptr_t v = RUNTIME_ARRAY_BODY(roots)[wi];
if (watch and map[wi] != v) {
if (DebugFrameMaps) {
fprintf(stderr, "dirty roots due to trace watch!\n");
}
context->dirtyRoots = true;
}
map[wi] = v;
}
eventIndex += BytesPerWord;
} break;
case PushSubroutineEvent: {
SubroutineCall* call;
context->eventLog.get(eventIndex, &call, BytesPerWord);
eventIndex += BytesPerWord;
unsigned nextIndex = context->eventLog.get2(eventIndex);
eventIndex = nextIndex;
SubroutinePath* path = 0;
for (SubroutinePath* p = call->paths; p; p = p->listNext) {
if (p->stackNext == subroutinePath) {
path = p;
break;
}
}
if (path == 0) {
path = new (context->zone.allocate(sizeof(SubroutinePath)))
SubroutinePath(call, subroutinePath,
makeRootTable(t, &(context->zone), context->method));
}
calculateFrameMaps
(t, context, RUNTIME_ARRAY_BODY(roots), call->subroutine->logIndex,
path);
} break;
case PopSubroutineEvent:
return static_cast<unsigned>(-1);
default: abort(t);
}
}
return eventIndex;
}
int
compareTraceElementPointers(const void* va, const void* vb)
{
TraceElement* a = *static_cast<TraceElement* const*>(va);
TraceElement* b = *static_cast<TraceElement* const*>(vb);
if (a->address->value() > b->address->value()) {
return 1;
} else if (a->address->value() < b->address->value()) {
return -1;
} else {
return 0;
}
}
unsigned
simpleFrameMapTableSize(MyThread* t, object method, object map)
{
int size = frameMapSizeInBits(t, method);
return ceiling(intArrayLength(t, map) * size, 32 + size);
}
unsigned
codeSingletonSizeInBytes(MyThread*, unsigned codeSizeInBytes)
{
unsigned count = ceiling(codeSizeInBytes, BytesPerWord);
unsigned size = count + singletonMaskSize(count);
return pad(SingletonBody + (size * BytesPerWord));
}
uint8_t*
finish(MyThread* t, Allocator* allocator, Assembler* a, const char* name,
unsigned length)
{
uint8_t* start = static_cast<uint8_t*>(allocator->allocate(pad(length)));
a->writeTo(start);
logCompile(t, start, length, 0, name, 0);
return start;
}
void
setBit(int32_t* dst, unsigned index)
{
dst[index / 32] |= static_cast<int32_t>(1) << (index % 32);
}
void
clearBit(int32_t* dst, unsigned index)
{
dst[index / 32] &= ~(static_cast<int32_t>(1) << (index % 32));
}
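// Copy the computed roots for one trace point into its frame map table
// entry, one bit per frame slot up to the trace's argument index.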
void
copyFrameMap(int32_t* dst, uintptr_t* src, unsigned mapSizeInBits,
unsigned offset, TraceElement* p,
SubroutinePath* subroutinePath)
{
if (DebugFrameMaps) {
fprintf(stderr, " orig roots at ip %3d: ", p->ip);
printSet(src[0], ceiling(mapSizeInBits, BitsPerWord));
print(subroutinePath);
fprintf(stderr, "\n");
fprintf(stderr, " final roots at ip %3d: ", p->ip);
}
for (unsigned j = 0; j < p->argumentIndex; ++j) {
if (getBit(src, j)) {
if (DebugFrameMaps) {
fprintf(stderr, "1");
}
setBit(dst, offset + j);
} else {
if (DebugFrameMaps) {
fprintf(stderr, "_");
}
clearBit(dst, offset + j);
}
}
if (DebugFrameMaps) {
print(subroutinePath);
fprintf(stderr, "\n");
}
}
class FrameMapTableHeader {
public:
FrameMapTableHeader(unsigned indexCount):
indexCount(indexCount)
{ }
unsigned indexCount;
};
class FrameMapTableIndexElement {
public:
FrameMapTableIndexElement(int offset, unsigned base, unsigned path):
offset(offset),
base(base),
path(path)
{ }
int offset;
unsigned base;
unsigned path;
};
class FrameMapTablePath {
public:
FrameMapTablePath(unsigned stackIndex, unsigned elementCount, unsigned next):
stackIndex(stackIndex),
elementCount(elementCount),
next(next)
{ }
unsigned stackIndex;
unsigned elementCount;
unsigned next;
int32_t elements[0];
};
int
compareInt32s(const void* va, const void* vb)
{
return *static_cast<int32_t const*>(va) - *static_cast<int32_t const*>(vb);
}
int
compare(SubroutinePath* a, SubroutinePath* b)
{
if (a->stackNext) {
int d = compare(a->stackNext, b->stackNext);
if (d) return d;
}
int64_t av = a->call->returnAddress->value();
int64_t bv = b->call->returnAddress->value();
if (av > bv) {
return 1;
} else if (av < bv) {
return -1;
} else {
return 0;
}
}
int
compareSubroutineTracePointers(const void* va, const void* vb)
{
return compare((*static_cast<SubroutineTrace* const*>(va))->path,
(*static_cast<SubroutineTrace* const*>(vb))->path);
}
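// build the "general" frame map table used when a method contains JSR
// subroutines.  The returned byte array is laid out roughly as:
//
//   FrameMapTableHeader            header (indexCount)
//   FrameMapTableIndexElement[n]   one per trace point, ordered by code
//                                  offset; each records the offset, the
//                                  index of its first map, and its
//                                  subroutine path (0 if none)
//   int32_t[]                      the frame maps, packed mapSize bits
//                                  per map
//   FrameMapTablePath[]            one per subroutine, listing the code
//                                  offsets of its call sites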
object
makeGeneralFrameMapTable(MyThread* t, Context* context, uint8_t* start,
TraceElement** elements, unsigned elementCount,
unsigned pathFootprint, unsigned mapCount)
{
unsigned mapSize = frameMapSizeInBits(t, context->method);
unsigned indexOffset = sizeof(FrameMapTableHeader);
unsigned mapsOffset = indexOffset
+ (elementCount * sizeof(FrameMapTableIndexElement));
unsigned pathsOffset = mapsOffset + (ceiling(mapCount * mapSize, 32) * 4);
object table = makeByteArray(t, pathsOffset + pathFootprint);
int8_t* body = &byteArrayBody(t, table, 0);
new (body) FrameMapTableHeader(elementCount);
unsigned nextTableIndex = pathsOffset;
unsigned nextMapIndex = 0;
for (unsigned i = 0; i < elementCount; ++i) {
TraceElement* p = elements[i];
unsigned mapBase = nextMapIndex;
unsigned pathIndex;
if (p->subroutineTrace) {
FrameMapTablePath* previous = 0;
Subroutine* subroutine = p->subroutineTrace->path->call->subroutine;
for (Subroutine* s = subroutine; s; s = s->stackNext) {
if (s->tableIndex == 0) {
unsigned pathObjectSize = sizeof(FrameMapTablePath)
+ (sizeof(int32_t) * s->callCount);
assert(t, nextTableIndex + pathObjectSize
<= byteArrayLength(t, table));
s->tableIndex = nextTableIndex;
nextTableIndex += pathObjectSize;
FrameMapTablePath* current = new (body + s->tableIndex)
FrameMapTablePath
(s->stackIndex, s->callCount,
s->stackNext ? s->stackNext->tableIndex : 0);
unsigned i = 0;
for (SubroutineCall* c = subroutine->calls; c; c = c->next) {
assert(t, i < s->callCount);
current->elements[i++]
= static_cast<intptr_t>(c->returnAddress->value())
- reinterpret_cast<intptr_t>(start);
}
assert(t, i == s->callCount);
qsort(current->elements, s->callCount, sizeof(int32_t),
compareInt32s);
if (previous) {
previous->next = s->tableIndex;
}
previous = current;
} else {
break;
}
}
pathIndex = subroutine->tableIndex;
RUNTIME_ARRAY(SubroutineTrace*, traces, p->subroutineTraceCount);
unsigned i = 0;
for (SubroutineTrace* trace = p->subroutineTrace;
trace; trace = trace->next)
{
assert(t, i < p->subroutineTraceCount);
RUNTIME_ARRAY_BODY(traces)[i++] = trace;
}
assert(t, i == p->subroutineTraceCount);
qsort(RUNTIME_ARRAY_BODY(traces), p->subroutineTraceCount,
sizeof(SubroutineTrace*), compareSubroutineTracePointers);
for (unsigned i = 0; i < p->subroutineTraceCount; ++i) {
assert(t, mapsOffset + ceiling(nextMapIndex + mapSize, 32) * 4
<= pathsOffset);
copyFrameMap(reinterpret_cast<int32_t*>(body + mapsOffset),
RUNTIME_ARRAY_BODY(traces)[i]->map, mapSize,
nextMapIndex, p, RUNTIME_ARRAY_BODY(traces)[i]->path);
nextMapIndex += mapSize;
}
} else {
pathIndex = 0;
assert(t, mapsOffset + ceiling(nextMapIndex + mapSize, 32) * 4
<= pathsOffset);
copyFrameMap(reinterpret_cast<int32_t*>(body + mapsOffset), p->map,
mapSize, nextMapIndex, p, 0);
nextMapIndex += mapSize;
}
unsigned elementIndex = indexOffset
+ (i * sizeof(FrameMapTableIndexElement));
assert(t, elementIndex + sizeof(FrameMapTableIndexElement) <= mapsOffset);
new (body + elementIndex) FrameMapTableIndexElement
(static_cast<intptr_t>(p->address->value())
- reinterpret_cast<intptr_t>(start), mapBase, pathIndex);
}
assert(t, nextMapIndex == mapCount * mapSize);
return table;
}
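// build the "simple" frame map table used when a method contains no
// subroutines: an int array holding the code offsets of the trace
// points followed by their frame maps, packed mapSize bits per trace
// point.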
object
makeSimpleFrameMapTable(MyThread* t, Context* context, uint8_t* start,
TraceElement** elements, unsigned elementCount)
{
unsigned mapSize = frameMapSizeInBits(t, context->method);
object table = makeIntArray
(t, elementCount + ceiling(elementCount * mapSize, 32));
assert(t, intArrayLength(t, table) == elementCount
+ simpleFrameMapTableSize(t, context->method, table));
for (unsigned i = 0; i < elementCount; ++i) {
TraceElement* p = elements[i];
intArrayBody(t, table, i) = static_cast<intptr_t>(p->address->value())
- reinterpret_cast<intptr_t>(start);
assert(t, elementCount + ceiling((i + 1) * mapSize, 32)
<= intArrayLength(t, table));
if (mapSize) {
copyFrameMap(&intArrayBody(t, table, elementCount), p->map,
mapSize, i * mapSize, p, 0);
}
}
return table;
}
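// emit and install the machine code for context->method: allocate the
// code and constant pool, anchor the object pool entries in the
// ObjectPools root so the collector can find them, translate the
// exception handler and line number tables to machine-code offsets,
// build the frame map table, and record each call site in the global
// call table.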
void
finish(MyThread* t, Allocator* allocator, Context* context)
{
Compiler* c = context->compiler;
if (false) {
logCompile
(t, 0, 0,
reinterpret_cast<const char*>
(&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
reinterpret_cast<const char*>
(&byteArrayBody(t, methodName(t, context->method), 0)),
reinterpret_cast<const char*>
(&byteArrayBody(t, methodSpec(t, context->method), 0)));
}
// for debugging:
if (false and
::strcmp
(reinterpret_cast<const char*>
(&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
"java/lang/System") == 0 and
::strcmp
(reinterpret_cast<const char*>
(&byteArrayBody(t, methodName(t, context->method), 0)),
"<clinit>") == 0)
{
trap();
}
// todo: this is a CPU-intensive operation, so consider doing it
// earlier before we've acquired the global class lock to improve
// parallelism (the downside being that it may end up being a waste
// of cycles if another thread compiles the same method in parallel,
// which might be mitigated by fine-grained, per-method locking):
unsigned codeSize = c->compile
(context->leaf ? 0 : stackOverflowThunk(t),
difference(&(t->stackLimit), t));
uintptr_t* code = static_cast<uintptr_t*>
(allocator->allocate(pad(codeSize) + pad(c->poolSize()) + BytesPerWord));
code[0] = codeSize;
uint8_t* start = reinterpret_cast<uint8_t*>(code + 1);
if (context->objectPool) {
object pool = allocate3
(t, allocator, Machine::ImmortalAllocation,
FixedSizeOfArray + ((context->objectPoolCount + 1) * BytesPerWord),
true);
initArray(t, pool, context->objectPoolCount + 1);
mark(t, pool, 0);
set(t, pool, ArrayBody, root(t, ObjectPools));
setRoot(t, ObjectPools, pool);
unsigned i = 1;
for (PoolElement* p = context->objectPool; p; p = p->next) {
unsigned offset = ArrayBody + ((i++) * BytesPerWord);
p->address = reinterpret_cast<uintptr_t>(pool) + offset;
set(t, pool, offset, p->target);
}
}
c->writeTo(start);
BootContext* bc = context->bootContext;
if (bc) {
for (DelayedPromise* p = bc->addresses;
p != bc->addressSentinal;
p = p->next)
{
p->basis = new (bc->zone->allocate(sizeof(ResolvedPromise)))
ResolvedPromise(p->basis->value());
}
}
object newExceptionHandlerTable = translateExceptionHandlerTable
(t, c, context->method, reinterpret_cast<intptr_t>(start));
if (UNLIKELY(t->exception)) return;
PROTECT(t, newExceptionHandlerTable);
object newLineNumberTable = translateLineNumberTable
(t, c, methodCode(t, context->method), reinterpret_cast<intptr_t>(start));
{ object code = methodCode(t, context->method);
code = makeCode
(t, 0, newExceptionHandlerTable, newLineNumberTable,
reinterpret_cast<uintptr_t>(start), codeMaxStack(t, code),
codeMaxLocals(t, code), 0);
set(t, context->method, MethodCode, code);
}
if (context->traceLogCount) {
RUNTIME_ARRAY(TraceElement*, elements, context->traceLogCount);
unsigned index = 0;
unsigned pathFootprint = 0;
unsigned mapCount = 0;
for (TraceElement* p = context->traceLog; p; p = p->next) {
assert(t, index < context->traceLogCount);
if (p->address) {
SubroutineTrace* trace = p->subroutineTrace;
unsigned myMapCount = 1;
if (trace) {
for (Subroutine* s = trace->path->call->subroutine;
s; s = s->stackNext)
{
unsigned callCount = s->callCount;
myMapCount *= callCount;
if (not s->visited) {
s->visited = true;
pathFootprint += sizeof(FrameMapTablePath)
+ (sizeof(int32_t) * callCount);
}
}
}
mapCount += myMapCount;
RUNTIME_ARRAY_BODY(elements)[index++] = p;
if (p->target) {
insertCallNode
(t, makeCallNode
(t, p->address->value(), p->target, p->flags, 0));
}
}
}
qsort(RUNTIME_ARRAY_BODY(elements), index,
sizeof(TraceElement*), compareTraceElementPointers);
object map;
if (pathFootprint) {
map = makeGeneralFrameMapTable
(t, context, start, RUNTIME_ARRAY_BODY(elements), index, pathFootprint,
mapCount);
} else {
map = makeSimpleFrameMapTable
(t, context, start, RUNTIME_ARRAY_BODY(elements), index);
}
set(t, methodCode(t, context->method), CodePool, map);
}
logCompile
(t, start, codeSize,
reinterpret_cast<const char*>
(&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
reinterpret_cast<const char*>
(&byteArrayBody(t, methodName(t, context->method), 0)),
reinterpret_cast<const char*>
(&byteArrayBody(t, methodSpec(t, context->method), 0)));
// for debugging:
if (false and
::strcmp
(reinterpret_cast<const char*>
(&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
"java/lang/System") == 0 and
::strcmp
(reinterpret_cast<const char*>
(&byteArrayBody(t, methodName(t, context->method), 0)),
"<clinit>") == 0)
{
trap();
}
syncInstructionCache(start, codeSize);
}
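// translate the method's bytecode into the compiler's intermediate
// form: seed the frame model from the method descriptor (marking which
// parameters hold objects, longs/doubles, floats, or ints), compile
// the main instruction stream, then compile each reachable exception
// handler and recompute the frame maps until no roots change.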
void
compile(MyThread* t, Context* context)
{
Compiler* c = context->compiler;
// fprintf(stderr, "compiling %s.%s%s\n",
// &byteArrayBody(t, className(t, methodClass(t, context->method)), 0),
// &byteArrayBody(t, methodName(t, context->method), 0),
// &byteArrayBody(t, methodSpec(t, context->method), 0));
unsigned footprint = methodParameterFootprint(t, context->method);
unsigned locals = localSize(t, context->method);
c->init(codeLength(t, methodCode(t, context->method)), footprint, locals,
alignedFrameSize(t, context->method));
RUNTIME_ARRAY(uint8_t, stackMap,
codeMaxStack(t, methodCode(t, context->method)));
Frame frame(context, RUNTIME_ARRAY_BODY(stackMap));
unsigned index = methodParameterFootprint(t, context->method);
if ((methodFlags(t, context->method) & ACC_STATIC) == 0) {
frame.set(--index, Frame::Object);
c->initLocal(1, index, Compiler::ObjectType);
}
for (MethodSpecIterator it
(t, reinterpret_cast<const char*>
(&byteArrayBody(t, methodSpec(t, context->method), 0)));
it.hasNext();)
{
switch (*it.next()) {
case 'L':
case '[':
frame.set(--index, Frame::Object);
c->initLocal(1, index, Compiler::ObjectType);
break;
case 'J':
frame.set(--index, Frame::Long);
frame.set(--index, Frame::Long);
c->initLocal(2, index, Compiler::IntegerType);
break;
case 'D':
frame.set(--index, Frame::Long);
frame.set(--index, Frame::Long);
c->initLocal(2, index, Compiler::FloatType);
break;
case 'F':
frame.set(--index, Frame::Integer);
c->initLocal(1, index, Compiler::FloatType);
break;
default:
frame.set(--index, Frame::Integer);
c->initLocal(1, index, Compiler::IntegerType);
break;
}
}
handleEntrance(t, &frame);
Compiler::State* state = c->saveState();
compile(t, &frame, 0);
if (UNLIKELY(t->exception)) return;
context->dirtyRoots = false;
unsigned eventIndex = calculateFrameMaps(t, context, 0, 0);
object eht = codeExceptionHandlerTable(t, methodCode(t, context->method));
if (eht) {
PROTECT(t, eht);
unsigned visitCount = exceptionHandlerTableLength(t, eht);
RUNTIME_ARRAY(bool, visited, visitCount);
memset(RUNTIME_ARRAY_BODY(visited), 0, visitCount * sizeof(bool));
while (visitCount) {
bool progress = false;
for (unsigned i = 0; i < exceptionHandlerTableLength(t, eht); ++i) {
c->restoreState(state);
ExceptionHandler* eh = exceptionHandlerTableBody(t, eht, i);
unsigned start = exceptionHandlerStart(eh);
if ((not RUNTIME_ARRAY_BODY(visited)[i])
and context->visitTable[start])
{
-- visitCount;
RUNTIME_ARRAY_BODY(visited)[i] = true;
progress = true;
RUNTIME_ARRAY(uint8_t, stackMap,
codeMaxStack(t, methodCode(t, context->method)));
Frame frame2(&frame, RUNTIME_ARRAY_BODY(stackMap));
unsigned end = exceptionHandlerEnd(eh);
if (exceptionHandlerIp(eh) >= start
and exceptionHandlerIp(eh) < end)
{
end = exceptionHandlerIp(eh);
}
context->eventLog.append(PushExceptionHandlerEvent);
context->eventLog.append2(start);
context->eventLog.append2(end);
for (unsigned i = 1;
i < codeMaxStack(t, methodCode(t, context->method));
++i)
{
frame2.set(localSize(t, context->method) + i, Frame::Integer);
}
compile(t, &frame2, exceptionHandlerIp(eh), start);
if (UNLIKELY(t->exception)) return;
context->eventLog.append(PopContextEvent);
eventIndex = calculateFrameMaps(t, context, 0, eventIndex);
}
}
assert(t, progress);
}
}
while (context->dirtyRoots) {
context->dirtyRoots = false;
calculateFrameMaps(t, context, 0, 0);
}
}
void
updateCall(MyThread* t, UnaryOperation op, void* returnAddress, void* target)
{
t->arch->updateCall(op, returnAddress, target);
}
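// called (via the compile-method thunk) when a not-yet-compiled method
// is invoked: find the call node for this call site, compile the
// target, and patch the caller's aligned call or jump instruction to
// point at the compiled code (or at the native thunk when a long jump
// to a native method is required).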
void*
compileMethod2(MyThread* t, void* ip)
{
object node = findCallNode(t, ip);
object target = callNodeTarget(t, node);
if (LIKELY(t->exception == 0)) {
PROTECT(t, node);
PROTECT(t, target);
t->trace->targetMethod = target;
compile(t, codeAllocator(t), 0, target);
t->trace->targetMethod = 0;
}
if (UNLIKELY(t->exception)) {
return 0;
} else {
uintptr_t address;
if ((methodFlags(t, target) & ACC_NATIVE)
and useLongJump(t, reinterpret_cast<uintptr_t>(ip)))
{
address = bootNativeThunk(t);
} else {
address = methodAddress(t, target);
}
uint8_t* updateIp = static_cast<uint8_t*>(ip);
UnaryOperation op;
if (callNodeFlags(t, node) & TraceElement::LongCall) {
if (callNodeFlags(t, node) & TraceElement::TailCall) {
op = AlignedLongJump;
} else {
op = AlignedLongCall;
}
} else if (callNodeFlags(t, node) & TraceElement::TailCall) {
op = AlignedJump;
} else {
op = AlignedCall;
}
updateCall(t, op, updateIp, reinterpret_cast<void*>(address));
return reinterpret_cast<void*>(address);
}
}
uint64_t
compileMethod(MyThread* t)
{
void* ip;
if (t->tailAddress) {
ip = t->tailAddress;
t->tailAddress = 0;
} else {
ip = t->arch->frameIp(t->stack);
}
void* r = compileMethod2(t, ip);
if (UNLIKELY(t->exception)) {
unwind(t);
} else {
return reinterpret_cast<uintptr_t>(r);
}
}
void*
compileVirtualMethod2(MyThread* t, object class_, unsigned index)
{
// If class_ has BootstrapFlag set, that means its vtable is not yet
// available. However, we must set t->trace->targetMethod to an
// appropriate method to ensure we can accurately scan the stack for
// GC roots. We find such a method by looking for a superclass with
// a vtable and using it instead:
object c = class_;
while (classVmFlags(t, c) & BootstrapFlag) {
c = classSuper(t, c);
}
t->trace->targetMethod = arrayBody(t, classVirtualTable(t, c), index);
PROTECT(t, class_);
object target = resolveTarget(t, class_, index);
PROTECT(t, target);
if (LIKELY(t->exception == 0)) {
compile(t, codeAllocator(t), 0, target);
}
t->trace->targetMethod = 0;
if (UNLIKELY(t->exception)) {
return 0;
} else {
void* address = reinterpret_cast<void*>(methodAddress(t, target));
if (methodFlags(t, target) & ACC_NATIVE) {
t->trace->nativeMethod = target;
} else {
classVtable(t, class_, methodOffset(t, target)) = address;
}
return address;
}
}
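// thunk entry for a virtual call whose target has not been compiled
// yet: the receiver and vtable index are read from t->virtualCallTarget
// and t->virtualCallIndex, the target method is resolved and compiled,
// and (for non-native targets) the class vtable slot is patched to
// point at the compiled code.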
uint64_t
compileVirtualMethod(MyThread* t)
{
object class_ = objectClass(t, static_cast<object>(t->virtualCallTarget));
t->virtualCallTarget = 0;
unsigned index = t->virtualCallIndex;
t->virtualCallIndex = 0;
void* r = compileVirtualMethod2(t, class_, index);
if (UNLIKELY(t->exception)) {
unwind(t);
} else {
return reinterpret_cast<uintptr_t>(r);
}
}
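// invoke a "fast" native method: the function is called directly with
// the thread, the method, and a pointer to the arguments as they sit
// on the Java stack; no argument marshalling or thread state
// transition is performed.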
uint64_t
invokeNativeFast(MyThread* t, object method, void* function)
{
FastNativeFunction f; memcpy(&f, &function, sizeof(void*));
return f(t, method,
static_cast<uintptr_t*>(t->stack)
+ t->arch->frameFooterSize()
+ t->arch->frameReturnAddressSize());
}
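// invoke a native method through the generic calling path: marshal the
// arguments from the Java stack into an array of machine words plus a
// parallel array of type codes (object references are passed as
// pointers to their stack slots, or null), honor ACC_SYNCHRONIZED,
// make the call in the idle state so the thread does not block the
// collector, and narrow the result to the declared return type.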
uint64_t
invokeNativeSlow(MyThread* t, object method, void* function)
{
PROTECT(t, method);
unsigned footprint = methodParameterFootprint(t, method) + 1;
if (methodFlags(t, method) & ACC_STATIC) {
++ footprint;
}
unsigned count = methodParameterCount(t, method) + 2;
RUNTIME_ARRAY(uintptr_t, args, footprint);
unsigned argOffset = 0;
RUNTIME_ARRAY(uint8_t, types, count);
unsigned typeOffset = 0;
RUNTIME_ARRAY_BODY(args)[argOffset++] = reinterpret_cast<uintptr_t>(t);
RUNTIME_ARRAY_BODY(types)[typeOffset++] = POINTER_TYPE;
uintptr_t* sp = static_cast<uintptr_t*>(t->stack)
+ t->arch->frameFooterSize()
+ t->arch->frameReturnAddressSize();
object jclass = 0;
PROTECT(t, jclass);
if (methodFlags(t, method) & ACC_STATIC) {
jclass = getJClass(t, methodClass(t, method));
RUNTIME_ARRAY_BODY(args)[argOffset++]
= reinterpret_cast<uintptr_t>(&jclass);
} else {
RUNTIME_ARRAY_BODY(args)[argOffset++]
= reinterpret_cast<uintptr_t>(sp++);
}
RUNTIME_ARRAY_BODY(types)[typeOffset++] = POINTER_TYPE;
MethodSpecIterator it
(t, reinterpret_cast<const char*>
(&byteArrayBody(t, methodSpec(t, method), 0)));
while (it.hasNext()) {
unsigned type = RUNTIME_ARRAY_BODY(types)[typeOffset++]
= fieldType(t, fieldCode(t, *it.next()));
switch (type) {
case INT8_TYPE:
case INT16_TYPE:
case INT32_TYPE:
case FLOAT_TYPE:
RUNTIME_ARRAY_BODY(args)[argOffset++] = *(sp++);
break;
case INT64_TYPE:
case DOUBLE_TYPE: {
memcpy(RUNTIME_ARRAY_BODY(args) + argOffset, sp, 8);
argOffset += (8 / BytesPerWord);
sp += 2;
} break;
case POINTER_TYPE: {
if (*sp) {
RUNTIME_ARRAY_BODY(args)[argOffset++]
= reinterpret_cast<uintptr_t>(sp);
} else {
RUNTIME_ARRAY_BODY(args)[argOffset++] = 0;
}
++ sp;
} break;
default: abort(t);
}
}
unsigned returnCode = methodReturnCode(t, method);
unsigned returnType = fieldType(t, returnCode);
uint64_t result;
if (DebugNatives) {
fprintf(stderr, "invoke native method %s.%s\n",
&byteArrayBody(t, className(t, methodClass(t, method)), 0),
&byteArrayBody(t, methodName(t, method), 0));
}
if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
if (methodFlags(t, method) & ACC_STATIC) {
acquire(t, methodClass(t, method));
} else {
acquire(t, *reinterpret_cast<object*>(RUNTIME_ARRAY_BODY(args)[1]));
}
}
Reference* reference = t->reference;
{ ENTER(t, Thread::IdleState);
result = t->m->system->call
(function,
RUNTIME_ARRAY_BODY(args),
RUNTIME_ARRAY_BODY(types),
count,
footprint * BytesPerWord,
returnType);
}
if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
if (methodFlags(t, method) & ACC_STATIC) {
release(t, methodClass(t, method));
} else {
release(t, *reinterpret_cast<object*>(RUNTIME_ARRAY_BODY(args)[1]));
}
}
if (DebugNatives) {
fprintf(stderr, "return from native method %s.%s\n",
&byteArrayBody(t, className(t, methodClass(t, method)), 0),
&byteArrayBody(t, methodName(t, method), 0));
}
if (LIKELY(t->exception == 0)) {
switch (returnCode) {
case ByteField:
case BooleanField:
result = static_cast<int8_t>(result);
break;
case CharField:
result = static_cast<uint16_t>(result);
break;
case ShortField:
result = static_cast<int16_t>(result);
break;
case FloatField:
case IntField:
result = static_cast<int32_t>(result);
break;
case LongField:
case DoubleField:
break;
case ObjectField:
result = static_cast<uintptr_t>(result) ? *reinterpret_cast<uintptr_t*>
(static_cast<uintptr_t>(result)) : 0;
break;
case VoidField:
result = 0;
break;
default: abort(t);
}
} else {
result = 0;
}
while (t->reference != reference) {
dispose(t, t->reference);
}
return result;
}
uint64_t
invokeNative2(MyThread* t, object method)
{
object native = methodRuntimeDataNative(t, getMethodRuntimeData(t, method));
if (nativeFast(t, native)) {
return invokeNativeFast(t, method, nativeFunction(t, native));
} else {
return invokeNativeSlow(t, method, nativeFunction(t, native));
}
}
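// thunk called when compiled code invokes a native method.  If the
// pending native method is not already recorded on the current trace,
// it is recovered from the call site (resolving the concrete target
// for virtual calls).  After the invocation, the thread's saved
// context is updated via transition() so that a concurrent stack trace
// sees a consistent frame, and the result is returned to the caller.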
uint64_t
invokeNative(MyThread* t)
{
if (t->trace->nativeMethod == 0) {
void* ip;
if (t->tailAddress) {
ip = t->tailAddress;
t->tailAddress = 0;
} else {
ip = t->arch->frameIp(t->stack);
}
object node = findCallNode(t, ip);
object target = callNodeTarget(t, node);
if (callNodeFlags(t, node) & TraceElement::VirtualCall) {
target = resolveTarget(t, t->stack, target);
}
t->trace->nativeMethod = target;
}
assert(t, t->tailAddress == 0);
uint64_t result = 0;
t->trace->targetMethod = t->trace->nativeMethod;
if (LIKELY(t->exception == 0)) {
resolveNative(t, t->trace->nativeMethod);
if (LIKELY(t->exception == 0)) {
result = invokeNative2(t, t->trace->nativeMethod);
}
}
unsigned parameterFootprint = methodParameterFootprint
(t, t->trace->targetMethod);
t->trace->targetMethod = 0;
t->trace->nativeMethod = 0;
if (UNLIKELY(t->exception)) {
unwind(t);
} else {
uintptr_t* stack = static_cast<uintptr_t*>(t->stack);
if (TailCalls
and t->arch->argumentFootprint(parameterFootprint)
> t->arch->stackAlignmentInWords())
{
stack += t->arch->argumentFootprint(parameterFootprint)
- t->arch->stackAlignmentInWords();
}
stack += t->arch->frameReturnAddressSize();
transition(t, t->arch->frameIp(t->stack), stack, t->base, t->continuation,
t->trace);
return result;
}
}
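// binary-search the simple frame map table for the entry whose code
// offset matches "offset", returning the packed map array and the bit
// index at which that trace point's map begins.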
void
findFrameMapInSimpleTable(MyThread* t, object method, object table,
int32_t offset, int32_t** map, unsigned* start)
{
unsigned tableSize = simpleFrameMapTableSize(t, method, table);
unsigned indexSize = intArrayLength(t, table) - tableSize;
*map = &intArrayBody(t, table, indexSize);
unsigned bottom = 0;
unsigned top = indexSize;
for (unsigned span = top - bottom; span; span = top - bottom) {
unsigned middle = bottom + (span / 2);
int32_t v = intArrayBody(t, table, middle);
if (offset == v) {
*start = frameMapSizeInBits(t, method) * middle;
return;
} else if (offset < v) {
top = middle;
} else {
bottom = middle + 1;
}
}
abort(t);
}
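// code reached through JSR subroutines has one frame map per path of
// subroutine calls.  Determine which path is active by reading the
// return address saved at path->stackIndex on the given stack and
// matching it against the recorded call sites, recursing through the
// enclosing paths; the result selects which of the trace point's maps
// to use.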
unsigned
findFrameMap(MyThread* t, void* stack, object method, object table,
unsigned pathIndex)
{
if (pathIndex) {
FrameMapTablePath* path = reinterpret_cast<FrameMapTablePath*>
(&byteArrayBody(t, table, pathIndex));
void* address = static_cast<void**>(stack)[path->stackIndex];
uint8_t* base = reinterpret_cast<uint8_t*>(methodAddress(t, method));
for (unsigned i = 0; i < path->elementCount; ++i) {
if (address == base + path->elements[i]) {
return i + (path->elementCount * findFrameMap
(t, stack, method, table, path->next));
}
}
abort(t);
} else {
return 0;
}
}
void
findFrameMapInGeneralTable(MyThread* t, void* stack, object method,
object table, int32_t offset, int32_t** map,
unsigned* start)
{
FrameMapTableHeader* header = reinterpret_cast<FrameMapTableHeader*>
(&byteArrayBody(t, table, 0));
FrameMapTableIndexElement* index
= reinterpret_cast<FrameMapTableIndexElement*>
(&byteArrayBody(t, table, sizeof(FrameMapTableHeader)));
*map = reinterpret_cast<int32_t*>(index + header->indexCount);
unsigned bottom = 0;
unsigned top = header->indexCount;
for (unsigned span = top - bottom; span; span = top - bottom) {
unsigned middle = bottom + (span / 2);
FrameMapTableIndexElement* v = index + middle;
if (offset == v->offset) {
*start = v->base + (findFrameMap(t, stack, method, table, v->path)
* frameMapSizeInBits(t, method));
return;
} else if (offset < v->offset) {
top = middle;
} else {
bottom = middle + 1;
}
}
abort(t);
}
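// find the frame map for the given code offset, dispatching on the
// table type: an int array is a simple table, anything else is the
// general (subroutine-aware) table.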
void
findFrameMap(MyThread* t, void* stack, object method, int32_t offset,
int32_t** map, unsigned* start)
{
object table = codePool(t, methodCode(t, method));
if (objectClass(t, table) == type(t, Machine::IntArrayType)) {
findFrameMapInSimpleTable(t, method, table, offset, map, start);
} else {
findFrameMapInGeneralTable(t, stack, method, table, offset, map, start);
}
}
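// visit the object references in a single Java frame during GC: look
// up the frame map for the frame's instruction pointer and visit each
// local or stack slot whose bit is set.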
void
visitStackAndLocals(MyThread* t, Heap::Visitor* v, void* frame, object method,
void* ip)
{
unsigned count = frameMapSizeInBits(t, method);
if (count) {
void* stack = stackForFrame(t, frame, method);
int32_t* map;
unsigned offset;
findFrameMap
(t, stack, method, difference
(ip, reinterpret_cast<void*>(methodAddress(t, method))), &map, &offset);
for (unsigned i = 0; i < count; ++i) {
int j = offset + i;
if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) {
v->visit(localObject(t, stack, method, i));
}
}
}
}
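// visit the reference arguments of a call in progress, i.e. a callee
// (trace->targetMethod) whose arguments are on the stack but whose
// frame has not been built yet: walk the method descriptor and visit
// the object and array argument slots.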
void
visitArgument(MyThread* t, Heap::Visitor* v, void* stack, unsigned index)
{
v->visit(static_cast<object*>(stack)
+ index
+ t->arch->frameReturnAddressSize()
+ t->arch->frameFooterSize());
}
void
visitArguments(MyThread* t, Heap::Visitor* v, void* stack, object method)
{
unsigned index = 0;
if ((methodFlags(t, method) & ACC_STATIC) == 0) {
visitArgument(t, v, stack, index++);
}
for (MethodSpecIterator it
(t, reinterpret_cast<const char*>
(&byteArrayBody(t, methodSpec(t, method), 0)));
it.hasNext();)
{
switch (*it.next()) {
case 'L':
case '[':
visitArgument(t, v, stack, index++);
break;
case 'J':
case 'D':
index += 2;
break;
default:
++ index;
break;
}
}
}
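// walk every Java frame on this thread's stack for GC, starting from
// the thread's saved ip/stack/base and following the chain of
// CallTrace records across native frames; the arguments of an
// in-progress call are visited first via visitArguments.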
void
visitStack(MyThread* t, Heap::Visitor* v)
{
void* ip = t->arch->frameIp(t->stack);
void* base = t->base;
void* stack = t->stack;
MyThread::CallTrace* trace = t->trace;
object targetMethod = (trace ? trace->targetMethod : 0);
while (stack) {
if (targetMethod) {
visitArguments(t, v, stack, targetMethod);
targetMethod = 0;
}
object method = methodForIp(t, ip);
if (method) {
PROTECT(t, method);
t->arch->nextFrame(&stack, &base);
visitStackAndLocals(t, v, stack, method, ip);
ip = t->arch->frameIp(stack);
} else if (trace) {
stack = trace->stack;
base = trace->base;
ip = t->arch->frameIp(stack);
trace = trace->next;
if (trace) {
targetMethod = trace->targetMethod;
}
} else {
break;
}
}
}
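// walk the object references captured in a continuation's saved frame,
// using the same frame maps as visitStackAndLocals but with slot
// offsets translated to positions within the continuation object.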
void
walkContinuationBody(MyThread* t, Heap::Walker* w, object c, int start)
{
const int BodyOffset = ContinuationBody / BytesPerWord;
object method = static_cast<object>
(t->m->heap->follow(continuationMethod(t, c)));
int count = frameMapSizeInBits(t, method);
if (count) {
int stack = BodyOffset
+ (continuationFramePointerOffset(t, c) / BytesPerWord)
- t->arch->framePointerOffset()
- stackOffsetFromFrame(t, method);
int first = stack + localOffsetFromStack(t, count - 1, method);
if (start > first) {
count -= start - first;
}
int32_t* map;
unsigned offset;
findFrameMap
(t, reinterpret_cast<uintptr_t*>(c) + stack, method, difference
(continuationAddress(t, c),
reinterpret_cast<void*>(methodAddress(t, method))), &map, &offset);
for (int i = count - 1; i >= 0; --i) {
int j = offset + i;
if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) {
if (not w->visit(stack + localOffsetFromStack(t, i, method))) {
return;
}
2009-05-03 20:57:11 +00:00
}
}
}
}
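// transfer control into a continuation: if an exception is supplied,
// the unwind target is located first; the thread's context is then
// updated and execution jumps to the saved ip/stack with the given
// result.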
void
callContinuation(MyThread* t, object continuation, object result,
object exception, void* ip, void* base, void* stack)
{
assert(t, t->exception == 0);
if (exception) {
t->exception = exception;
MyThread::TraceContext c(t, ip, stack, base, continuation, t->trace);
findUnwindTarget(t, &ip, &base, &stack, &continuation);
}
t->trace->nativeMethod = 0;
t->trace->targetMethod = 0;
transition(t, ip, stack, base, continuation, t->trace);
vmJump(ip, base, stack, t, reinterpret_cast<uintptr_t>(result), 0);
}
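// return a pointer to the return-type portion of a method descriptor,
// i.e. the character just past the closing ')'.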
int8_t*
returnSpec(MyThread* t, object method)
{
int8_t* s = &byteArrayBody(t, methodSpec(t, method), 0);
while (*s and *s != ')') ++ s;
expect(t, *s == ')');
return s + 1;
}
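// resolve the class named by a method's return type descriptor: array
// descriptors are used as class names verbatim, while "Lfoo/Bar;" is
// trimmed to "foo/Bar" before resolution.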
object
returnClass(MyThread* t, object method)
{
PROTECT(t, method);
int8_t* spec = returnSpec(t, method);
unsigned length = strlen(reinterpret_cast<char*>(spec));
object name;
if (*spec == '[') {
name = makeByteArray(t, length + 1);
memcpy(&byteArrayBody(t, name, 0), spec, length);
} else {
assert(t, *spec == 'L');
assert(t, spec[length - 1] == ';');
name = makeByteArray(t, length - 1);
memcpy(&byteArrayBody(t, name, 0), spec + 1, length - 2);
}
return resolveClass(t, classLoader(t, methodClass(t, method)), name);
}
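// two methods have compatible return types if their return codes match
// (and, for object returns, the new method's return class is
// assignable to the old method's) or if the old method returns void.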
bool
compatibleReturnType(MyThread* t, object oldMethod, object newMethod)
{
if (oldMethod == newMethod) {
return true;
} else if (methodReturnCode(t, oldMethod) == methodReturnCode(t, newMethod))
{
if (methodReturnCode(t, oldMethod) == ObjectField) {
PROTECT(t, newMethod);
object oldClass = returnClass(t, oldMethod);
PROTECT(t, oldClass);
object newClass = returnClass(t, newMethod);
return isAssignableFrom(t, oldClass, newClass);
} else {
return true;
}
} else {
return methodReturnCode(t, oldMethod) == VoidField;
}
}
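// Transfers control to the given method on the specified base and stack,
// passing the trailing variadic arguments as machine words via
// vmJumpAndInvoke.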
void
jumpAndInvoke(MyThread* t, object method, void* base, void* stack, ...)
{
t->trace->targetMethod = 0;
if (methodFlags(t, method) & ACC_NATIVE) {
t->trace->nativeMethod = method;
} else {
t->trace->nativeMethod = 0;
}
unsigned argumentCount = methodParameterFootprint(t, method);
RUNTIME_ARRAY(uintptr_t, arguments, argumentCount);
va_list a; va_start(a, stack);
for (unsigned i = 0; i < argumentCount; ++i) {
RUNTIME_ARRAY_BODY(arguments)[i] = va_arg(a, uintptr_t);
}
va_end(a);
vmJumpAndInvoke
(t, reinterpret_cast<void*>(methodAddress(t, method)),
base,
stack,
argumentCount * BytesPerWord,
RUNTIME_ARRAY_BODY(arguments),
(t->arch->alignFrameSize(t->arch->argumentFootprint(argumentCount))
+ t->arch->frameReturnAddressSize())
* BytesPerWord);
}
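// Decides how to resume the given continuation relative to the current
// context -- call it directly, unwind to an enclosing context, rewind via
// avian/Continuations.rewind, or throw if the contexts are incompatible --
// and then performs that action.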
void
callContinuation(MyThread* t, object continuation, object result,
object exception)
{
enum {
Call,
Unwind,
Rewind,
Throw
} action;
object nextContinuation = 0;
if (t->continuation == 0
or continuationContext(t, t->continuation)
!= continuationContext(t, continuation))
{
PROTECT(t, continuation);
PROTECT(t, result);
PROTECT(t, exception);
if (compatibleReturnType
(t, t->trace->originalMethod, continuationContextMethod
(t, continuationContext(t, continuation))))
{
object oldContext;
object unwindContext;
if (t->continuation) {
oldContext = continuationContext(t, t->continuation);
unwindContext = oldContext;
} else {
oldContext = 0;
unwindContext = 0;
}
object rewindContext = 0;
for (object newContext = continuationContext(t, continuation);
newContext; newContext = continuationContextNext(t, newContext))
{
if (newContext == oldContext) {
unwindContext = 0;
break;
} else {
rewindContext = newContext;
}
}
if (unwindContext
and continuationContextContinuation(t, unwindContext))
{
nextContinuation = continuationContextContinuation(t, unwindContext);
result = makeUnwindResult(t, continuation, result, exception);
action = Unwind;
} else if (rewindContext
and continuationContextContinuation(t, rewindContext))
{
nextContinuation = continuationContextContinuation(t, rewindContext);
action = Rewind;
if (root(t, RewindMethod) == 0) {
PROTECT(t, nextContinuation);
object method = resolveMethod
(t, root(t, Machine::BootLoader), "avian/Continuations", "rewind",
"(Ljava/lang/Runnable;Lavian/Callback;Ljava/lang/Object;"
"Ljava/lang/Throwable;)V");
if (method) {
setRoot(t, RewindMethod, method);
compile(t, local::codeAllocator(t), 0, method);
if (UNLIKELY(t->exception)) {
action = Throw;
}
} else {
action = Throw;
}
}
} else {
action = Call;
}
} else {
t->exception = makeThrowable
(t, Machine::IncompatibleContinuationExceptionType);
action = Throw;
}
} else {
action = Call;
}
void* ip;
void* base;
void* stack;
object threadContinuation;
findUnwindTarget(t, &ip, &base, &stack, &threadContinuation);
switch (action) {
case Call: {
callContinuation(t, continuation, result, exception, ip, base, stack);
} break;
case Unwind: {
callContinuation(t, nextContinuation, result, 0, ip, base, stack);
} break;
case Rewind: {
transition(t, 0, 0, 0, nextContinuation, t->trace);
jumpAndInvoke
(t, root(t, RewindMethod), base, stack,
continuationContextBefore(t, continuationContext(t, nextContinuation)),
continuation, result, exception);
} break;
case Throw: {
transition(t, ip, stack, base, threadContinuation, t->trace);
vmJump(ip, base, stack, t, 0, 0);
} break;
default:
abort(t);
}
}
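// Captures the current continuation and invokes the receiver's
// avian/CallbackReceiver.receive method with it.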
void
callWithCurrentContinuation(MyThread* t, object receiver)
{
object method = 0;
void* ip = 0;
void* base = 0;
void* stack = 0;
{ PROTECT(t, receiver);
if (root(t, ReceiveMethod) == 0) {
object m = resolveMethod
(t, root(t, Machine::BootLoader), "avian/CallbackReceiver", "receive",
"(Lavian/Callback;)Ljava/lang/Object;");
if (m) {
setRoot(t, ReceiveMethod, m);
object continuationClass = type(t, Machine::ContinuationType);
if (classVmFlags(t, continuationClass) & BootstrapFlag) {
resolveSystemClass
(t, root(t, Machine::BootLoader),
vm::className(t, continuationClass));
}
}
}
if (LIKELY(t->exception == 0)) {
method = findInterfaceMethod
(t, root(t, ReceiveMethod), objectClass(t, receiver));
PROTECT(t, method);
compile(t, local::codeAllocator(t), 0, method);
if (LIKELY(t->exception == 0)) {
t->continuation = makeCurrentContinuation(t, &ip, &base, &stack);
}
}
}
if (LIKELY(t->exception == 0)) {
jumpAndInvoke(t, method, base, stack, receiver, t->continuation);
} else {
unwind(t);
}
}
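// Captures the current continuation, records the before/after actions in a
// new continuation context, and invokes avian/Continuations.wind(before,
// thunk, after).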
void
dynamicWind(MyThread* t, object before, object thunk, object after)
{
void* ip = 0;
void* base = 0;
void* stack = 0;
{ PROTECT(t, before);
PROTECT(t, thunk);
PROTECT(t, after);
if (root(t, WindMethod) == 0) {
object method = resolveMethod
(t, root(t, Machine::BootLoader), "avian/Continuations", "wind",
"(Ljava/lang/Runnable;Ljava/util/concurrent/Callable;"
"Ljava/lang/Runnable;)Lavian/Continuations$UnwindResult;");
if (method) {
setRoot(t, WindMethod, method);
compile(t, local::codeAllocator(t), 0, method);
}
}
if (LIKELY(t->exception == 0)) {
t->continuation = makeCurrentContinuation(t, &ip, &base, &stack);
object newContext = makeContinuationContext
(t, continuationContext(t, t->continuation), before, after,
t->continuation, t->trace->originalMethod);
set(t, t->continuation, ContinuationContext, newContext);
}
}
if (LIKELY(t->exception == 0)) {
jumpAndInvoke(t, root(t, WindMethod), base, stack, before, thunk, after);
} else {
unwind(t);
}
}
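// Marshals the arguments of a Java invocation into a flat array of machine
// words plus a parallel mask marking which slots hold object references, so
// the garbage collector can visit them while the call is being set up.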
class ArgumentList {
public:
ArgumentList(Thread* t, uintptr_t* array, unsigned size, bool* objectMask,
object this_, const char* spec, bool indirectObjects,
va_list arguments):
t(static_cast<MyThread*>(t)),
array(array),
objectMask(objectMask),
size(size),
position(0),
protector(this)
{
if (this_) {
addObject(this_);
}
for (MethodSpecIterator it(t, spec); it.hasNext();) {
switch (*it.next()) {
case 'L':
case '[':
if (indirectObjects) {
object* v = va_arg(arguments, object*);
addObject(v ? *v : 0);
} else {
addObject(va_arg(arguments, object));
}
break;
case 'J':
case 'D':
addLong(va_arg(arguments, uint64_t));
break;
default:
addInt(va_arg(arguments, uint32_t));
break;
}
}
}
ArgumentList(Thread* t, uintptr_t* array, unsigned size, bool* objectMask,
object this_, const char* spec, object arguments):
t(static_cast<MyThread*>(t)),
array(array),
objectMask(objectMask),
size(size),
position(0),
protector(this)
{
if (this_) {
addObject(this_);
}
unsigned index = 0;
for (MethodSpecIterator it(t, spec); it.hasNext();) {
switch (*it.next()) {
case 'L':
case '[':
addObject(objectArrayBody(t, arguments, index++));
break;
case 'J':
case 'D':
addLong(cast<int64_t>(objectArrayBody(t, arguments, index++),
BytesPerWord));
break;
default:
addInt(cast<int32_t>(objectArrayBody(t, arguments, index++),
BytesPerWord));
break;
}
}
}
void addObject(object v) {
assert(t, position < size);
array[position] = reinterpret_cast<uintptr_t>(v);
objectMask[position] = true;
++ position;
}
void addInt(uintptr_t v) {
assert(t, position < size);
array[position] = v;
objectMask[position] = false;
++ position;
}
void addLong(uint64_t v) {
assert(t, position < size - 1);
memcpy(array + position, &v, 8);
objectMask[position] = false;
objectMask[position + 1] = false;
position += 2;
}
MyThread* t;
uintptr_t* array;
bool* objectMask;
unsigned size;
unsigned position;
class MyProtector: public Thread::Protector {
public:
MyProtector(ArgumentList* list): Protector(list->t), list(list) { }
virtual void visit(Heap::Visitor* v) {
for (unsigned i = 0; i < list->position; ++i) {
if (list->objectMask[i]) {
v->visit(reinterpret_cast<object*>(list->array + i));
}
}
}
ArgumentList* list;
} protector;
};
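// Invokes a compiled method with the given marshalled arguments, checking
// for stack overflow first and boxing the result according to the method's
// return code.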
object
invoke(Thread* thread, object method, ArgumentList* arguments)
{
MyThread* t = static_cast<MyThread*>(thread);
uintptr_t stackLimit = t->stackLimit;
uintptr_t stackPosition = reinterpret_cast<uintptr_t>(&t);
if (stackLimit == 0) {
t->stackLimit = stackPosition - StackSizeInBytes;
} else if (stackPosition < stackLimit) {
t->exception = makeThrowable(t, Machine::StackOverflowErrorType);
return 0;
}
unsigned returnCode = methodReturnCode(t, method);
unsigned returnType = fieldType(t, returnCode);
uint64_t result;
{ MyThread::CallTrace trace(t, method);
assert(t, arguments->position == arguments->size);
result = vmInvoke
(t, reinterpret_cast<void*>(methodAddress(t, method)),
arguments->array,
arguments->position * BytesPerWord,
t->arch->alignFrameSize
(t->arch->argumentFootprint(arguments->position))
* BytesPerWord,
returnType);
}
t->stackLimit = stackLimit;
if (t->exception) {
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
collect(t, Heap::MinorCollection);
}
return 0;
}
object r;
switch (returnCode) {
case ByteField:
case BooleanField:
case CharField:
case ShortField:
case FloatField:
case IntField:
r = makeInt(t, result);
break;
case LongField:
case DoubleField:
r = makeLong(t, result);
break;
case ObjectField:
r = reinterpret_cast<object>(result);
break;
case VoidField:
r = 0;
break;
default:
abort(t);
}
return r;
}
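// Translates a hardware fault (e.g. a null-pointer or divide-by-zero trap)
// raised in compiled Java code into a Java exception and unwinds to the
// nearest handler.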
class SignalHandler: public System::SignalHandler {
public:
SignalHandler(Machine::Type type, Machine::Root root, unsigned fixedSize):
m(0), type(type), root(root), fixedSize(fixedSize) { }
virtual bool handleSignal(void** ip, void** base, void** stack,
void** thread)
{
MyThread* t = static_cast<MyThread*>(m->localThread->get());
if (t and t->state == Thread::ActiveState) {
object node = methodForIp(t, *ip);
if (node) {
// add one to the IP since findLineNumber will subtract one
// when we make the trace:
MyThread::TraceContext context
(t, static_cast<uint8_t*>(*ip) + 1,
static_cast<void**>(*stack) - t->arch->frameReturnAddressSize(),
*base, t->continuation, t->trace);
if (ensure(t, fixedSize + traceSize(t))) {
atomicOr(&(t->flags), Thread::TracingFlag);
t->exception = makeThrowable(t, type);
atomicAnd(&(t->flags), ~Thread::TracingFlag);
} else {
// not enough memory available for a new exception and stack
// trace -- use a preallocated instance instead
t->exception = vm::root(t, root);
}
// printTrace(t, t->exception);
object continuation;
findUnwindTarget(t, ip, base, stack, &continuation);
transition(t, ip, stack, base, continuation, t->trace);
*thread = t;
return true;
}
}
if (compileLog) {
fflush(compileLog);
}
return false;
}
Machine* m;
Machine::Type type;
Machine::Root root;
unsigned fixedSize;
};
bool
isThunk(MyThread* t, void* ip);
bool
isVirtualThunk(MyThread* t, void* ip);
bool
isThunkUnsafeStack(MyThread* t, void* ip);
void
boot(MyThread* t, BootImage* image);
class MyProcessor;
MyProcessor*
processor(MyThread* t);
void
compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p);
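// The Processor implementation for this JIT compiler: creates threads,
// compiles and invokes methods, walks stacks, and manages the executable
// code allocator and boot image.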
class MyProcessor: public Processor {
public:
class Thunk {
public:
Thunk():
start(0), frameSavedOffset(0), length(0)
{ }
Thunk(uint8_t* start, unsigned frameSavedOffset, unsigned length):
start(start), frameSavedOffset(frameSavedOffset), length(length)
{ }
uint8_t* start;
unsigned frameSavedOffset;
unsigned length;
};
class ThunkCollection {
public:
Thunk default_;
Thunk defaultVirtual;
Thunk native;
Thunk aioob;
Thunk stackOverflow;
Thunk table;
};
MyProcessor(System* s, Allocator* allocator, bool useNativeFeatures):
s(s),
allocator(allocator),
roots(0),
bootImage(0),
segFaultHandler(Machine::NullPointerExceptionType,
Machine::NullPointerException,
FixedSizeOfNullPointerException),
divideByZeroHandler(Machine::ArithmeticExceptionType,
Machine::ArithmeticException,
FixedSizeOfArithmeticException),
codeAllocator(s, 0, 0),
callTableSize(0),
useNativeFeatures(useNativeFeatures)
{ }
virtual Thread*
makeThread(Machine* m, object javaThread, Thread* parent)
{
MyThread* t = new (m->heap->allocate(sizeof(MyThread)))
MyThread(m, javaThread, static_cast<MyThread*>(parent),
useNativeFeatures);
t->init();
if (false) {
fprintf(stderr, "%d\n", difference(&(t->stack), t));
fprintf(stderr, "%d\n", difference(&(t->continuation), t));
fprintf(stderr, "%d\n", difference(&(t->exception), t));
fprintf(stderr, "%d\n", difference(&(t->exceptionStackAdjustment), t));
fprintf(stderr, "%d\n", difference(&(t->exceptionOffset), t));
fprintf(stderr, "%d\n", difference(&(t->exceptionHandler), t));
exit(0);
}
return t;
}
virtual object
makeMethod(vm::Thread* t,
uint8_t vmFlags,
uint8_t returnCode,
uint8_t parameterCount,
uint8_t parameterFootprint,
uint16_t flags,
uint16_t offset,
object name,
object spec,
object addendum,
object class_,
object code)
{
if (code) {
codeCompiled(t, code) = local::defaultThunk(static_cast<MyThread*>(t));
}
return vm::makeMethod
(t, vmFlags, returnCode, parameterCount, parameterFootprint, flags,
offset, 0, 0, name, spec, addendum, class_, code);
}
virtual object
makeClass(vm::Thread* t,
uint16_t flags,
uint16_t vmFlags,
uint16_t fixedSize,
uint8_t arrayElementSize,
uint8_t arrayDimensions,
object objectMask,
object name,
object sourceFile,
object super,
object interfaceTable,
object virtualTable,
object fieldTable,
object methodTable,
object staticTable,
object addendum,
object loader,
unsigned vtableLength)
{
return vm::makeClass
(t, flags, vmFlags, fixedSize, arrayElementSize, arrayDimensions,
0, objectMask, name, sourceFile, super, interfaceTable,
virtualTable, fieldTable, methodTable, staticTable, addendum, loader,
vtableLength);
}
virtual void
initVtable(Thread* t, object c)
{
PROTECT(t, c);
for (int i = classLength(t, c) - 1; i >= 0; --i) {
void* thunk = reinterpret_cast<void*>
(virtualThunk(static_cast<MyThread*>(t), i));
classVtable(t, c, i) = thunk;
}
}
virtual bool
isInitializing(Thread* t, object c)
{
for (Thread::ClassInitStack* s = t->classInitStack; s; s = s->next) {
if (s->class_ == c) {
return true;
}
}
return false;
}
virtual void
visitObjects(Thread* vmt, Heap::Visitor* v)
{
MyThread* t = static_cast<MyThread*>(vmt);
if (t == t->m->rootThread) {
v->visit(&roots);
}
for (MyThread::CallTrace* trace = t->trace; trace; trace = trace->next) {
v->visit(&(trace->continuation));
v->visit(&(trace->nativeMethod));
v->visit(&(trace->targetMethod));
v->visit(&(trace->originalMethod));
}
v->visit(&(t->continuation));
for (Reference* r = t->reference; r; r = r->next) {
v->visit(&(r->target));
}
visitStack(t, v);
}
virtual void
walkStack(Thread* vmt, StackVisitor* v)
{
MyThread* t = static_cast<MyThread*>(vmt);
MyStackWalker walker(t);
walker.walk(v);
}
virtual int
lineNumber(Thread* vmt, object method, int ip)
{
return findLineNumber(static_cast<MyThread*>(vmt), method, ip);
}
virtual object*
makeLocalReference(Thread* vmt, object o)
{
if (o) {
MyThread* t = static_cast<MyThread*>(vmt);
for (Reference* r = t->reference; r; r = r->next) {
if (r->target == o) {
acquire(t, r);
return &(r->target);
}
}
Reference* r = new (t->m->heap->allocate(sizeof(Reference)))
Reference(o, &(t->reference));
acquire(t, r);
return &(r->target);
} else {
return 0;
}
}
virtual void
disposeLocalReference(Thread* t, object* r)
{
if (r) {
release(t, reinterpret_cast<Reference*>(r));
}
}
virtual object
invokeArray(Thread* t, object method, object this_, object arguments)
{
if (UNLIKELY(t->exception)) return 0;
assert(t, t->state == Thread::ActiveState
or t->state == Thread::ExclusiveState);
assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));
method = findMethod(t, method, this_);
const char* spec = reinterpret_cast<char*>
(&byteArrayBody(t, methodSpec(t, method), 0));
unsigned size = methodParameterFootprint(t, method);
RUNTIME_ARRAY(uintptr_t, array, size);
RUNTIME_ARRAY(bool, objectMask, size);
ArgumentList list
(t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
this_, spec, arguments);
PROTECT(t, method);
compile(static_cast<MyThread*>(t),
local::codeAllocator(static_cast<MyThread*>(t)), 0, method);
if (LIKELY(t->exception == 0)) {
return local::invoke(t, method, &list);
}
return 0;
}
virtual object
invokeList(Thread* t, object method, object this_, bool indirectObjects,
va_list arguments)
{
if (UNLIKELY(t->exception)) return 0;
assert(t, t->state == Thread::ActiveState
or t->state == Thread::ExclusiveState);
assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));
method = findMethod(t, method, this_);
const char* spec = reinterpret_cast<char*>
(&byteArrayBody(t, methodSpec(t, method), 0));
unsigned size = methodParameterFootprint(t, method);
RUNTIME_ARRAY(uintptr_t, array, size);
RUNTIME_ARRAY(bool, objectMask, size);
ArgumentList list
(t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
this_, spec, indirectObjects, arguments);
PROTECT(t, method);
compile(static_cast<MyThread*>(t),
local::codeAllocator(static_cast<MyThread*>(t)), 0, method);
if (LIKELY(t->exception == 0)) {
return local::invoke(t, method, &list);
}
return 0;
}
virtual object
invokeList(Thread* t, object loader, const char* className,
const char* methodName, const char* methodSpec,
object this_, va_list arguments)
{
if (UNLIKELY(t->exception)) return 0;
assert(t, t->state == Thread::ActiveState
or t->state == Thread::ExclusiveState);
unsigned size = parameterFootprint(t, methodSpec, this_ == 0);
RUNTIME_ARRAY(uintptr_t, array, size);
RUNTIME_ARRAY(bool, objectMask, size);
ArgumentList list
(t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
this_, methodSpec, false, arguments);
object method = resolveMethod
(t, loader, className, methodName, methodSpec);
if (LIKELY(t->exception == 0)) {
assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));
PROTECT(t, method);
compile(static_cast<MyThread*>(t),
local::codeAllocator(static_cast<MyThread*>(t)), 0, method);
if (LIKELY(t->exception == 0)) {
return local::invoke(t, method, &list);
}
}
return 0;
}
virtual void dispose(Thread* vmt) {
MyThread* t = static_cast<MyThread*>(vmt);
while (t->reference) {
vm::dispose(t, t->reference);
}
t->arch->release();
t->m->heap->free(t, sizeof(*t));
}
virtual void dispose() {
if (codeAllocator.base) {
s->freeExecutable(codeAllocator.base, codeAllocator.capacity);
}
s->handleSegFault(0);
allocator->free(this, sizeof(*this));
}
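  // Builds a stack trace for another thread by asking the system to visit
  // it and choosing initial context values according to the kind of code
  // the thread was executing when interrupted.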
virtual object getStackTrace(Thread* vmt, Thread* vmTarget) {
MyThread* t = static_cast<MyThread*>(vmt);
MyThread* target = static_cast<MyThread*>(vmTarget);
MyProcessor* p = this;
class Visitor: public System::ThreadVisitor {
public:
Visitor(MyThread* t, MyProcessor* p, MyThread* target):
t(t), p(p), target(target), trace(0)
{ }
virtual void visit(void* ip, void* base, void* stack) {
MyThread::TraceContext c(target);
if (methodForIp(t, ip)) {
// we caught the thread in Java code - use the register values
c.ip = ip;
c.base = base;
c.stack = stack;
} else if (target->transition) {
// we caught the thread in native code while in the middle
// of updating the context fields (MyThread::stack,
// MyThread::base, etc.)
static_cast<MyThread::Context&>(c) = *(target->transition);
} else if (isVmInvokeUnsafeStack(ip)) {
// we caught the thread in native code just after returning
// from java code, but before clearing MyThread::stack
// (which now contains a garbage value), and the most recent
// Java frame, if any, can be found in
// MyThread::continuation or MyThread::trace
c.ip = 0;
c.base = 0;
c.stack = 0;
} else if (target->stack
and (not isThunkUnsafeStack(t, ip))
and (not isVirtualThunk(t, ip)))
{
// we caught the thread in a thunk or native code, and the
// saved stack and base pointers indicate the most recent
// Java frame on the stack
c.ip = t->arch->frameIp(target->stack);
c.base = target->base;
c.stack = target->stack;
} else if (isThunk(t, ip) or isVirtualThunk(t, ip)) {
// we caught the thread in a thunk where the stack and base
// registers indicate the most recent Java frame on the
// stack
c.ip = t->arch->frameIp(stack);
c.base = base;
c.stack = stack;
} else {
// we caught the thread in native code, and the most recent
// Java frame, if any, can be found in
// MyThread::continuation or MyThread::trace
c.ip = 0;
c.base = 0;
c.stack = 0;
}
if (ensure(t, traceSize(target))) {
atomicOr(&(t->flags), Thread::TracingFlag);
trace = makeTrace(t, target);
atomicAnd(&(t->flags), ~Thread::TracingFlag);
}
}
MyThread* t;
MyProcessor* p;
MyThread* target;
object trace;
} visitor(t, p, target);
t->m->system->visit(t->systemThread, target->systemThread, &visitor);
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
PROTECT(t, visitor.trace);
collect(t, Heap::MinorCollection);
}
return visitor.trace ? visitor.trace : makeObjectArray(t, 0);
}
virtual void initialize(BootImage* image, uint8_t* code, unsigned capacity) {
bootImage = image;
codeAllocator.base = code;
codeAllocator.capacity = capacity;
}
virtual void compileMethod(Thread* vmt, Zone* zone, object* constants,
object* calls, DelayedPromise** addresses,
object method)
{
MyThread* t = static_cast<MyThread*>(vmt);
BootContext bootContext(t, *constants, *calls, *addresses, zone);
compile(t, &codeAllocator, &bootContext, method);
*constants = bootContext.constants;
*calls = bootContext.calls;
*addresses = bootContext.addresses;
}
virtual void visitRoots(Thread* t, HeapWalker* w) {
bootImage->methodTree = w->visitRoot(root(t, MethodTree));
bootImage->methodTreeSentinal = w->visitRoot(root(t, MethodTreeSentinal));
bootImage->virtualThunks = w->visitRoot(root(t, VirtualThunks));
}
virtual unsigned* makeCallTable(Thread* t, HeapWalker* w) {
bootImage->codeSize = codeAllocator.offset;
bootImage->callCount = callTableSize;
unsigned* table = static_cast<unsigned*>
(t->m->heap->allocate(callTableSize * sizeof(unsigned) * 2));
unsigned index = 0;
for (unsigned i = 0; i < arrayLength(t, root(t, CallTable)); ++i) {
for (object p = arrayBody(t, root(t, CallTable), i);
p; p = callNodeNext(t, p))
{
table[index++] = callNodeAddress(t, p)
- reinterpret_cast<uintptr_t>(codeAllocator.base);
table[index++] = w->map()->find(callNodeTarget(t, p))
| (static_cast<unsigned>(callNodeFlags(t, p)) << BootShift);
}
}
return table;
}
virtual void boot(Thread* t, BootImage* image) {
if (codeAllocator.base == 0) {
codeAllocator.base = static_cast<uint8_t*>
(s->tryAllocateExecutable(ExecutableAreaSizeInBytes));
codeAllocator.capacity = ExecutableAreaSizeInBytes;
}
if (image) {
local::boot(static_cast<MyThread*>(t), image);
} else {
roots = makeArray(t, RootCount);
setRoot(t, CallTable, makeArray(t, 128));
setRoot(t, MethodTreeSentinal, makeTreeNode(t, 0, 0, 0));
setRoot(t, MethodTree, root(t, MethodTreeSentinal));
set(t, root(t, MethodTree), TreeNodeLeft,
root(t, MethodTreeSentinal));
set(t, root(t, MethodTree), TreeNodeRight,
root(t, MethodTreeSentinal));
}
local::compileThunks(static_cast<MyThread*>(t), &codeAllocator, this);
segFaultHandler.m = t->m;
expect(t, t->m->system->success
(t->m->system->handleSegFault(&segFaultHandler)));
divideByZeroHandler.m = t->m;
expect(t, t->m->system->success
(t->m->system->handleDivideByZero(&divideByZeroHandler)));
}
virtual void callWithCurrentContinuation(Thread* t, object receiver) {
if (Continuations) {
local::callWithCurrentContinuation(static_cast<MyThread*>(t), receiver);
} else {
abort(t);
}
}
virtual void dynamicWind(Thread* t, object before, object thunk,
object after)
{
if (Continuations) {
local::dynamicWind(static_cast<MyThread*>(t), before, thunk, after);
} else {
abort(t);
}
}
virtual void feedResultToContinuation(Thread* t, object continuation,
object result)
{
if (Continuations) {
callContinuation(static_cast<MyThread*>(t), continuation, result, 0);
} else {
abort(t);
}
2009-05-23 22:15:06 +00:00
}
2009-05-06 00:29:05 +00:00
virtual void feedExceptionToContinuation(Thread* t, object continuation,
object exception)
{
if (Continuations) {
callContinuation(static_cast<MyThread*>(t), continuation, 0, exception);
} else {
abort(t);
}
}
virtual void walkContinuationBody(Thread* t, Heap::Walker* w, object o,
unsigned start)
{
if (Continuations) {
local::walkContinuationBody(static_cast<MyThread*>(t), w, o, start);
} else {
abort(t);
}
}
System* s;
Allocator* allocator;
object roots;
BootImage* bootImage;
SignalHandler segFaultHandler;
SignalHandler divideByZeroHandler;
FixedAllocator codeAllocator;
ThunkCollection thunks;
ThunkCollection bootThunks;
unsigned callTableSize;
bool useNativeFeatures;
};
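// note: the following helper assumes the named thunks and the thunk
// table occupy one contiguous region of code, beginning at the default
// thunk and ending after the last table entry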
bool
isThunk(MyProcessor::ThunkCollection* thunks, void* ip)
{
uint8_t* thunkStart = thunks->default_.start;
uint8_t* thunkEnd = thunks->table.start
+ (thunks->table.length * ThunkCount);
return (reinterpret_cast<uintptr_t>(ip)
>= reinterpret_cast<uintptr_t>(thunkStart)
and reinterpret_cast<uintptr_t>(ip)
< reinterpret_cast<uintptr_t>(thunkEnd));
}
bool
isThunk(MyThread* t, void* ip)
{
MyProcessor* p = processor(t);
return isThunk(&(p->thunks), ip) or isThunk(&(p->bootThunks), ip);
}
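// a thunk is "stack-unsafe" while executing its prologue, i.e. at any
// address before frameSavedOffset, where the caller's stack and frame
// registers have not yet been saved for use by the stack walker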
bool
isThunkUnsafeStack(MyProcessor::Thunk* thunk, void* ip)
{
return reinterpret_cast<uintptr_t>(ip)
>= reinterpret_cast<uintptr_t>(thunk->start)
and reinterpret_cast<uintptr_t>(ip)
< reinterpret_cast<uintptr_t>(thunk->start + thunk->frameSavedOffset);
}
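// check the prologue of each named thunk as well as each entry in the
// thunk table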
bool
isThunkUnsafeStack(MyProcessor::ThunkCollection* thunks, void* ip)
{
const unsigned NamedThunkCount = 5;
MyProcessor::Thunk table[NamedThunkCount + ThunkCount];
table[0] = thunks->default_;
table[1] = thunks->defaultVirtual;
table[2] = thunks->native;
table[3] = thunks->aioob;
table[4] = thunks->stackOverflow;
for (unsigned i = 0; i < ThunkCount; ++i) {
new (table + NamedThunkCount + i) MyProcessor::Thunk
(thunks->table.start + (i * thunks->table.length),
thunks->table.frameSavedOffset,
thunks->table.length);
}
for (unsigned i = 0; i < NamedThunkCount + ThunkCount; ++i) {
if (isThunkUnsafeStack(table + i, ip)) {
return true;
}
}
return false;
}
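// root(t, VirtualThunks) holds (start, length) pairs describing each
// virtual call thunk; ip belongs to one if it falls inside any pair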
bool
isVirtualThunk(MyThread* t, void* ip)
{
for (unsigned i = 0; i < wordArrayLength(t, root(t, VirtualThunks)); i += 2)
{
uintptr_t start = wordArrayBody(t, root(t, VirtualThunks), i);
uintptr_t end = start + wordArrayBody(t, root(t, VirtualThunks), i + 1);
if (reinterpret_cast<uintptr_t>(ip) >= start
and reinterpret_cast<uintptr_t>(ip) < end)
{
return true;
}
}
return false;
}
bool
isThunkUnsafeStack(MyThread* t, void* ip)
{
MyProcessor* p = processor(t);
return isThunk(t, ip)
and (isThunkUnsafeStack(&(p->thunks), ip)
or isThunkUnsafeStack(&(p->bootThunks), ip));
}
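// the call table maps the return addresses of compiled calls to call
// nodes; look up an address by hashing it into the power-of-two array
// and walking the collision chain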
object
findCallNode(MyThread* t, void* address)
{
if (DebugCallTable) {
fprintf(stderr, "find call node %p\n", address);
}
// we must use a version of the call table at least as recent as the
// compiled form of the method containing the specified address (see
// compile(MyThread*, Allocator*, BootContext*, object)):
loadMemoryBarrier();
object table = root(t, CallTable);
intptr_t key = reinterpret_cast<intptr_t>(address);
unsigned index = static_cast<uintptr_t>(key) & (arrayLength(t, table) - 1);
for (object n = arrayBody(t, table, index);
n; n = callNodeNext(t, n))
{
intptr_t k = callNodeAddress(t, n);
if (k == key) {
return n;
}
}
return 0;
}
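// rehash every node from the old table into a new array; newLength is
// expected to be a power of two, since bucket indices are computed by
// masking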
object
resizeTable(MyThread* t, object oldTable, unsigned newLength)
{
PROTECT(t, oldTable);
object oldNode = 0;
PROTECT(t, oldNode);
object newTable = makeArray(t, newLength);
PROTECT(t, newTable);
for (unsigned i = 0; i < arrayLength(t, oldTable); ++i) {
for (oldNode = arrayBody(t, oldTable, i);
oldNode;
oldNode = callNodeNext(t, oldNode))
{
intptr_t k = callNodeAddress(t, oldNode);
unsigned index = k & (newLength - 1);
object newNode = makeCallNode
(t, callNodeAddress(t, oldNode),
callNodeTarget(t, oldNode),
callNodeFlags(t, oldNode),
arrayBody(t, newTable, index));
set(t, newTable, ArrayBody + (index * BytesPerWord), newNode);
}
}
return newTable;
}
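// insert a node at the head of its bucket, doubling the table once the
// node count reaches twice the bucket count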
object
insertCallNode(MyThread* t, object table, unsigned* size, object node)
{
if (DebugCallTable) {
fprintf(stderr, "insert call node %p\n",
reinterpret_cast<void*>(callNodeAddress(t, node)));
}
PROTECT(t, table);
PROTECT(t, node);
++ (*size);
if (*size >= arrayLength(t, table) * 2) {
table = resizeTable(t, table, arrayLength(t, table) * 2);
}
intptr_t key = callNodeAddress(t, node);
unsigned index = static_cast<uintptr_t>(key) & (arrayLength(t, table) - 1);
set(t, node, CallNodeNext, arrayBody(t, table, index));
set(t, table, ArrayBody + (index * BytesPerWord), node);
return table;
}
void
insertCallNode(MyThread* t, object node)
{
setRoot(t, CallTable, insertCallNode
(t, root(t, CallTable), &(processor(t)->callTableSize), node));
}
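// build a map from class name to class object for the classes listed
// in a boot image class table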
object
makeClassMap(Thread* t, unsigned* table, unsigned count, uintptr_t* heap)
{
object array = makeArray(t, nextPowerOfTwo(count));
object map = makeHashMap(t, 0, array);
PROTECT(t, map);
for (unsigned i = 0; i < count; ++i) {
object c = bootObject(heap, table[i]);
hashMapInsert(t, map, className(t, c), c, byteArrayHash);
}
return map;
}
object
makeStaticTableArray(Thread* t, unsigned* bootTable, unsigned bootCount,
unsigned* appTable, unsigned appCount, uintptr_t* heap)
{
object array = makeArray(t, bootCount + appCount);
for (unsigned i = 0; i < bootCount; ++i) {
set(t, array, ArrayBody + (i * BytesPerWord),
classStaticTable(t, bootObject(heap, bootTable[i])));
}
for (unsigned i = 0; i < appCount; ++i) {
set(t, array, ArrayBody + ((bootCount + i) * BytesPerWord),
classStaticTable(t, bootObject(heap, appTable[i])));
}
return array;
}
object
makeStringMap(Thread* t, unsigned* table, unsigned count, uintptr_t* heap)
{
object array = makeArray(t, nextPowerOfTwo(count));
object map = makeWeakHashMap(t, 0, array);
PROTECT(t, map);
for (unsigned i = 0; i < count; ++i) {
object s = bootObject(heap, table[i]);
hashMapInsert(t, map, s, 0, stringHash);
}
return map;
}
object
makeCallTable(MyThread* t, uintptr_t* heap, unsigned* calls, unsigned count,
uintptr_t base)
{
object table = makeArray(t, nextPowerOfTwo(count));
PROTECT(t, table);
unsigned size = 0;
for (unsigned i = 0; i < count; ++i) {
unsigned address = calls[i * 2];
unsigned target = calls[(i * 2) + 1];
object node = makeCallNode
(t, base + address, bootObject(heap, target & BootMask),
target >> BootShift, 0);
table = insertCallNode(t, table, &size, node);
}
return table;
}
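// heap words in the boot image encode references as an offset-plus-one
// combined with mark bits; rewrite each word flagged in the heap map
// into an absolute pointer into the loaded heap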
void
fixupHeap(MyThread* t UNUSED, uintptr_t* map, unsigned size, uintptr_t* heap)
{
for (unsigned word = 0; word < size; ++word) {
uintptr_t w = map[word];
if (w) {
for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
if (w & (static_cast<uintptr_t>(1) << bit)) {
unsigned index = indexOf(word, bit);
uintptr_t* p = heap + index;
assert(t, *p);
uintptr_t number = *p & BootMask;
uintptr_t mark = *p >> BootShift;
if (number) {
*p = reinterpret_cast<uintptr_t>(heap + (number - 1)) | mark;
} else {
*p = mark;
}
}
}
}
}
}
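// constants embedded in the compiled code refer either to the boot
// heap (BootHeapOffset) or to other code; rewrite each flagged word to
// an absolute address, storing it either as a flat word or via the
// architecture's constant encoding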
void
fixupCode(Thread* t, uintptr_t* map, unsigned size, uint8_t* code,
uintptr_t* heap)
{
Assembler::Architecture* arch = makeArchitecture(t->m->system, false);
arch->acquire();
for (unsigned word = 0; word < size; ++word) {
uintptr_t w = map[word];
if (w) {
for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
if (w & (static_cast<uintptr_t>(1) << bit)) {
unsigned index = indexOf(word, bit);
uintptr_t oldValue; memcpy(&oldValue, code + index, BytesPerWord);
uintptr_t newValue;
if (oldValue & BootHeapOffset) {
newValue = reinterpret_cast<uintptr_t>
(heap + (oldValue & BootMask) - 1);
} else {
newValue = reinterpret_cast<uintptr_t>
(code + (oldValue & BootMask));
}
if (oldValue & BootFlatConstant) {
memcpy(code + index, &newValue, BytesPerWord);
} else {
arch->setConstant(code + index, newValue);
}
}
}
}
}
arch->release();
}
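// convert each compiled method's image-relative code offset into an
// absolute address in the mapped code area, then rebuild vtables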
void
fixupMethods(Thread* t, object map, BootImage* image, uint8_t* code)
{
for (HashMapIterator it(t, map); it.hasMore();) {
object c = tripleSecond(t, it.next());
if (classMethodTable(t, c)) {
for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
object method = arrayBody(t, classMethodTable(t, c), i);
if (methodCode(t, method)) {
assert(t, (methodCompiled(t, method) - image->codeBase)
<= image->codeSize);
codeCompiled(t, methodCode(t, method))
= (methodCompiled(t, method) - image->codeBase)
+ reinterpret_cast<uintptr_t>(code);
if (DebugCompile) {
logCompile
(static_cast<MyThread*>(t),
reinterpret_cast<uint8_t*>(methodCompiled(t, method)),
reinterpret_cast<uintptr_t*>
(methodCompiled(t, method))[-1],
reinterpret_cast<char*>
(&byteArrayBody(t, className(t, methodClass(t, method)), 0)),
reinterpret_cast<char*>
(&byteArrayBody(t, methodName(t, method), 0)),
reinterpret_cast<char*>
(&byteArrayBody(t, methodSpec(t, method), 0)));
}
}
}
}
t->m->processor->initVtable(t, c);
}
}
MyProcessor::Thunk
thunkToThunk(const BootImage::Thunk& thunk, uint8_t* base)
{
return MyProcessor::Thunk
(base + thunk.start, thunk.frameSavedOffset, thunk.length);
}
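// convert the boot image's thunk descriptors into absolute addresses
// and patch the VM entry-point calls recorded in the image to point at
// this process's functions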
void
fixupThunks(MyThread* t, BootImage* image, uint8_t* code)
{
MyProcessor* p = processor(t);
p->bootThunks.default_ = thunkToThunk(image->thunks.default_, code);
p->bootThunks.defaultVirtual
= thunkToThunk(image->thunks.defaultVirtual, code);
p->bootThunks.native = thunkToThunk(image->thunks.native, code);
p->bootThunks.aioob = thunkToThunk(image->thunks.aioob, code);
p->bootThunks.stackOverflow
= thunkToThunk(image->thunks.stackOverflow, code);
p->bootThunks.table = thunkToThunk(image->thunks.table, code);
updateCall(t, LongCall, code + image->compileMethodCall,
voidPointer(local::compileMethod));
updateCall(t, LongCall, code + image->compileVirtualMethodCall,
voidPointer(local::compileVirtualMethod));
updateCall(t, LongCall, code + image->invokeNativeCall,
voidPointer(invokeNative));
updateCall(t, LongCall, code + image->throwArrayIndexOutOfBoundsCall,
voidPointer(throwArrayIndexOutOfBounds));
updateCall(t, LongCall, code + image->throwStackOverflowCall,
voidPointer(throwStackOverflow));
#define THUNK(s) \
updateCall(t, LongJump, code + image->s##Call, voidPointer(s));
#include "thunks.cpp"
#undef THUNK
}
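// virtual thunk addresses are stored relative to the image's code
// base; rebase any non-zero entries onto the loaded code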
void
fixupVirtualThunks(MyThread* t, BootImage* image, uint8_t* code)
{
for (unsigned i = 0; i < wordArrayLength(t, root(t, VirtualThunks)); i += 2) {
if (wordArrayBody(t, root(t, VirtualThunks), i)) {
wordArrayBody(t, root(t, VirtualThunks), i)
= (wordArrayBody(t, root(t, VirtualThunks), i) - image->codeBase)
+ reinterpret_cast<uintptr_t>(code);
}
}
}
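// the boot image is laid out as: header, boot class table, app class
// table, string table, call table, heap map, heap, code map, code;
// parse each region in that order and wire the results into the
// runtime roots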
void
boot(MyThread* t, BootImage* image)
{
assert(t, image->magic == BootImage::Magic);
unsigned* bootClassTable = reinterpret_cast<unsigned*>(image + 1);
unsigned* appClassTable = bootClassTable + image->bootClassCount;
unsigned* stringTable = appClassTable + image->appClassCount;
unsigned* callTable = stringTable + image->stringCount;
uintptr_t* heapMap = reinterpret_cast<uintptr_t*>
(padWord(reinterpret_cast<uintptr_t>(callTable + (image->callCount * 2))));
unsigned heapMapSizeInWords = ceiling
(heapMapSize(image->heapSize), BytesPerWord);
uintptr_t* heap = heapMap + heapMapSizeInWords;
// fprintf(stderr, "heap from %p to %p\n",
// heap, heap + ceiling(image->heapSize, BytesPerWord));
uintptr_t* codeMap = heap + ceiling(image->heapSize, BytesPerWord);
unsigned codeMapSizeInWords = ceiling
(codeMapSize(image->codeSize), BytesPerWord);
uint8_t* code = reinterpret_cast<uint8_t*>(codeMap + codeMapSizeInWords);
// fprintf(stderr, "code from %p to %p\n",
// code, code + image->codeSize);
fixupHeap(t, heapMap, heapMapSizeInWords, heap);
t->m->heap->setImmortalHeap(heap, image->heapSize / BytesPerWord);
t->m->types = bootObject(heap, image->types);
t->m->roots = makeArray(t, Machine::RootCount);
setRoot(t, Machine::BootLoader, bootObject(heap, image->bootLoader));
setRoot(t, Machine::AppLoader, bootObject(heap, image->appLoader));
MyProcessor* p = static_cast<MyProcessor*>(t->m->processor);
p->roots = makeArray(t, RootCount);
setRoot(t, MethodTree, bootObject(heap, image->methodTree));
setRoot(t, MethodTreeSentinal, bootObject(heap, image->methodTreeSentinal));
setRoot(t, VirtualThunks, bootObject(heap, image->virtualThunks));
fixupCode(t, codeMap, codeMapSizeInWords, code, heap);
syncInstructionCache(code, image->codeSize);
{ object map = makeClassMap(t, bootClassTable, image->bootClassCount, heap);
set(t, root(t, Machine::BootLoader), ClassLoaderMap, map);
}
systemClassLoaderFinder(t, root(t, Machine::BootLoader)) = t->m->bootFinder;
{ object map = makeClassMap(t, appClassTable, image->appClassCount, heap);
set(t, root(t, Machine::AppLoader), ClassLoaderMap, map);
}
systemClassLoaderFinder(t, root(t, Machine::AppLoader)) = t->m->appFinder;
setRoot(t, Machine::StringMap, makeStringMap
(t, stringTable, image->stringCount, heap));
p->callTableSize = image->callCount;
setRoot(t, CallTable, makeCallTable
(t, heap, callTable, image->callCount,
reinterpret_cast<uintptr_t>(code)));
setRoot(t, StaticTableArray, makeStaticTableArray
(t, bootClassTable, image->bootClassCount,
appClassTable, image->appClassCount, heap));
fixupThunks(t, image, code);
fixupVirtualThunks(t, image, code);
fixupMethods
(t, classLoaderMap(t, root(t, Machine::BootLoader)), image, code);
fixupMethods(t, classLoaderMap(t, root(t, Machine::AppLoader)), image, code);
setRoot(t, Machine::BootstrapClassMap, makeHashMap(t, 0, 0));
}
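// thunks in the thunk table are fixed-size stubs, so a thunk's address
// is the table start plus (thunk index * stub length)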
intptr_t
getThunk(MyThread* t, Thunk thunk)
{
MyProcessor* p = processor(t);
return reinterpret_cast<intptr_t>
(p->thunks.table.start + (thunk * p->thunks.table.length));
}
BootImage::Thunk
thunkToThunk(const MyProcessor::Thunk& thunk, uint8_t* base)
{
return BootImage::Thunk
(thunk.start - base, thunk.frameSavedOffset, thunk.length);
}
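// generate the default, default-virtual, native, aioob, and stack
// overflow thunks; each saves the thread's stack and frame registers
// before calling into the VM so that the stack remains walkable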
void
compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p)
{
class ThunkContext {
public:
ThunkContext(MyThread* t, Zone* zone):
context(t), promise(t->m->system, zone)
{ }
Context context;
ListenPromise promise;
};
Zone zone(t->m->system, t->m->heap, 1024);
ThunkContext defaultContext(t, &zone);
{ Assembler* a = defaultContext.context.assembler;
a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));
p->thunks.default_.frameSavedOffset = a->length();
Assembler::Register thread(t->arch->thread());
a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);
Assembler::Constant proc(&(defaultContext.promise));
a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);
a->popFrame();
Assembler::Register result(t->arch->returnLow());
a->apply(Jump, BytesPerWord, RegisterOperand, &result);
p->thunks.default_.length = a->endBlock(false)->resolve(0, 0);
}
ThunkContext defaultVirtualContext(t, &zone);
{ Assembler* a = defaultVirtualContext.context.assembler;
Assembler::Register class_(t->arch->virtualCallTarget());
Assembler::Memory virtualCallTargetSrc
(t->arch->stack(),
(t->arch->frameFooterSize() + t->arch->frameReturnAddressSize())
* BytesPerWord);
a->apply(Move, BytesPerWord, MemoryOperand, &virtualCallTargetSrc,
BytesPerWord, RegisterOperand, &class_);
Assembler::Memory virtualCallTargetDst
(t->arch->thread(), difference(&(t->virtualCallTarget), t));
a->apply(Move, BytesPerWord, RegisterOperand, &class_,
BytesPerWord, MemoryOperand, &virtualCallTargetDst);
Assembler::Register index(t->arch->virtualCallIndex());
Assembler::Memory virtualCallIndex
(t->arch->thread(), difference(&(t->virtualCallIndex), t));
a->apply(Move, BytesPerWord, RegisterOperand, &index,
BytesPerWord, MemoryOperand, &virtualCallIndex);
a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));
p->thunks.defaultVirtual.frameSavedOffset = a->length();
Assembler::Register thread(t->arch->thread());
a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);
Assembler::Constant proc(&(defaultVirtualContext.promise));
a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);
a->popFrame();
Assembler::Register result(t->arch->returnLow());
a->apply(Jump, BytesPerWord, RegisterOperand, &result);
p->thunks.defaultVirtual.length = a->endBlock(false)->resolve(0, 0);
}
ThunkContext nativeContext(t, &zone);
{ Assembler* a = nativeContext.context.assembler;
a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));
p->thunks.native.frameSavedOffset = a->length();
Assembler::Register thread(t->arch->thread());
a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);
Assembler::Constant proc(&(nativeContext.promise));
a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);
a->popFrameAndUpdateStackAndReturn(difference(&(t->stack), t));
p->thunks.native.length = a->endBlock(false)->resolve(0, 0);
}
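// Array-bounds thunk: save the frame and call the helper resolved below
// (throwArrayIndexOutOfBounds).  No popFrame follows, presumably because
// the helper unwinds rather than returning.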
ThunkContext aioobContext(t, &zone);
{ Assembler* a = aioobContext.context.assembler;
a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));
p->thunks.aioob.frameSavedOffset = a->length();
Assembler::Register thread(t->arch->thread());
a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);
Assembler::Constant proc(&(aioobContext.promise));
a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);
p->thunks.aioob.length = a->endBlock(false)->resolve(0, 0);
}
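// Stack-overflow thunk: same shape as the aioob thunk above, but its
// call resolves to throwStackOverflow.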
ThunkContext stackOverflowContext(t, &zone);
{ Assembler* a = stackOverflowContext.context.assembler;
a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));
p->thunks.stackOverflow.frameSavedOffset = a->length();
Assembler::Register thread(t->arch->thread());
a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);
Assembler::Constant proc(&(stackOverflowContext.promise));
a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);
p->thunks.stackOverflow.length = a->endBlock(false)->resolve(0, 0);
}
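// Table thunk: a minimal trampoline that saves the frame and long-jumps
// to a target patched in later.  It is copied once per entry of
// thunks.cpp to build the thunk table (see the THUNK macro below).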
ThunkContext tableContext(t, &zone);
{ Assembler* a = tableContext.context.assembler;
a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));
p->thunks.table.frameSavedOffset = a->length();
Assembler::Constant proc(&(tableContext.promise));
a->apply(LongJump, BytesPerWord, ConstantOperand, &proc);
p->thunks.table.length = a->endBlock(false)->resolve(0, 0);
}
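// Copy each assembled thunk into executable memory via finish(), then
// resolve the address of its C++ helper; when building a boot image,
// record that location as an offset from the code base.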
p->thunks.default_.start = finish
(t, allocator, defaultContext.context.assembler, "default",
p->thunks.default_.length);
BootImage* image = p->bootImage;
uint8_t* imageBase = p->codeAllocator.base;
{ void* call;
defaultContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(compileMethod)), &call);
if (image) {
image->compileMethodCall = static_cast<uint8_t*>(call) - imageBase;
}
}
p->thunks.defaultVirtual.start = finish
(t, allocator, defaultVirtualContext.context.assembler, "defaultVirtual",
p->thunks.defaultVirtual.length);
{ void* call;
defaultVirtualContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(compileVirtualMethod)), &call);
if (image) {
image->compileVirtualMethodCall
= static_cast<uint8_t*>(call) - imageBase;
}
}
p->thunks.native.start = finish
(t, allocator, nativeContext.context.assembler, "native",
p->thunks.native.length);
{ void* call;
nativeContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(invokeNative)), &call);
if (image) {
image->invokeNativeCall = static_cast<uint8_t*>(call) - imageBase;
}
}
p->thunks.aioob.start = finish
(t, allocator, aioobContext.context.assembler, "aioob",
p->thunks.aioob.length);
{ void* call;
aioobContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(throwArrayIndexOutOfBounds)),
&call);
if (image) {
image->throwArrayIndexOutOfBoundsCall
= static_cast<uint8_t*>(call) - imageBase;
}
}
p->thunks.stackOverflow.start = finish
(t, allocator, stackOverflowContext.context.assembler, "stackOverflow",
p->thunks.stackOverflow.length);
{ void* call;
stackOverflowContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(throwStackOverflow)),
&call);
if (image) {
image->throwStackOverflowCall
= static_cast<uint8_t*>(call) - imageBase;
}
}
p->thunks.table.start = static_cast<uint8_t*>
(allocator->allocate(p->thunks.table.length * ThunkCount));
if (image) {
image->thunks.default_ = thunkToThunk(p->thunks.default_, imageBase);
image->thunks.defaultVirtual
= thunkToThunk(p->thunks.defaultVirtual, imageBase);
image->thunks.native = thunkToThunk(p->thunks.native, imageBase);
image->thunks.aioob = thunkToThunk(p->thunks.aioob, imageBase);
image->thunks.stackOverflow
= thunkToThunk(p->thunks.stackOverflow, imageBase);
image->thunks.table = thunkToThunk(p->thunks.table, imageBase);
}
logCompile(t, p->thunks.table.start, p->thunks.table.length * ThunkCount, 0,
"thunkTable", 0);
uint8_t* start = p->thunks.table.start;
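// The THUNK macro is instantiated once per entry in thunks.cpp: each
// instantiation writes another copy of the table thunk at "start",
// advances "start" by the thunk length, and resolves the copy's call
// target to the named helper (recording its offset in the boot image,
// if any).  For illustration only, a hypothetical entry THUNK(someHelper)
// would yield a table slot whose call resolves to someHelper.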
#define THUNK(s) \
tableContext.context.assembler->writeTo(start); \
start += p->thunks.table.length; \
{ void* call; \
tableContext.promise.listener->resolve \
(reinterpret_cast<intptr_t>(voidPointer(s)), &call); \
if (image) { \
image->s##Call = static_cast<uint8_t*>(call) - imageBase; \
} \
}
#include "thunks.cpp"
#undef THUNK
}
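// Convenience accessor for the JIT processor attached to this VM.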
MyProcessor*
processor(MyThread* t)
{
return static_cast<MyProcessor*>(t->m->processor);
}
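// Entry-point accessors for the shared thunks; the boot* variants read
// from p->bootThunks rather than p->thunks.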
uintptr_t
defaultThunk(MyThread* t)
{
return reinterpret_cast<uintptr_t>(processor(t)->thunks.default_.start);
}
uintptr_t
bootDefaultThunk(MyThread* t)
{
return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.default_.start);
}
uintptr_t
defaultVirtualThunk(MyThread* t)
{
return reinterpret_cast<uintptr_t>
(processor(t)->thunks.defaultVirtual.start);
}
uintptr_t
nativeThunk(MyThread* t)
{
return reinterpret_cast<uintptr_t>(processor(t)->thunks.native.start);
}
uintptr_t
bootNativeThunk(MyThread* t)
{
return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.native.start);
}
uintptr_t
aioobThunk(MyThread* t)
{
return reinterpret_cast<uintptr_t>(processor(t)->thunks.aioob.start);
}
uintptr_t
stackOverflowThunk(MyThread* t)
{
return reinterpret_cast<uintptr_t>(processor(t)->thunks.stackOverflow.start);
}
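// A method is still unresolved (not yet compiled) while its address
// points at either flavor of the default thunk.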
bool
unresolved(MyThread* t, uintptr_t methodAddress)
{
return methodAddress == defaultThunk(t)
or methodAddress == bootDefaultThunk(t);
}
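// Emit a per-vtable-index stub: load the index into the architecture's
// virtual-call register, then jump to the shared defaultVirtual thunk,
// which takes care of compiling and dispatching the real target.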
uintptr_t
compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
{
Context context(t);
Assembler* a = context.assembler;
ResolvedPromise indexPromise(index);
Assembler::Constant indexConstant(&indexPromise);
Assembler::Register indexRegister(t->arch->virtualCallIndex());
a->apply(Move, BytesPerWord, ConstantOperand, &indexConstant,
BytesPerWord, RegisterOperand, &indexRegister);
ResolvedPromise defaultVirtualThunkPromise(defaultVirtualThunk(t));
Assembler::Constant thunk(&defaultVirtualThunkPromise);
a->apply(Jump, BytesPerWord, ConstantOperand, &thunk);
*size = a->endBlock(false)->resolve(0, 0);
uint8_t* start = static_cast<uint8_t*>(codeAllocator(t)->allocate(*size));
a->writeTo(start);
logCompile(t, start, *size, 0, "virtualThunk", 0);
return reinterpret_cast<uintptr_t>(start);
}
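// Return the thunk for the given vtable index, creating it lazily.  The
// VirtualThunks word array stores the thunk address at index * 2 and its
// size at index * 2 + 1; creation is double-checked under the class lock.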
uintptr_t
virtualThunk(MyThread* t, unsigned index)
{
if (root(t, VirtualThunks) == 0
or wordArrayLength(t, root(t, VirtualThunks)) <= index * 2)
{
object newArray = makeWordArray(t, nextPowerOfTwo((index + 1) * 2));
if (root(t, VirtualThunks)) {
memcpy(&wordArrayBody(t, newArray, 0),
&wordArrayBody(t, root(t, VirtualThunks), 0),
wordArrayLength(t, root(t, VirtualThunks)) * BytesPerWord);
}
setRoot(t, VirtualThunks, newArray);
}
if (wordArrayBody(t, root(t, VirtualThunks), index * 2) == 0) {
ACQUIRE(t, t->m->classLock);
if (wordArrayBody(t, root(t, VirtualThunks), index * 2) == 0) {
unsigned size;
uintptr_t thunk = compileVirtualThunk(t, index, &size);
wordArrayBody(t, root(t, VirtualThunks), index * 2) = thunk;
wordArrayBody(t, root(t, VirtualThunks), (index * 2) + 1) = size;
}
}
return wordArrayBody(t, root(t, VirtualThunks), index * 2);
}
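// Top-level entry point for JIT-compiling a single method.  The method's
// address is compared against the default thunk before compilation, after
// the lock-free first pass, and again under the class lock, so concurrent
// callers compile it at most once.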
void
compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
object method)
{
PROTECT(t, method);
if (bootContext == 0) {
initClass(t, methodClass(t, method));
if (UNLIKELY(t->exception)) return;
}
if (methodAddress(t, method) != defaultThunk(t)) {
return;
}
assert(t, (methodFlags(t, method) & ACC_NATIVE) == 0);
// We must avoid acquiring any locks until after the first pass of
// compilation, since this pass may trigger classloading operations
// involving application classloaders and thus the potential for
// deadlock. To make this safe, we use a private clone of the
// method so that we won't be confused if another thread updates the
// original while we're working.
object clone = methodClone(t, method);
loadMemoryBarrier();
if (methodAddress(t, method) != defaultThunk(t)) {
return;
}
PROTECT(t, clone);
Context context(t, bootContext, clone);
compile(t, &context);
if (UNLIKELY(t->exception)) return;
{ object ehTable = codeExceptionHandlerTable(t, methodCode(t, clone));
if (ehTable) {
PROTECT(t, ehTable);
// resolve all exception handler catch types before we acquire
// the class lock:
for (unsigned i = 0; i < exceptionHandlerTableLength(t, ehTable); ++i) {
ExceptionHandler* handler = exceptionHandlerTableBody(t, ehTable, i);
if (exceptionHandlerCatchType(handler)) {
resolveClassInPool
(t, clone, exceptionHandlerCatchType(handler) - 1);
if (UNLIKELY(t->exception)) return;
}
}
}
}
ACQUIRE(t, t->m->classLock);
if (methodAddress(t, method) != defaultThunk(t)) {
return;
}
finish(t, allocator, &context);
if (UNLIKELY(t->exception)) return;
if (DebugMethodTree) {
fprintf(stderr, "insert method at %p\n",
reinterpret_cast<void*>(methodCompiled(t, clone)));
}
// We can't update the MethodCode field on the original method
// before it is placed into the method tree, since another thread
// might call the method, from which stack unwinding would fail
// (since there is not yet an entry in the method tree). However,
// we can't insert the original method into the tree before updating
// the MethodCode field on it since we rely on that field to
// determine its position in the tree. Therefore, we insert the
// clone in its place. Later, we'll replace the clone with the
// original to save memory.
setRoot
(t, MethodTree, treeInsert
(t, &(context.zone), root(t, MethodTree),
methodCompiled(t, clone), clone, root(t, MethodTreeSentinal),
compareIpToMethodBounds));
storeStoreMemoryBarrier();
set(t, method, MethodCode, methodCode(t, clone));
if (methodVirtual(t, method)) {
classVtable(t, methodClass(t, method), methodOffset(t, method))
= reinterpret_cast<void*>(methodCompiled(t, clone));
}
treeUpdate(t, root(t, MethodTree), methodCompiled(t, clone),
method, root(t, MethodTreeSentinal), compareIpToMethodBounds);
}
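// Accessors for the processor-wide root array (MethodTree, VirtualThunks,
// and friends).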
object&
root(Thread* t, Root root)
{
return arrayBody(t, processor(static_cast<MyThread*>(t))->roots, root);
}
void
setRoot(Thread* t, Root root, object value)
{
set(t, processor(static_cast<MyThread*>(t))->roots,
ArrayBody + (root * BytesPerWord), value);
}
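// The fixed allocator that backs code generated by this processor.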
FixedAllocator*
codeAllocator(MyThread* t)
{
return &(processor(t)->codeAllocator);
}
} // namespace local
} // namespace
namespace vm {
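// Factory used by the VM to construct the JIT processor in place within
// the supplied allocator.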
Processor*
makeProcessor(System* system, Allocator* allocator, bool useNativeFeatures)
{
return new (allocator->allocate(sizeof(local::MyProcessor)))
local::MyProcessor(system, allocator, useNativeFeatures);
}
} // namespace vm