/* Copyright (c) 2008-2009, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software.  See license.txt for
   details. */

#include "machine.h"
#include "util.h"
#include "vector.h"
#include "process.h"
#include "assembler.h"
#include "compiler.h"
#include "arch.h"

using namespace vm;

extern "C" uint64_t
vmInvoke(void* thread, void* function, void* arguments,
         unsigned argumentFootprint, unsigned frameSize, unsigned returnType);

extern "C" void
vmInvoke_returnAddress();

extern "C" void
vmInvoke_safeStack();

extern "C" void
vmJumpAndInvoke(void* thread, void* function, void* base, void* stack,
                unsigned argumentFootprint, uintptr_t* arguments,
                unsigned frameSize);

extern "C" void
vmCall();

namespace {

namespace local {

const bool DebugCompile = false;
const bool DebugNatives = false;
const bool DebugCallTable = false;
const bool DebugMethodTree = false;
const bool DebugFrameMaps = false;
const bool DebugIntrinsics = false;

const bool CheckArrayBounds = true;

#ifdef AVIAN_CONTINUATIONS
const bool Continuations = true;
#else
const bool Continuations = false;
#endif

const unsigned MaxNativeCallFootprint = 4;

const unsigned InitialZoneCapacityInBytes = 64 * 1024;

const unsigned ExecutableAreaSizeInBytes = 16 * 1024 * 1024;

enum Root {
  CallTable,
  MethodTree,
  MethodTreeSentinal,
  ObjectPools,
  StaticTableArray,
  VirtualThunks,
  ReceiveMethod,
  WindMethod,
  RewindMethod
};

const unsigned RootCount = RewindMethod + 1;

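// Commentary (added; an inference from the label names, not stated in the
// original): this appears to return true when the interrupted instruction
// pointer lies inside vmInvoke between the vmInvoke_returnAddress and
// vmInvoke_safeStack labels, a window in which the thread's saved stack
// and base values cannot yet be trusted by a thread taking a stack trace.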
inline bool
isVmInvokeUnsafeStack(void* ip)
{
  return reinterpret_cast<uintptr_t>(ip)
    >= reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_returnAddress))
    and reinterpret_cast<uintptr_t>(ip)
    < reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_safeStack));
}

class MyThread: public Thread {
 public:
  class CallTrace {
   public:
    CallTrace(MyThread* t, object method):
      t(t),
      base(t->base),
      stack(t->stack),
      continuation(t->continuation),
      nativeMethod((methodFlags(t, method) & ACC_NATIVE) ? method : 0),
      targetMethod(0),
      originalMethod(method),
      next(t->trace)
    {
      doTransition(t, 0, 0, 0, 0, this);
    }

    ~CallTrace() {
      assert(t, t->stack == 0);

      doTransition(t, 0, stack, base, continuation, next);
    }

    MyThread* t;
    void* base;
    void* stack;
    object continuation;
    object nativeMethod;
    object targetMethod;
    object originalMethod;
    CallTrace* next;
  };

  class Context {
   public:
    class MyProtector: public Thread::Protector {
     public:
      MyProtector(MyThread* t, Context* context):
        Protector(t), context(context)
      { }

      virtual void visit(Heap::Visitor* v) {
        v->visit(&(context->continuation));
      }

      Context* context;
    };

    Context(MyThread* t, void* ip, void* stack, void* base,
            object continuation, CallTrace* trace):
      ip(ip),
      stack(stack),
      base(base),
      continuation(continuation),
      trace(trace),
      protector(t, this)
    { }

    void* ip;
    void* stack;
    void* base;
    object continuation;
    CallTrace* trace;
    MyProtector protector;
  };

  class TraceContext: public Context {
   public:
    TraceContext(MyThread* t, void* ip, void* stack, void* base,
                 object continuation, CallTrace* trace):
      Context(t, ip, stack, base, continuation, trace),
      t(t),
      next(t->traceContext)
    {
      t->traceContext = this;
    }

    TraceContext(MyThread* t):
      Context(t, t->ip, t->stack, t->base, t->continuation, t->trace),
      t(t),
      next(t->traceContext)
    {
      t->traceContext = this;
    }

    ~TraceContext() {
      t->traceContext = next;
    }

    MyThread* t;
    TraceContext* next;
  };

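  // Commentary (condensed from the commit message that accompanied this
  // code; not a comment in the original file): doTransition updates the
  // thread context fields so that an interrupting thread always sees a
  // consistent snapshot.  It (1) stages the new values in a temporary
  // Context, (2) publishes that Context via t->transition, (3) updates the
  // fields in the thread object itself, and (4) clears t->transition, with
  // a memory barrier between each step so the updates become visible to
  // other threads in program order.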
  static void doTransition(MyThread* t, void* ip, void* stack, void* base,
                           object continuation, MyThread::CallTrace* trace)
  {
    // in this function, we "atomically" update the thread context
    // fields in such a way to ensure that another thread may
    // interrupt us at any time and still get a consistent, accurate
    // stack trace.  See MyProcessor::getStackTrace for details.

    assert(t, t->transition == 0);

    Context c(t, ip, stack, base, continuation, trace);

    compileTimeMemoryBarrier();

    t->transition = &c;

    compileTimeMemoryBarrier();

    t->ip = ip;
    t->base = base;
    t->stack = stack;
    t->continuation = continuation;
    t->trace = trace;

    compileTimeMemoryBarrier();

    t->transition = 0;
  }

  MyThread(Machine* m, object javaThread, MyThread* parent,
           bool useNativeFeatures):
    Thread(m, javaThread, parent),
    ip(0),
    base(0),
    stack(0),
    continuation(0),
    exceptionStackAdjustment(0),
    exceptionOffset(0),
    exceptionHandler(0),
    tailAddress(0),
    virtualCallTarget(0),
    virtualCallIndex(0),
    trace(0),
    reference(0),
    arch(parent
         ? parent->arch
         : makeArchitecture(m->system, useNativeFeatures)),
    transition(0),
    traceContext(0)
  {
    arch->acquire();
  }

  void* ip;
  void* base;
  void* stack;
  object continuation;
  uintptr_t exceptionStackAdjustment;
  uintptr_t exceptionOffset;
  void* exceptionHandler;
  void* tailAddress;
  void* virtualCallTarget;
  uintptr_t virtualCallIndex;
  CallTrace* trace;
  Reference* reference;
  Assembler::Architecture* arch;
  Context* transition;
  TraceContext* traceContext;
};

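// Illustration (hypothetical usage, not part of this section): a helper
// thunk bridging from compiled Java code into native code would typically
// bracket the native call with a CallTrace, so that the saved stack, base,
// and continuation values are published for stack traces on entry and
// restored on exit, for example:
//
//   {
//     MyThread::CallTrace trace(t, method);
//     // ... invoke the native implementation ...
//   }
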
void
transition(MyThread* t, void* ip, void* stack, void* base, object continuation,
           MyThread::CallTrace* trace)
{
  MyThread::doTransition(t, ip, stack, base, continuation, trace);
}

unsigned
parameterOffset(MyThread* t, object method)
{
  return methodParameterFootprint(t, method)
    + t->arch->frameFooterSize()
    + t->arch->frameReturnAddressSize() - 1;
}

object
resolveThisPointer(MyThread* t, void* stack)
{
  return reinterpret_cast<object*>(stack)
    [t->arch->frameFooterSize() + t->arch->frameReturnAddressSize()];
}

object
findMethod(Thread* t, object method, object instance)
{
  if ((methodFlags(t, method) & ACC_STATIC) == 0) {
    if (classFlags(t, methodClass(t, method)) & ACC_INTERFACE) {
      return findInterfaceMethod(t, method, objectClass(t, instance));
    } else if (methodVirtual(t, method)) {
      return findVirtualMethod(t, method, objectClass(t, instance));
    }
  }
  return method;
}

object
resolveTarget(MyThread* t, void* stack, object method)
{
  object class_ = objectClass(t, resolveThisPointer(t, stack));

  if (classVmFlags(t, class_) & BootstrapFlag) {
    PROTECT(t, method);
    PROTECT(t, class_);

    resolveSystemClass(t, root(t, Machine::BootLoader), className(t, class_));
    if (UNLIKELY(t->exception)) return 0;
  }

  if (classFlags(t, methodClass(t, method)) & ACC_INTERFACE) {
    return findInterfaceMethod(t, method, class_);
  } else {
    return findVirtualMethod(t, method, class_);
  }
}

object
resolveTarget(MyThread* t, object class_, unsigned index)
{
  if (classVmFlags(t, class_) & BootstrapFlag) {
    PROTECT(t, class_);

    resolveSystemClass(t, root(t, Machine::BootLoader), className(t, class_));
    if (UNLIKELY(t->exception)) return 0;
  }

  return arrayBody(t, classVirtualTable(t, class_), index);
}

object&
root(Thread* t, Root root);

void
setRoot(Thread* t, Root root, object value);

unsigned
compiledSize(intptr_t address)
{
  return reinterpret_cast<uintptr_t*>(address)[-1];
}

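// Commentary (added, not in the original): methodCompiled(t, method)
// evidently points at the start of a method's compiled code, and
// compiledSize above reads the size word stored immediately before that
// address; compareIpToMethodBounds below uses the pair to decide whether
// an instruction pointer falls within a given method.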
intptr_t
compareIpToMethodBounds(Thread* t, intptr_t ip, object method)
{
  intptr_t start = methodCompiled(t, method);

  if (DebugMethodTree) {
    fprintf(stderr, "find %p in (%p,%p)\n",
            reinterpret_cast<void*>(ip),
            reinterpret_cast<void*>(start),
            reinterpret_cast<void*>(start + compiledSize(start)));
  }

  if (ip < start) {
    return -1;
  } else if (ip < start + static_cast<intptr_t>
             (compiledSize(start) + BytesPerWord))
  {
    return 0;
  } else {
    return 1;
  }
}

object
methodForIp(MyThread* t, void* ip)
{
  if (DebugMethodTree) {
    fprintf(stderr, "query for method containing %p\n", ip);
  }

  // we must use a version of the method tree at least as recent as the
  // compiled form of the method containing the specified address (see
  // compile(MyThread*, Allocator*, BootContext*, object)):
  loadMemoryBarrier();

  return treeQuery(t, root(t, MethodTree), reinterpret_cast<intptr_t>(ip),
                   root(t, MethodTreeSentinal), compareIpToMethodBounds);
}

class MyStackWalker: public Processor::StackWalker {
 public:
  enum State {
    Start,
    Next,
    Trace,
    Continuation,
    Method,
    NativeMethod,
    Finish
  };

  class MyProtector: public Thread::Protector {
   public:
    MyProtector(MyStackWalker* walker):
      Protector(walker->t), walker(walker)
    { }

    virtual void visit(Heap::Visitor* v) {
      v->visit(&(walker->method_));
      v->visit(&(walker->continuation));
    }

    MyStackWalker* walker;
  };

  MyStackWalker(MyThread* t):
    t(t),
    state(Start),
    method_(0),
    protector(this)
  {
    if (t->traceContext) {
      ip_ = t->traceContext->ip;
      base = t->traceContext->base;
      stack = t->traceContext->stack;
      trace = t->traceContext->trace;
      continuation = t->traceContext->continuation;
    } else {
      ip_ = 0;
      base = t->base;
      stack = t->stack;
      trace = t->trace;
      continuation = t->continuation;
    }
  }

  MyStackWalker(MyStackWalker* w):
    t(w->t),
    state(w->state),
    ip_(w->ip_),
    base(w->base),
    stack(w->stack),
    trace(w->trace),
    method_(w->method_),
    continuation(w->continuation),
    protector(this)
  { }

  virtual void walk(Processor::StackVisitor* v) {
    for (MyStackWalker it(this); it.valid();) {
      MyStackWalker walker(&it);
      if (not v->visit(&walker)) {
        break;
      }
      it.next();
    }
  }

  bool valid() {
    while (true) {
      // fprintf(stderr, "state: %d\n", state);
      switch (state) {
      case Start:
        if (ip_ == 0) {
          ip_ = t->arch->frameIp(stack);
        }

        if (trace and trace->nativeMethod) {
          method_ = trace->nativeMethod;
          state = NativeMethod;
        } else {
          state = Next;
        }
        break;

      case Next:
        if (stack) {
          method_ = methodForIp(t, ip_);
          if (method_) {
            state = Method;
          } else if (continuation) {
            method_ = continuationMethod(t, continuation);
            state = Continuation;
          } else {
            state = Trace;
          }
        } else {
          state = Trace;
        }
        break;

      case Trace: {
        if (trace) {
          continuation = trace->continuation;
          stack = trace->stack;
          base = trace->base;
          ip_ = t->arch->frameIp(stack);
          trace = trace->next;

          state = Start;
        } else {
          state = Finish;
        }
      } break;

      case Continuation:
      case Method:
      case NativeMethod:
        return true;

      case Finish:
        return false;

      default:
        abort(t);
      }
    }
  }

  void next() {
    switch (state) {
    case Continuation:
      continuation = continuationNext(t, continuation);
      break;

    case Method:
      t->arch->nextFrame(&stack, &base);
      ip_ = t->arch->frameIp(stack);
      break;

    case NativeMethod:
      break;

    default:
      abort(t);
    }

    state = Next;
  }

  virtual object method() {
    // fprintf(stderr, "method %s.%s\n", &byteArrayBody
    //         (t, className(t, methodClass(t, method_)), 0),
    //         &byteArrayBody(t, methodName(t, method_), 0));
    return method_;
  }

  virtual int ip() {
    switch (state) {
    case Continuation:
      return reinterpret_cast<intptr_t>(continuationAddress(t, continuation))
        - methodCompiled(t, continuationMethod(t, continuation));

    case Method:
      return reinterpret_cast<intptr_t>(ip_) - methodCompiled(t, method_);

    case NativeMethod:
      return 0;

    default:
      abort(t);
    }
  }

  virtual unsigned count() {
    unsigned count = 0;

    for (MyStackWalker walker(this); walker.valid();) {
      walker.next();
      ++ count;
    }

    return count;
  }

  MyThread* t;
  State state;
  void* ip_;
  void* base;
  void* stack;
  MyThread::CallTrace* trace;
  object method_;
  object continuation;
  MyProtector protector;
};

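// Illustration (hypothetical example, not part of the original file): a
// caller drives the walker through the Processor::StackVisitor interface,
// examining one frame per visit() call, for example:
//
//   class FrameCounter: public Processor::StackVisitor {
//    public:
//     FrameCounter(): frames(0) { }
//
//     virtual bool visit(Processor::StackWalker* walker) {
//       // walker->method() and walker->ip() identify the current frame.
//       ++ frames;
//       return true; // returning false stops the walk early
//     }
//
//     unsigned frames;
//   };
//
//   MyStackWalker walker(t);
//   FrameCounter counter;
//   walker.walk(&counter);
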
unsigned
localSize(MyThread* t, object method)
{
  unsigned size = codeMaxLocals(t, methodCode(t, method));
  if ((methodFlags(t, method) & (ACC_SYNCHRONIZED | ACC_STATIC))
      == ACC_SYNCHRONIZED)
  {
    ++ size;
  }
  return size;
}

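// Commentary (added; the reason is an inference, not stated in the
// original): a synchronized non-static method is given one extra local
// slot here, presumably to hold the object whose monitor must be released
// when the method exits.
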
unsigned
alignedFrameSize(MyThread* t, object method)
{
  return t->arch->alignFrameSize
    (localSize(t, method)
     - methodParameterFootprint(t, method)
     + codeMaxStack(t, methodCode(t, method))
     + t->arch->frameFootprint(MaxNativeCallFootprint));
}

int
localOffset(MyThread* t, int v, object method)
{
  int parameterFootprint = methodParameterFootprint(t, method);
  int frameSize = alignedFrameSize(t, method);

  int offset = ((v < parameterFootprint) ?
                (frameSize
                 + parameterFootprint
                 + t->arch->frameFooterSize()
                 + t->arch->frameHeaderSize()
                 - v - 1) :
                (frameSize
                 + parameterFootprint
                 - v - 1));

  assert(t, offset >= 0);
  return offset;
}

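// Worked example (hypothetical numbers, added for illustration): with a
// parameter footprint of 3, an aligned frame size of 8, a one-word frame
// footer, and a two-word frame header, parameter v = 0 maps to offset
// 8 + 3 + 1 + 2 - 0 - 1 = 13 words, while non-parameter local v = 5 maps
// to offset 8 + 3 - 5 - 1 = 5 words.  localOffsetFromStack below then adds
// the return-address size to turn this into an index from the stack
// pointer, which is how localObject locates a local's slot.
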
int
localOffsetFromStack(MyThread* t, int index, object method)
{
  return localOffset(t, index, method)
    + t->arch->frameReturnAddressSize();
}

object*
localObject(MyThread* t, void* stack, object method, unsigned index)
{
  return static_cast<object*>(stack) + localOffsetFromStack(t, index, method);
}

int
stackOffsetFromFrame(MyThread* t, object method)
{
  return alignedFrameSize(t, method) + t->arch->frameHeaderSize();
}

void*
stackForFrame(MyThread* t, void* frame, object method)
{
  return static_cast<void**>(frame) - stackOffsetFromFrame(t, method);
}

class PoolElement: public Promise {
 public:
  PoolElement(Thread* t, object target, PoolElement* next):
    t(t), target(target), address(0), next(next)
  { }

  virtual int64_t value() {
    assert(t, resolved());
    return address;
  }

  virtual bool resolved() {
    return address != 0;
  }

  Thread* t;
  object target;
  intptr_t address;
  PoolElement* next;
};

class Context;
class SubroutineCall;

class Subroutine {
 public:
  Subroutine(unsigned ip, unsigned logIndex, Subroutine* listNext,
             Subroutine* stackNext):
    listNext(listNext),
    stackNext(stackNext),
    calls(0),
    handle(0),
    ip(ip),
    logIndex(logIndex),
    stackIndex(0),
    callCount(0),
    tableIndex(0),
    visited(false)
  { }

  Subroutine* listNext;
  Subroutine* stackNext;
  SubroutineCall* calls;
  Compiler::Subroutine* handle;
  unsigned ip;
  unsigned logIndex;
  unsigned stackIndex;
  unsigned callCount;
  unsigned tableIndex;
  bool visited;
};

class SubroutinePath;

class SubroutineCall {
 public:
  SubroutineCall(Subroutine* subroutine, Promise* returnAddress):
    subroutine(subroutine),
    returnAddress(returnAddress),
    paths(0),
    next(subroutine->calls)
  {
    subroutine->calls = this;
    ++ subroutine->callCount;
  }

  Subroutine* subroutine;
  Promise* returnAddress;
  SubroutinePath* paths;
  SubroutineCall* next;
};

class SubroutinePath {
 public:
  SubroutinePath(SubroutineCall* call, SubroutinePath* stackNext,
                 uintptr_t* rootTable):
    call(call),
    stackNext(stackNext),
    listNext(call->paths),
    rootTable(rootTable)
  {
    call->paths = this;
  }

  SubroutineCall* call;
  SubroutinePath* stackNext;
  SubroutinePath* listNext;
  uintptr_t* rootTable;
};

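// Commentary (added, not in the original): each Subroutine keeps a singly
// linked list of the SubroutineCall sites that invoke it (calls/next), and
// each SubroutineCall keeps a list of the SubroutinePaths that reach it
// (paths/listNext); stackNext apparently chains the enclosing subroutine
// calls, which is the chain that print below walks.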
void
print(SubroutinePath* path)
{
  if (path) {
    fprintf(stderr, " (");
    while (true) {
      fprintf(stderr, "%p", path->call->returnAddress->resolved() ?
              reinterpret_cast<void*>(path->call->returnAddress->value()) : 0);
      path = path->stackNext;
      if (path) {
        fprintf(stderr, ", ");
      } else {
        break;
      }
    }
    fprintf(stderr, ")");
  }
}

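
// Per-path GC root map for a trace point reached inside a subroutine; the
// map is stored inline in map[], immediately after the object.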
class SubroutineTrace {
 public:
  SubroutineTrace(SubroutinePath* path, SubroutineTrace* next,
                  unsigned mapSize):
    path(path),
    next(next),
    watch(false)
  {
    memset(map, 0, mapSize * BytesPerWord);
  }

  SubroutinePath* path;
  SubroutineTrace* next;
  bool watch;
  uintptr_t map[0];
};
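
// Describes a point in the compiled code where the VM may need an accurate
// GC root map (typically a call site): the machine address promise, the
// call target, call flags, and the frame map stored inline in map[].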
class TraceElement: public TraceHandler {
 public:
  static const unsigned VirtualCall = 1 << 0;
  static const unsigned TailCall = 1 << 1;
  static const unsigned LongCall = 1 << 2;

  TraceElement(Context* context, unsigned ip, object target, unsigned flags,
               TraceElement* next, unsigned mapSize):
    context(context),
    address(0),
    next(next),
    subroutineTrace(0),
    target(target),
    ip(ip),
    subroutineTraceCount(0),
    argumentIndex(0),
    flags(flags),
    watch(false)
  {
    memset(map, 0, mapSize * BytesPerWord);
  }

  virtual void handleTrace(Promise* address, unsigned argumentIndex) {
    if (this->address == 0) {
      this->address = address;
      this->argumentIndex = argumentIndex;
    }
  }

  Context* context;
  Promise* address;
  TraceElement* next;
  SubroutineTrace* subroutineTrace;
  object target;
  unsigned ip;
  unsigned subroutineTraceCount;
  unsigned argumentIndex;
  unsigned flags;
  bool watch;
  uintptr_t map[0];
};
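
// A promise which resolves to the machine address recorded for a
// TraceElement once that element's own address promise has been resolved.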
class TraceElementPromise: public Promise {
 public:
  TraceElementPromise(System* s, TraceElement* trace): s(s), trace(trace) { }

  virtual int64_t value() {
    assert(s, resolved());
    return trace->address->value();
  }

  virtual bool resolved() {
    return trace->address != 0 and trace->address->resolved();
  }

  System* s;
  TraceElement* trace;
};
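
// Event codes appended to Context::eventLog during compilation; the log is
// replayed afterwards to compute the per-instruction GC root maps.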
enum Event {
  PushContextEvent,
  PopContextEvent,
  IpEvent,
  MarkEvent,
  ClearEvent,
  PushExceptionHandlerEvent,
  TraceEvent,
  PushSubroutineEvent,
  PopSubroutineEvent
};
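
// A frame map holds one bit per local variable slot plus one per operand
// stack slot.  For example, a method with 3 locals and a maximum stack depth
// of 2 needs 5 bits, which rounds up to a single word on both 32-bit and
// 64-bit targets.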
unsigned
frameMapSizeInBits(MyThread* t, object method)
{
  return localSize(t, method) + codeMaxStack(t, methodCode(t, method));
}

unsigned
frameMapSizeInWords(MyThread* t, object method)
{
  return ceiling(frameMapSizeInBits(t, method), BitsPerWord);
}

uint16_t*
makeVisitTable(MyThread* t, Zone* zone, object method)
{
  unsigned size = codeLength(t, methodCode(t, method)) * 2;
  uint16_t* table = static_cast<uint16_t*>(zone->allocate(size));
  memset(table, 0, size);
  return table;
}

uintptr_t*
makeRootTable(MyThread* t, Zone* zone, object method)
{
  unsigned size = frameMapSizeInWords(t, method)
    * codeLength(t, methodCode(t, method))
    * BytesPerWord;
  uintptr_t* table = static_cast<uintptr_t*>(zone->allocate(size));
  memset(table, 0xFF, size);
  return table;
}
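
// One enumerator per runtime helper listed in thunks.cpp; getThunk (defined
// later) returns the entry point of the corresponding thunk.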
enum Thunk {
#define THUNK(s) s##Thunk,
#include "thunks.cpp"
#undef THUNK
};

const unsigned ThunkCount = gcIfNecessaryThunk + 1;

intptr_t
getThunk(MyThread* t, Thunk thunk);
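
// Extra state used when compiling ahead of time for a boot image: the
// constants and calls lists accumulate objects which must be fixed up when
// the image is loaded, and MyProtector keeps them visible to the GC.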
class BootContext {
 public:
  class MyProtector: public Thread::Protector {
   public:
    MyProtector(Thread* t, BootContext* c): Protector(t), c(c) { }

    virtual void visit(Heap::Visitor* v) {
      v->visit(&(c->constants));
      v->visit(&(c->calls));
    }

    BootContext* c;
  };

  BootContext(Thread* t, object constants, object calls,
              DelayedPromise* addresses, Zone* zone):
    protector(t, this), constants(constants), calls(calls),
    addresses(addresses), addressSentinal(addresses), zone(zone)
  { }

  MyProtector protector;
  object constants;
  object calls;
  DelayedPromise* addresses;
  DelayedPromise* addressSentinal;
  Zone* zone;
};
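
// Per-method compilation state: the assembler and compiler instances, the
// constant pool and trace log being accumulated for the method, the visit
// and root tables used for GC map calculation, and the event log recorded
// while translating bytecode.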
class Context {
 public:
  class MyProtector: public Thread::Protector {
   public:
    MyProtector(Context* c): Protector(c->thread), c(c) { }

    virtual void visit(Heap::Visitor* v) {
      v->visit(&(c->method));

      for (PoolElement* p = c->objectPool; p; p = p->next) {
        v->visit(&(p->target));
      }

      for (TraceElement* p = c->traceLog; p; p = p->next) {
        v->visit(&(p->target));
      }
    }

    Context* c;
  };
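
  // Compiler callback which supplies the addresses of helper thunks for
  // operations the backend prefers to perform out of line, such as 64-bit
  // division or floating point conversions.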
  class MyClient: public Compiler::Client {
   public:
    MyClient(MyThread* t): t(t) { }

    virtual intptr_t getThunk(UnaryOperation, unsigned) {
      abort(t);
    }

    virtual intptr_t getThunk(BinaryOperation op, unsigned size,
                              unsigned resultSize)
    {
      if (size == 8) {
        switch (op) {
        case Absolute:
          assert(t, resultSize == 8);
          return local::getThunk(t, absoluteLongThunk);

        case FloatNegate:
          assert(t, resultSize == 8);
          return local::getThunk(t, negateDoubleThunk);

        case FloatSquareRoot:
          assert(t, resultSize == 8);
          return local::getThunk(t, squareRootDoubleThunk);

        case Float2Float:
          assert(t, resultSize == 4);
          return local::getThunk(t, doubleToFloatThunk);

        case Float2Int:
          if (resultSize == 8) {
            return local::getThunk(t, doubleToLongThunk);
          } else {
            assert(t, resultSize == 4);
            return local::getThunk(t, doubleToIntThunk);
          }

        case Int2Float:
          if (resultSize == 8) {
            return local::getThunk(t, longToDoubleThunk);
          } else {
            assert(t, resultSize == 4);
            return local::getThunk(t, longToFloatThunk);
          }

        default: abort(t);
        }
      } else {
        assert(t, size == 4);

        switch (op) {
        case Absolute:
          assert(t, resultSize == 4);
          return local::getThunk(t, absoluteIntThunk);

        case FloatNegate:
          assert(t, resultSize == 4);
          return local::getThunk(t, negateFloatThunk);

        case FloatAbsolute:
          assert(t, resultSize == 4);
          return local::getThunk(t, absoluteFloatThunk);

        case Float2Float:
          assert(t, resultSize == 8);
          return local::getThunk(t, floatToDoubleThunk);

        case Float2Int:
          if (resultSize == 4) {
            return local::getThunk(t, floatToIntThunk);
          } else {
            assert(t, resultSize == 8);
            return local::getThunk(t, floatToLongThunk);
          }

        case Int2Float:
          if (resultSize == 4) {
            return local::getThunk(t, intToFloatThunk);
          } else {
            assert(t, resultSize == 8);
            return local::getThunk(t, intToDoubleThunk);
          }

        default: abort(t);
        }
      }
    }

    virtual intptr_t getThunk(TernaryOperation op, unsigned size, unsigned) {
      if (size == 8) {
        switch (op) {
        case Divide:
          return local::getThunk(t, divideLongThunk);

        case Remainder:
          return local::getThunk(t, moduloLongThunk);

        case FloatAdd:
          return local::getThunk(t, addDoubleThunk);

        case FloatSubtract:
          return local::getThunk(t, subtractDoubleThunk);

        case FloatMultiply:
          return local::getThunk(t, multiplyDoubleThunk);

        case FloatDivide:
          return local::getThunk(t, divideDoubleThunk);

        case FloatRemainder:
          return local::getThunk(t, moduloDoubleThunk);

        case JumpIfFloatEqual:
        case JumpIfFloatNotEqual:
        case JumpIfFloatLess:
        case JumpIfFloatGreater:
        case JumpIfFloatLessOrEqual:
        case JumpIfFloatGreaterOrUnordered:
        case JumpIfFloatGreaterOrEqualOrUnordered:
          return local::getThunk(t, compareDoublesGThunk);

        case JumpIfFloatGreaterOrEqual:
        case JumpIfFloatLessOrUnordered:
        case JumpIfFloatLessOrEqualOrUnordered:
          return local::getThunk(t, compareDoublesLThunk);

        default: abort(t);
        }
      } else {
        assert(t, size == 4);
        switch (op) {
        case Divide:
          return local::getThunk(t, divideIntThunk);

        case Remainder:
          return local::getThunk(t, moduloIntThunk);

        case FloatAdd:
          return local::getThunk(t, addFloatThunk);

        case FloatSubtract:
          return local::getThunk(t, subtractFloatThunk);

        case FloatMultiply:
          return local::getThunk(t, multiplyFloatThunk);

        case FloatDivide:
          return local::getThunk(t, divideFloatThunk);

        case FloatRemainder:
          return local::getThunk(t, moduloFloatThunk);

        case JumpIfFloatEqual:
        case JumpIfFloatNotEqual:
        case JumpIfFloatLess:
        case JumpIfFloatGreater:
        case JumpIfFloatLessOrEqual:
        case JumpIfFloatGreaterOrUnordered:
        case JumpIfFloatGreaterOrEqualOrUnordered:
          return local::getThunk(t, compareFloatsGThunk);

        case JumpIfFloatGreaterOrEqual:
        case JumpIfFloatLessOrUnordered:
        case JumpIfFloatLessOrEqualOrUnordered:
          return local::getThunk(t, compareFloatsLThunk);

        default: abort(t);
        }
      }
    }

    MyThread* t;
  };

  Context(MyThread* t, BootContext* bootContext, object method):
    thread(t),
    zone(t->m->system, t->m->heap, InitialZoneCapacityInBytes),
    assembler(makeAssembler(t->m->system, t->m->heap, &zone, t->arch)),
    client(t),
    compiler(makeCompiler(t->m->system, assembler, &zone, &client)),
    method(method),
    bootContext(bootContext),
    objectPool(0),
    subroutines(0),
    traceLog(0),
    visitTable(makeVisitTable(t, &zone, method)),
    rootTable(makeRootTable(t, &zone, method)),
    subroutineTable(0),
    objectPoolCount(0),
    traceLogCount(0),
    dirtyRoots(false),
    eventLog(t->m->system, t->m->heap, 1024),
    protector(this)
  { }

  Context(MyThread* t):
    thread(t),
    zone(t->m->system, t->m->heap, InitialZoneCapacityInBytes),
    assembler(makeAssembler(t->m->system, t->m->heap, &zone, t->arch)),
    client(t),
    compiler(0),
    method(0),
    bootContext(0),
    objectPool(0),
    subroutines(0),
    traceLog(0),
    visitTable(0),
    rootTable(0),
    subroutineTable(0),
    objectPoolCount(0),
    traceLogCount(0),
    dirtyRoots(false),
    eventLog(t->m->system, t->m->heap, 0),
    protector(this)
  { }

  ~Context() {
    if (compiler) compiler->dispose();
    assembler->dispose();
  }

  MyThread* thread;
  Zone zone;
  Assembler* assembler;
  MyClient client;
  Compiler* compiler;
  object method;
  BootContext* bootContext;
  PoolElement* objectPool;
  Subroutine* subroutines;
  TraceElement* traceLog;
  uint16_t* visitTable;
  uintptr_t* rootTable;
  Subroutine** subroutineTable;
  unsigned objectPoolCount;
  unsigned traceLogCount;
  bool dirtyRoots;
  Vector eventLog;
  MyProtector protector;
};
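
// Translates a bytecode local variable index into the index used by the
// compiler: parameter slots are mapped in reverse order, while non-parameter
// locals keep their original indices.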
unsigned
translateLocalIndex(Context* context, unsigned footprint, unsigned index)
{
  unsigned parameterFootprint = methodParameterFootprint
    (context->thread, context->method);

  if (index < parameterFootprint) {
    return parameterFootprint - index - footprint;
  } else {
    return index;
  }
}

Compiler::Operand*
loadLocal(Context* context, unsigned footprint, unsigned index)
{
  return context->compiler->loadLocal
    (footprint, translateLocalIndex(context, footprint, index));
}

void
storeLocal(Context* context, unsigned footprint, Compiler::Operand* value,
           unsigned index)
{
  context->compiler->storeLocal
    (footprint, value, translateLocalIndex(context, footprint, index));
}
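
// Tracks the Java operand stack and local variable slots while translating a
// method's bytecode: every push, pop, load and store is mirrored in the
// compiler, slot types are recorded in the stack map and event log so GC
// root maps can be derived, and trace elements are created at GC points.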
class Frame {
 public:
  enum StackType {
    Integer,
    Long,
    Object
  };

  Frame(Context* context, uint8_t* stackMap):
    context(context),
    t(context->thread),
    c(context->compiler),
    subroutine(0),
    stackMap(stackMap),
    ip(0),
    sp(localSize()),
    level(0)
  {
    memset(stackMap, 0, codeMaxStack(t, methodCode(t, context->method)));
  }

  Frame(Frame* f, uint8_t* stackMap):
    context(f->context),
    t(context->thread),
    c(context->compiler),
    subroutine(f->subroutine),
    stackMap(stackMap),
    ip(f->ip),
    sp(f->sp),
    level(f->level + 1)
  {
    memcpy(stackMap, f->stackMap, codeMaxStack
           (t, methodCode(t, context->method)));

    if (level > 1) {
      context->eventLog.append(PushContextEvent);
    }
  }

  ~Frame() {
    if (t->exception == 0) {
      if (level > 1) {
        context->eventLog.append(PopContextEvent);
      }
    }
  }

  Compiler::Operand* append(object o) {
    if (context->bootContext) {
      BootContext* bc = context->bootContext;

      Promise* p = new (bc->zone->allocate(sizeof(ListenPromise)))
        ListenPromise(t->m->system, bc->zone);

      PROTECT(t, o);
      object pointer = makePointer(t, p);
      bc->constants = makeTriple(t, o, pointer, bc->constants);

      return c->promiseConstant(p, Compiler::ObjectType);
    } else {
      for (PoolElement* e = context->objectPool; e; e = e->next) {
        if (o == e->target) {
          return c->address(e);
        }
      }

      context->objectPool = new
        (context->zone.allocate(sizeof(PoolElement)))
        PoolElement(t, o, context->objectPool);

      ++ context->objectPoolCount;

      return c->address(context->objectPool);
    }
  }

  unsigned localSize() {
    return local::localSize(t, context->method);
  }

  unsigned stackSize() {
    return codeMaxStack(t, methodCode(t, context->method));
  }

  unsigned frameSize() {
    return localSize() + stackSize();
  }

  void set(unsigned index, uint8_t type) {
    assert(t, index < frameSize());

    if (type == Object) {
      context->eventLog.append(MarkEvent);
      context->eventLog.append2(index);
    } else {
      context->eventLog.append(ClearEvent);
      context->eventLog.append2(index);
    }

    int si = index - localSize();
    if (si >= 0) {
      stackMap[si] = type;
    }
  }

  uint8_t get(unsigned index) {
    assert(t, index < frameSize());
    int si = index - localSize();
    assert(t, si >= 0);
    return stackMap[si];
  }

  void pushedInt() {
    assert(t, sp + 1 <= frameSize());
    set(sp++, Integer);
  }

  void pushedLong() {
    assert(t, sp + 2 <= frameSize());
    set(sp++, Long);
    set(sp++, Long);
  }

  void pushedObject() {
    assert(t, sp + 1 <= frameSize());
    set(sp++, Object);
  }

  void popped(unsigned count) {
    assert(t, sp >= count);
    assert(t, sp - count >= localSize());
    while (count) {
      set(--sp, Integer);
      -- count;
    }
  }

  void poppedInt() {
    assert(t, sp >= 1);
    assert(t, sp - 1 >= localSize());
    assert(t, get(sp - 1) == Integer);
    -- sp;
  }

  void poppedLong() {
    assert(t, sp >= 1);
    assert(t, sp - 2 >= localSize());
    assert(t, get(sp - 1) == Long);
    assert(t, get(sp - 2) == Long);
    sp -= 2;
  }

  void poppedObject() {
    assert(t, sp >= 1);
    assert(t, sp - 1 >= localSize());
    assert(t, get(sp - 1) == Object);
    set(--sp, Integer);
  }

  void storedInt(unsigned index) {
    assert(t, index < localSize());
    set(index, Integer);
  }

  void storedLong(unsigned index) {
    assert(t, index + 1 < localSize());
    set(index, Long);
    set(index + 1, Long);
  }

  void storedObject(unsigned index) {
    assert(t, index < localSize());
    set(index, Object);
  }

  void dupped() {
    assert(t, sp + 1 <= frameSize());
    assert(t, sp - 1 >= localSize());
    set(sp, get(sp - 1));
    ++ sp;
  }

  void duppedX1() {
    assert(t, sp + 1 <= frameSize());
    assert(t, sp - 2 >= localSize());

    uint8_t b2 = get(sp - 2);
    uint8_t b1 = get(sp - 1);

    set(sp - 1, b2);
    set(sp - 2, b1);
    set(sp    , b1);

    ++ sp;
  }

  void duppedX2() {
    assert(t, sp + 1 <= frameSize());
    assert(t, sp - 3 >= localSize());

    uint8_t b3 = get(sp - 3);
    uint8_t b2 = get(sp - 2);
    uint8_t b1 = get(sp - 1);

    set(sp - 2, b3);
    set(sp - 1, b2);
    set(sp - 3, b1);
    set(sp    , b1);

    ++ sp;
  }

  void dupped2() {
    assert(t, sp + 2 <= frameSize());
    assert(t, sp - 2 >= localSize());

    uint8_t b2 = get(sp - 2);
    uint8_t b1 = get(sp - 1);

    set(sp, b2);
    set(sp + 1, b1);

    sp += 2;
  }

  void dupped2X1() {
    assert(t, sp + 2 <= frameSize());
    assert(t, sp - 3 >= localSize());

    uint8_t b3 = get(sp - 3);
    uint8_t b2 = get(sp - 2);
    uint8_t b1 = get(sp - 1);

    set(sp - 1, b3);
    set(sp - 3, b2);
    set(sp    , b2);
    set(sp - 2, b1);
    set(sp + 1, b1);

    sp += 2;
  }

  void dupped2X2() {
    assert(t, sp + 2 <= frameSize());
    assert(t, sp - 4 >= localSize());

    uint8_t b4 = get(sp - 4);
    uint8_t b3 = get(sp - 3);
    uint8_t b2 = get(sp - 2);
    uint8_t b1 = get(sp - 1);

    set(sp - 2, b4);
    set(sp - 1, b3);
    set(sp - 4, b2);
    set(sp    , b2);
    set(sp - 3, b1);
    set(sp + 1, b1);

    sp += 2;
  }

  void swapped() {
    assert(t, sp - 2 >= localSize());

    uint8_t saved = get(sp - 1);

    set(sp - 1, get(sp - 2));
    set(sp - 2, saved);
  }

  Promise* addressPromise(Promise* p) {
    BootContext* bc = context->bootContext;
    if (bc) {
      bc->addresses = new (bc->zone->allocate(sizeof(DelayedPromise)))
        DelayedPromise(t->m->system, bc->zone, p, bc->addresses);
      return bc->addresses;
    } else {
      return p;
    }
  }

  Compiler::Operand* addressOperand(Promise* p) {
    return c->promiseConstant(addressPromise(p), Compiler::AddressType);
  }

  Compiler::Operand* machineIp(unsigned logicalIp) {
    return c->promiseConstant(c->machineIp(logicalIp), Compiler::AddressType);
  }

  void visitLogicalIp(unsigned ip) {
    c->visitLogicalIp(ip);

    context->eventLog.append(IpEvent);
    context->eventLog.append2(ip);
  }

  void startLogicalIp(unsigned ip) {
    if (subroutine) {
      context->subroutineTable[ip] = subroutine;
    }

    c->startLogicalIp(ip);

    context->eventLog.append(IpEvent);
    context->eventLog.append2(ip);

    this->ip = ip;
  }

  void pushQuiet(unsigned footprint, Compiler::Operand* o) {
    c->push(footprint, o);
  }

  void pushLongQuiet(Compiler::Operand* o) {
    pushQuiet(2, o);
  }

  Compiler::Operand* popQuiet(unsigned footprint) {
    return c->pop(footprint);
  }

  Compiler::Operand* popLongQuiet() {
    Compiler::Operand* r = popQuiet(2);
    return r;
  }

  void pushInt(Compiler::Operand* o) {
    pushQuiet(1, o);
    pushedInt();
  }

  void pushAddress(Compiler::Operand* o) {
    pushQuiet(1, o);
    pushedInt();
  }

  void pushObject(Compiler::Operand* o) {
    pushQuiet(1, o);
    pushedObject();
  }

  void pushObject() {
    c->pushed();

    pushedObject();
  }

  void pushLong(Compiler::Operand* o) {
    pushLongQuiet(o);
    pushedLong();
  }

  void pop(unsigned count) {
    popped(count);
    c->popped(count);
  }

  Compiler::Operand* popInt() {
    poppedInt();
    return popQuiet(1);
  }

  Compiler::Operand* popLong() {
    poppedLong();
    return popLongQuiet();
  }

  Compiler::Operand* popObject() {
    poppedObject();
    return popQuiet(1);
  }

  void loadInt(unsigned index) {
    assert(t, index < localSize());
    pushInt(loadLocal(context, 1, index));
  }

  void loadLong(unsigned index) {
    assert(t, index < static_cast<unsigned>(localSize() - 1));
    pushLong(loadLocal(context, 2, index));
  }

  void loadObject(unsigned index) {
    assert(t, index < localSize());
    pushObject(loadLocal(context, 1, index));
  }

  void storeInt(unsigned index) {
    storeLocal(context, 1, popInt(), index);
    storedInt(translateLocalIndex(context, 1, index));
  }

  void storeLong(unsigned index) {
    storeLocal(context, 2, popLong(), index);
    storedLong(translateLocalIndex(context, 2, index));
  }

  void storeObject(unsigned index) {
    storeLocal(context, 1, popObject(), index);
    storedObject(translateLocalIndex(context, 1, index));
  }

  void storeObjectOrAddress(unsigned index) {
    storeLocal(context, 1, popQuiet(1), index);

    assert(t, sp >= 1);
    assert(t, sp - 1 >= localSize());
    if (get(sp - 1) == Object) {
      storedObject(translateLocalIndex(context, 1, index));
    } else {
      storedInt(translateLocalIndex(context, 1, index));
    }

    popped(1);
  }

  void dup() {
    pushQuiet(1, c->peek(1, 0));

    dupped();
  }

  void dupX1() {
    Compiler::Operand* s0 = popQuiet(1);
    Compiler::Operand* s1 = popQuiet(1);

    pushQuiet(1, s0);
    pushQuiet(1, s1);
    pushQuiet(1, s0);

    duppedX1();
  }

  void dupX2() {
    Compiler::Operand* s0 = popQuiet(1);

    if (get(sp - 2) == Long) {
      Compiler::Operand* s1 = popLongQuiet();

      pushQuiet(1, s0);
      pushLongQuiet(s1);
      pushQuiet(1, s0);
    } else {
      Compiler::Operand* s1 = popQuiet(1);
      Compiler::Operand* s2 = popQuiet(1);

      pushQuiet(1, s0);
      pushQuiet(1, s2);
      pushQuiet(1, s1);
      pushQuiet(1, s0);
    }

    duppedX2();
  }

  void dup2() {
    if (get(sp - 1) == Long) {
      pushLongQuiet(c->peek(2, 0));
    } else {
      Compiler::Operand* s0 = popQuiet(1);
      Compiler::Operand* s1 = popQuiet(1);

      pushQuiet(1, s1);
      pushQuiet(1, s0);
      pushQuiet(1, s1);
      pushQuiet(1, s0);
    }

    dupped2();
  }

  void dup2X1() {
    if (get(sp - 1) == Long) {
      Compiler::Operand* s0 = popLongQuiet();
      Compiler::Operand* s1 = popQuiet(1);

      pushLongQuiet(s0);
      pushQuiet(1, s1);
      pushLongQuiet(s0);
    } else {
      Compiler::Operand* s0 = popQuiet(1);
      Compiler::Operand* s1 = popQuiet(1);
      Compiler::Operand* s2 = popQuiet(1);

      pushQuiet(1, s1);
      pushQuiet(1, s0);
      pushQuiet(1, s2);
      pushQuiet(1, s1);
      pushQuiet(1, s0);
    }

    dupped2X1();
  }

  void dup2X2() {
    if (get(sp - 1) == Long) {
      Compiler::Operand* s0 = popLongQuiet();

      if (get(sp - 3) == Long) {
        Compiler::Operand* s1 = popLongQuiet();

        pushLongQuiet(s0);
        pushLongQuiet(s1);
        pushLongQuiet(s0);
      } else {
        Compiler::Operand* s1 = popQuiet(1);
        Compiler::Operand* s2 = popQuiet(1);

        pushLongQuiet(s0);
        pushQuiet(1, s2);
        pushQuiet(1, s1);
        pushLongQuiet(s0);
      }
    } else {
      Compiler::Operand* s0 = popQuiet(1);
      Compiler::Operand* s1 = popQuiet(1);
      Compiler::Operand* s2 = popQuiet(1);
      Compiler::Operand* s3 = popQuiet(1);

      pushQuiet(1, s1);
      pushQuiet(1, s0);
      pushQuiet(1, s3);
      pushQuiet(1, s2);
      pushQuiet(1, s1);
      pushQuiet(1, s0);
    }

    dupped2X2();
  }

  void swap() {
    Compiler::Operand* s0 = popQuiet(1);
    Compiler::Operand* s1 = popQuiet(1);

    pushQuiet(1, s0);
    pushQuiet(1, s1);

    swapped();
  }

  // Records a GC trace point at the current bytecode ip, allocating the
  // TraceElement (with its inline root map) in the compilation zone and
  // logging a TraceEvent so the root map can be filled in later.
  TraceElement* trace(object target, unsigned flags) {
    unsigned mapSize = frameMapSizeInWords(t, context->method);

    TraceElement* e = context->traceLog = new
      (context->zone.allocate(sizeof(TraceElement) + (mapSize * BytesPerWord)))
      TraceElement(context, ip, target, flags, context->traceLog, mapSize);

    ++ context->traceLogCount;

    context->eventLog.append(TraceEvent);
    context->eventLog.appendAddress(e);

    return e;
  }
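
  // The following three methods implement the jsr/ret bytecodes: a Subroutine
  // is created (or reused) per entry point, each call site is recorded as a
  // SubroutineCall, and matching events are appended to the event log so root
  // maps can be computed separately for each calling path.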
  unsigned startSubroutine(unsigned ip, Promise* returnAddress) {
    pushAddress(addressOperand(returnAddress));

    Subroutine* subroutine = 0;
    for (Subroutine* s = context->subroutines; s; s = s->listNext) {
      if (s->ip == ip) {
        subroutine = s;
        break;
      }
    }

    if (subroutine == 0) {
      context->subroutines = subroutine = new
        (context->zone.allocate(sizeof(Subroutine)))
        Subroutine(ip, context->eventLog.length() + 1 + BytesPerWord + 2,
                   context->subroutines, this->subroutine);

      if (context->subroutineTable == 0) {
        unsigned size = codeLength(t, methodCode(t, context->method))
          * sizeof(Subroutine*);

        context->subroutineTable = static_cast<Subroutine**>
          (context->zone.allocate(size));

        memset(context->subroutineTable, 0, size);
      }
    }

    subroutine->handle = c->startSubroutine();
    this->subroutine = subroutine;

    SubroutineCall* call = new
      (context->zone.allocate(sizeof(SubroutineCall)))
      SubroutineCall(subroutine, returnAddress);

    context->eventLog.append(PushSubroutineEvent);
    context->eventLog.appendAddress(call);

    unsigned nextIndexIndex = context->eventLog.length();
    context->eventLog.append2(0);

    c->saveLocals();

    return nextIndexIndex;
  }

  void returnFromSubroutine(unsigned returnAddressLocal) {
    c->returnFromSubroutine
      (subroutine->handle, loadLocal(context, 1, returnAddressLocal));

    subroutine->stackIndex = localOffsetFromStack
      (t, translateLocalIndex(context, 1, returnAddressLocal),
       context->method);
  }

  void endSubroutine(unsigned nextIndexIndex) {
    c->linkSubroutine(subroutine->handle);

    poppedInt();

    context->eventLog.append(PopSubroutineEvent);

    context->eventLog.set2(nextIndexIndex, context->eventLog.length());

    subroutine = subroutine->stackNext;
  }
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2007-12-31 22:40:56 +00:00
|
|
|
Context* context;
|
2007-12-09 22:45:43 +00:00
|
|
|
MyThread* t;
|
|
|
|
Compiler* c;
|
2009-06-26 21:36:04 +00:00
|
|
|
Subroutine* subroutine;
|
2008-02-11 17:21:41 +00:00
|
|
|
uint8_t* stackMap;
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned ip;
|
|
|
|
unsigned sp;
|
2008-01-07 14:51:07 +00:00
|
|
|
unsigned level;
|
2007-12-30 22:24:48 +00:00
|
|
|
};
|
|
|
|
|
2008-01-20 18:55:08 +00:00
|
|
|
unsigned
|
|
|
|
savedTargetIndex(MyThread* t, object method)
|
|
|
|
{
|
|
|
|
return codeMaxLocals(t, methodCode(t, method));
|
|
|
|
}
|
|
|
|
|
2008-04-07 23:47:41 +00:00
|
|
|
object
|
|
|
|
findCallNode(MyThread* t, void* address);
|
|
|
|
|
|
|
|
void
|
|
|
|
insertCallNode(MyThread* t, object node);
|
|
|
|
|
2008-04-11 19:03:40 +00:00
|
|
|
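// Search the method's exception handler table for a handler covering the
// given machine ip.  The first table element is an int array of
// (start, end, handler) offset triples; the remaining elements are the
// corresponding catch types, where a null catch type acts as a
// catch-all.  Returns the compiled address of the first matching handler
// for the pending exception, or 0 if none applies.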
void*
|
|
|
|
findExceptionHandler(Thread* t, object method, void* ip)
|
|
|
|
{
|
2009-05-03 20:57:11 +00:00
|
|
|
if (t->exception) {
|
|
|
|
object table = codeExceptionHandlerTable(t, methodCode(t, method));
|
|
|
|
if (table) {
|
|
|
|
object index = arrayBody(t, table, 0);
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
uint8_t* compiled = reinterpret_cast<uint8_t*>
|
|
|
|
(methodCompiled(t, method));
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
for (unsigned i = 0; i < arrayLength(t, table) - 1; ++i) {
|
|
|
|
unsigned start = intArrayBody(t, index, i * 3);
|
|
|
|
unsigned end = intArrayBody(t, index, (i * 3) + 1);
|
|
|
|
unsigned key = difference(ip, compiled) - 1;
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
if (key >= start and key < end) {
|
|
|
|
object catchType = arrayBody(t, table, i + 1);
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
if (catchType == 0 or instanceOf(t, catchType, t->exception)) {
|
|
|
|
return compiled + intArrayBody(t, index, (i * 3) + 2);
|
|
|
|
}
|
2008-04-11 19:03:40 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
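// Release the monitor held by a synchronized method as its frame is
// unwound: the class object for static methods, otherwise the 'this'
// reference saved at savedTargetIndex(t, method) on method entry.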
void
|
2009-05-05 01:04:17 +00:00
|
|
|
releaseLock(MyThread* t, object method, void* stack)
|
2009-05-03 20:57:11 +00:00
|
|
|
{
|
|
|
|
if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
|
|
|
|
object lock;
|
|
|
|
if (methodFlags(t, method) & ACC_STATIC) {
|
|
|
|
lock = methodClass(t, method);
|
|
|
|
} else {
|
|
|
|
lock = *localObject
|
|
|
|
(t, stackForFrame(t, stack, method), method,
|
|
|
|
savedTargetIndex(t, method));
|
|
|
|
}
|
|
|
|
|
|
|
|
release(t, lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-01-01 17:08:47 +00:00
|
|
|
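// Find the frame to unwind to for the pending exception.  Starting from
// the thread's (or trace context's) ip, base, and stack, each compiled
// Java frame is checked for a matching handler; frames without one are
// popped, releasing their monitors if the method is synchronized.  When
// a handler is found, the handler address, adjusted stack pointer, and
// continuation are returned and the exception is stored into the frame's
// exception slot.  Once a non-Java frame is reached it becomes the
// target, and, when Continuations are enabled, the continuation chain is
// searched for a handler as well.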
void
|
|
|
|
findUnwindTarget(MyThread* t, void** targetIp, void** targetBase,
|
2010-06-16 01:10:48 +00:00
|
|
|
void** targetStack, object* targetContinuation)
|
2007-09-29 21:08:29 +00:00
|
|
|
{
|
2010-06-16 01:10:48 +00:00
|
|
|
void* ip;
|
|
|
|
void* base;
|
|
|
|
void* stack;
|
|
|
|
object continuation;
|
|
|
|
|
|
|
|
if (t->traceContext) {
|
|
|
|
ip = t->traceContext->ip;
|
|
|
|
base = t->traceContext->base;
|
|
|
|
stack = t->traceContext->stack;
|
|
|
|
continuation = t->traceContext->continuation;
|
|
|
|
} else {
|
|
|
|
ip = 0;
|
|
|
|
base = t->base;
|
|
|
|
stack = t->stack;
|
|
|
|
continuation = t->continuation;
|
|
|
|
}
|
|
|
|
|
2008-04-23 16:33:31 +00:00
|
|
|
if (ip == 0) {
|
2008-08-18 15:23:01 +00:00
|
|
|
ip = t->arch->frameIp(stack);
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
|
|
|
|
2009-05-17 00:39:08 +00:00
|
|
|
object target = t->trace->targetMethod;
|
|
|
|
|
2008-01-01 17:08:47 +00:00
|
|
|
*targetIp = 0;
|
|
|
|
while (*targetIp == 0) {
|
2008-04-07 23:47:41 +00:00
|
|
|
object method = methodForIp(t, ip);
|
|
|
|
if (method) {
|
2009-05-24 01:49:14 +00:00
|
|
|
void* handler = findExceptionHandler(t, method, ip);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2009-04-26 22:06:15 +00:00
|
|
|
if (handler) {
|
|
|
|
*targetIp = handler;
|
|
|
|
*targetBase = base;
|
2009-04-26 21:55:35 +00:00
|
|
|
|
2009-04-26 22:06:15 +00:00
|
|
|
t->arch->nextFrame(&stack, &base);
|
2009-04-26 21:55:35 +00:00
|
|
|
|
2009-04-26 22:06:15 +00:00
|
|
|
void** sp = static_cast<void**>(stackForFrame(t, stack, method))
|
2009-02-27 01:54:25 +00:00
|
|
|
+ t->arch->frameReturnAddressSize();
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2009-04-26 22:06:15 +00:00
|
|
|
*targetStack = sp;
|
2010-06-16 01:10:48 +00:00
|
|
|
*targetContinuation = continuation;
|
2009-04-26 22:06:15 +00:00
|
|
|
|
2009-05-17 23:43:48 +00:00
|
|
|
sp[localOffset(t, localSize(t, method), method)] = t->exception;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2008-08-18 15:23:01 +00:00
|
|
|
t->exception = 0;
|
2007-12-09 22:45:43 +00:00
|
|
|
} else {
|
2009-04-26 22:06:15 +00:00
|
|
|
t->arch->nextFrame(&stack, &base);
|
|
|
|
ip = t->arch->frameIp(stack);
|
|
|
|
|
2009-05-23 22:15:06 +00:00
|
|
|
if (t->exception) {
|
|
|
|
releaseLock(t, method, stack);
|
|
|
|
}
|
2009-05-17 00:39:08 +00:00
|
|
|
|
|
|
|
target = method;
|
2009-05-03 20:57:11 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
*targetIp = ip;
|
|
|
|
*targetBase = base;
|
|
|
|
*targetStack = static_cast<void**>(stack)
|
|
|
|
+ t->arch->frameReturnAddressSize();
|
2010-06-16 01:10:48 +00:00
|
|
|
*targetContinuation = continuation;
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
while (Continuations and *targetContinuation) {
|
|
|
|
object c = *targetContinuation;
|
2009-05-25 04:27:50 +00:00
|
|
|
|
|
|
|
object method = continuationMethod(t, c);
|
2009-05-17 00:39:08 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
void* handler = findExceptionHandler
|
2009-05-25 04:27:50 +00:00
|
|
|
(t, method, continuationAddress(t, c));
|
2009-05-03 20:57:11 +00:00
|
|
|
|
|
|
|
if (handler) {
|
|
|
|
t->exceptionHandler = handler;
|
|
|
|
|
2009-05-25 04:27:50 +00:00
|
|
|
t->exceptionStackAdjustment
|
|
|
|
= (stackOffsetFromFrame(t, method)
|
|
|
|
- ((continuationFramePointerOffset(t, c) / BytesPerWord)
|
|
|
|
- t->arch->framePointerOffset()
|
|
|
|
+ t->arch->frameReturnAddressSize())) * BytesPerWord;
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2009-05-17 23:43:48 +00:00
|
|
|
t->exceptionOffset
|
|
|
|
= localOffset(t, localSize(t, method), method) * BytesPerWord;
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
break;
|
2009-05-23 22:15:06 +00:00
|
|
|
} else if (t->exception) {
|
2009-05-17 00:39:08 +00:00
|
|
|
releaseLock(t, method,
|
2009-05-25 04:27:50 +00:00
|
|
|
reinterpret_cast<uint8_t*>(c)
|
2009-05-05 01:04:17 +00:00
|
|
|
+ ContinuationBody
|
2009-05-25 04:27:50 +00:00
|
|
|
+ continuationReturnAddressOffset(t, c)
|
2009-05-16 08:03:03 +00:00
|
|
|
- t->arch->returnAddressOffset());
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
*targetContinuation = continuationNext(t, c);
|
2009-05-03 20:57:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
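// Capture the current Java stack as a chain of continuation objects, one
// per compiled frame, copying each frame's contents (including its
// footer and argument area) into the continuation body.  The new chain
// is linked onto the thread's existing continuation and returned;
// targetIp, targetBase, and targetStack receive the position of the
// first non-Java frame encountered.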
object
|
2009-05-25 04:27:50 +00:00
|
|
|
makeCurrentContinuation(MyThread* t, void** targetIp, void** targetBase,
|
2009-05-29 00:56:05 +00:00
|
|
|
void** targetStack)
|
2009-05-03 20:57:11 +00:00
|
|
|
{
|
2010-06-16 01:10:48 +00:00
|
|
|
void* ip = t->arch->frameIp(t->stack);
|
2009-05-03 20:57:11 +00:00
|
|
|
void* base = t->base;
|
|
|
|
void* stack = t->stack;
|
|
|
|
|
2009-05-25 04:27:50 +00:00
|
|
|
object context = t->continuation
|
|
|
|
? continuationContext(t, t->continuation)
|
|
|
|
: makeContinuationContext(t, 0, 0, 0, 0, t->trace->originalMethod);
|
|
|
|
PROTECT(t, context);
|
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
object target = t->trace->targetMethod;
|
|
|
|
PROTECT(t, target);
|
|
|
|
|
|
|
|
object first = 0;
|
|
|
|
PROTECT(t, first);
|
|
|
|
|
|
|
|
object last = 0;
|
|
|
|
PROTECT(t, last);
|
|
|
|
|
|
|
|
*targetIp = 0;
|
|
|
|
while (*targetIp == 0) {
|
|
|
|
object method = methodForIp(t, ip);
|
|
|
|
if (method) {
|
|
|
|
PROTECT(t, method);
|
|
|
|
|
2009-05-16 08:03:03 +00:00
|
|
|
void** top = static_cast<void**>(stack)
|
2009-05-29 00:56:05 +00:00
|
|
|
+ t->arch->frameReturnAddressSize()
|
|
|
|
+ t->arch->frameFooterSize();
|
2009-05-03 20:57:11 +00:00
|
|
|
unsigned argumentFootprint
|
|
|
|
= t->arch->argumentFootprint(methodParameterFootprint(t, target));
|
|
|
|
unsigned alignment = t->arch->stackAlignmentInWords();
|
2009-05-26 05:27:10 +00:00
|
|
|
if (TailCalls and argumentFootprint > alignment) {
|
2009-05-03 20:57:11 +00:00
|
|
|
top += argumentFootprint - alignment;
|
|
|
|
}
|
|
|
|
|
|
|
|
t->arch->nextFrame(&stack, &base);
|
|
|
|
|
2009-05-16 08:03:03 +00:00
|
|
|
void** bottom = static_cast<void**>(stack)
|
|
|
|
+ t->arch->frameReturnAddressSize();
|
2009-05-03 20:57:11 +00:00
|
|
|
unsigned frameSize = bottom - top;
|
|
|
|
unsigned totalSize = frameSize
|
|
|
|
+ t->arch->frameFooterSize()
|
|
|
|
+ t->arch->argumentFootprint(methodParameterFootprint(t, method));
|
|
|
|
|
|
|
|
object c = makeContinuation
|
2009-05-23 22:15:06 +00:00
|
|
|
(t, 0, context, method, ip,
|
2009-05-16 08:03:03 +00:00
|
|
|
((frameSize
|
2009-05-29 00:56:05 +00:00
|
|
|
+ t->arch->frameFooterSize()
|
2009-05-16 08:03:03 +00:00
|
|
|
+ t->arch->returnAddressOffset()
|
|
|
|
- t->arch->frameReturnAddressSize()) * BytesPerWord),
|
|
|
|
((frameSize
|
2009-05-29 00:56:05 +00:00
|
|
|
+ t->arch->frameFooterSize()
|
2009-05-16 08:03:03 +00:00
|
|
|
+ t->arch->framePointerOffset()
|
|
|
|
- t->arch->frameReturnAddressSize()) * BytesPerWord),
|
2009-05-03 20:57:11 +00:00
|
|
|
totalSize);
|
|
|
|
|
|
|
|
memcpy(&continuationBody(t, c, 0), top, totalSize * BytesPerWord);
|
|
|
|
|
|
|
|
if (last) {
|
|
|
|
set(t, last, ContinuationNext, c);
|
|
|
|
} else {
|
|
|
|
first = c;
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
2009-05-03 20:57:11 +00:00
|
|
|
last = c;
|
|
|
|
|
|
|
|
ip = t->arch->frameIp(stack);
|
|
|
|
|
|
|
|
target = method;
|
2007-10-12 22:06:33 +00:00
|
|
|
} else {
|
2008-01-01 17:08:47 +00:00
|
|
|
*targetIp = ip;
|
|
|
|
*targetBase = base;
|
2008-11-09 23:56:37 +00:00
|
|
|
*targetStack = static_cast<void**>(stack)
|
2009-12-03 06:09:05 +00:00
|
|
|
+ t->arch->frameReturnAddressSize();
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
|
|
|
}
|
2009-05-03 20:57:11 +00:00
|
|
|
|
|
|
|
expect(t, last);
|
|
|
|
set(t, last, ContinuationNext, t->continuation);
|
|
|
|
|
|
|
|
return first;
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
|
|
|
|
2008-01-01 17:08:47 +00:00
|
|
|
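// Unwind to the target chosen by findUnwindTarget, publishing the new
// ip, stack, base, and continuation via transition so that concurrent
// stack traces see a consistent view, then jump there.  Never returns.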
void NO_RETURN
|
|
|
|
unwind(MyThread* t)
|
|
|
|
{
|
|
|
|
void* ip;
|
|
|
|
void* base;
|
|
|
|
void* stack;
|
2010-06-16 01:10:48 +00:00
|
|
|
object continuation;
|
|
|
|
findUnwindTarget(t, &ip, &base, &stack, &continuation);
|
|
|
|
|
|
|
|
transition(t, ip, stack, base, continuation, t->trace);
|
|
|
|
|
2009-05-05 01:04:17 +00:00
|
|
|
vmJump(ip, base, stack, t, 0, 0);
|
2008-01-01 17:08:47 +00:00
|
|
|
}
|
|
|
|
|
2008-12-02 02:38:00 +00:00
|
|
|
uintptr_t
|
|
|
|
defaultThunk(MyThread* t);
|
|
|
|
|
|
|
|
uintptr_t
|
|
|
|
nativeThunk(MyThread* t);
|
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
uintptr_t
|
|
|
|
bootNativeThunk(MyThread* t);
|
|
|
|
|
2008-12-02 02:38:00 +00:00
|
|
|
uintptr_t
|
|
|
|
aioobThunk(MyThread* t);
|
|
|
|
|
2009-04-07 00:34:12 +00:00
|
|
|
uintptr_t
|
|
|
|
virtualThunk(MyThread* t, unsigned index);
|
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
bool
|
|
|
|
unresolved(MyThread* t, uintptr_t methodAddress);
|
|
|
|
|
2008-12-02 02:38:00 +00:00
|
|
|
uintptr_t
|
|
|
|
methodAddress(Thread* t, object method)
|
|
|
|
{
|
|
|
|
if (methodFlags(t, method) & ACC_NATIVE) {
|
|
|
|
return nativeThunk(static_cast<MyThread*>(t));
|
|
|
|
} else {
|
|
|
|
return methodCompiled(t, method);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
void
|
|
|
|
tryInitClass(MyThread* t, object class_)
|
|
|
|
{
|
|
|
|
initClass(t, class_);
|
|
|
|
if (UNLIKELY(t->exception)) unwind(t);
|
|
|
|
}
|
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
FixedAllocator*
|
|
|
|
codeAllocator(MyThread* t);
|
|
|
|
|
2009-04-07 00:34:12 +00:00
|
|
|
void
|
|
|
|
compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
|
|
|
|
object method);
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
int64_t
|
2007-12-30 22:24:48 +00:00
|
|
|
findInterfaceMethodFromInstance(MyThread* t, object method, object instance)
|
2007-10-16 17:21:26 +00:00
|
|
|
{
|
2007-12-30 22:24:48 +00:00
|
|
|
if (instance) {
|
2009-04-05 21:42:10 +00:00
|
|
|
object target = findInterfaceMethod(t, method, objectClass(t, instance));
|
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
if (unresolved(t, methodAddress(t, target))) {
|
2009-04-05 21:42:10 +00:00
|
|
|
PROTECT(t, target);
|
|
|
|
|
|
|
|
compile(t, codeAllocator(t), 0, target);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (UNLIKELY(t->exception)) {
|
|
|
|
unwind(t);
|
|
|
|
} else {
|
2009-05-16 08:03:03 +00:00
|
|
|
if (methodFlags(t, target) & ACC_NATIVE) {
|
|
|
|
t->trace->nativeMethod = target;
|
|
|
|
}
|
2009-04-05 21:42:10 +00:00
|
|
|
return methodAddress(t, target);
|
|
|
|
}
|
2007-12-30 22:24:48 +00:00
|
|
|
} else {
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NullPointerExceptionType);
|
2007-12-30 22:24:48 +00:00
|
|
|
unwind(t);
|
|
|
|
}
|
2007-10-16 17:21:26 +00:00
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
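// Comparison thunks for the dcmp<op>, fcmp<op>, and lcmp bytecodes.
// They take their operands in (b, a) order, as the parameter names
// indicate, and the G variants return 1 while the L variants return -1
// when either operand is NaN, matching the bytecode semantics.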
int64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
compareDoublesG(uint64_t bi, uint64_t ai)
|
|
|
|
{
|
|
|
|
double a = bitsToDouble(ai);
|
|
|
|
double b = bitsToDouble(bi);
|
|
|
|
|
|
|
|
if (a < b) {
|
|
|
|
return -1;
|
|
|
|
} else if (a > b) {
|
|
|
|
return 1;
|
|
|
|
} else if (a == b) {
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
int64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
compareDoublesL(uint64_t bi, uint64_t ai)
|
|
|
|
{
|
|
|
|
double a = bitsToDouble(ai);
|
|
|
|
double b = bitsToDouble(bi);
|
|
|
|
|
|
|
|
if (a < b) {
|
|
|
|
return -1;
|
|
|
|
} else if (a > b) {
|
|
|
|
return 1;
|
|
|
|
} else if (a == b) {
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
int64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
compareFloatsG(uint32_t bi, uint32_t ai)
|
|
|
|
{
|
|
|
|
float a = bitsToFloat(ai);
|
|
|
|
float b = bitsToFloat(bi);
|
|
|
|
|
|
|
|
if (a < b) {
|
|
|
|
return -1;
|
|
|
|
} else if (a > b) {
|
|
|
|
return 1;
|
|
|
|
} else if (a == b) {
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
int64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
compareFloatsL(uint32_t bi, uint32_t ai)
|
|
|
|
{
|
|
|
|
float a = bitsToFloat(ai);
|
|
|
|
float b = bitsToFloat(bi);
|
|
|
|
|
|
|
|
if (a < b) {
|
|
|
|
return -1;
|
|
|
|
} else if (a > b) {
|
|
|
|
return 1;
|
|
|
|
} else if (a == b) {
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-10-07 00:50:32 +00:00
|
|
|
int64_t
|
|
|
|
compareLongs(uint64_t b, uint64_t a)
|
|
|
|
{
|
|
|
|
if (a < b) {
|
|
|
|
return -1;
|
|
|
|
} else if (a > b) {
|
|
|
|
return 1;
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
addDouble(uint64_t b, uint64_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(bitsToDouble(a) + bitsToDouble(b));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
subtractDouble(uint64_t b, uint64_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(bitsToDouble(a) - bitsToDouble(b));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
multiplyDouble(uint64_t b, uint64_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(bitsToDouble(a) * bitsToDouble(b));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
divideDouble(uint64_t b, uint64_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(bitsToDouble(a) / bitsToDouble(b));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
moduloDouble(uint64_t b, uint64_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(fmod(bitsToDouble(a), bitsToDouble(b)));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
negateDouble(uint64_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(- bitsToDouble(a));
|
|
|
|
}
|
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
uint64_t
|
|
|
|
squareRootDouble(uint64_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(sqrt(bitsToDouble(a)));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
doubleToFloat(int64_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(static_cast<float>(bitsToDouble(a)));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
int64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
doubleToInt(int64_t a)
|
|
|
|
{
|
|
|
|
return static_cast<int32_t>(bitsToDouble(a));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
int64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
doubleToLong(int64_t a)
|
|
|
|
{
|
|
|
|
return static_cast<int64_t>(bitsToDouble(a));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
addFloat(uint32_t b, uint32_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(bitsToFloat(a) + bitsToFloat(b));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
subtractFloat(uint32_t b, uint32_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(bitsToFloat(a) - bitsToFloat(b));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
multiplyFloat(uint32_t b, uint32_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(bitsToFloat(a) * bitsToFloat(b));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
divideFloat(uint32_t b, uint32_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(bitsToFloat(a) / bitsToFloat(b));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
moduloFloat(uint32_t b, uint32_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(fmod(bitsToFloat(a), bitsToFloat(b)));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
negateFloat(uint32_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(- bitsToFloat(a));
|
|
|
|
}
|
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
uint64_t
|
|
|
|
absoluteFloat(uint32_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(fabsf(bitsToFloat(a)));
|
|
|
|
}
|
|
|
|
|
2009-12-01 16:21:33 +00:00
|
|
|
int64_t
|
|
|
|
absoluteLong(int64_t a)
|
|
|
|
{
|
|
|
|
return a > 0 ? a : -a;
|
|
|
|
}
|
|
|
|
|
|
|
|
int64_t
|
|
|
|
absoluteInt(int32_t a)
|
|
|
|
{
|
|
|
|
return a > 0 ? a : -a;
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
int64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
divideLong(int64_t b, int64_t a)
|
2007-10-08 21:41:41 +00:00
|
|
|
{
|
|
|
|
return a / b;
|
|
|
|
}
|
|
|
|
|
2009-10-29 16:12:30 +00:00
|
|
|
int64_t
|
|
|
|
divideInt(int32_t b, int32_t a)
|
|
|
|
{
|
|
|
|
return a / b;
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
int64_t
|
2007-10-15 19:12:38 +00:00
|
|
|
moduloLong(int64_t b, int64_t a)
|
2007-10-08 21:41:41 +00:00
|
|
|
{
|
|
|
|
return a % b;
|
|
|
|
}
|
|
|
|
|
2009-10-29 20:14:44 +00:00
|
|
|
int64_t
|
|
|
|
moduloInt(int32_t b, int32_t a)
{
|
|
|
|
return a % b;
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
floatToDouble(int32_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(static_cast<double>(bitsToFloat(a)));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
int64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
floatToInt(int32_t a)
|
|
|
|
{
|
|
|
|
return static_cast<int32_t>(bitsToFloat(a));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
int64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
floatToLong(int32_t a)
|
|
|
|
{
|
|
|
|
return static_cast<int64_t>(bitsToFloat(a));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
intToDouble(int32_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(static_cast<double>(a));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
intToFloat(int32_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(static_cast<float>(a));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
uint64_t
|
2008-03-21 00:37:58 +00:00
|
|
|
longToDouble(int64_t a)
|
|
|
|
{
|
|
|
|
return doubleToBits(static_cast<double>(a));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2008-03-21 00:37:58 +00:00
|
|
|
longToFloat(int64_t a)
|
|
|
|
{
|
|
|
|
return floatToBits(static_cast<float>(a));
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2010-09-14 16:49:41 +00:00
|
|
|
makeBlankObjectArray(MyThread* t, object class_, int32_t length)
|
2007-09-30 02:48:27 +00:00
|
|
|
{
|
2008-04-26 20:56:03 +00:00
|
|
|
if (length >= 0) {
|
2010-09-14 16:49:41 +00:00
|
|
|
return reinterpret_cast<uint64_t>(makeObjectArray(t, class_, length));
|
2008-04-26 20:56:03 +00:00
|
|
|
} else {
|
|
|
|
object message = makeString(t, "%d", length);
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NegativeArraySizeExceptionType, message);
|
2008-04-26 20:56:03 +00:00
|
|
|
unwind(t);
|
|
|
|
}
|
2007-09-30 02:48:27 +00:00
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2008-11-30 01:39:42 +00:00
|
|
|
makeBlankArray(MyThread* t, unsigned type, int32_t length)
|
2007-09-30 02:48:27 +00:00
|
|
|
{
|
2008-04-26 20:56:03 +00:00
|
|
|
if (length >= 0) {
|
2009-03-04 03:05:48 +00:00
|
|
|
object (*constructor)(Thread*, uintptr_t);
|
2008-11-30 01:39:42 +00:00
|
|
|
switch (type) {
|
|
|
|
case T_BOOLEAN:
|
|
|
|
constructor = makeBooleanArray;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case T_CHAR:
|
|
|
|
constructor = makeCharArray;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case T_FLOAT:
|
|
|
|
constructor = makeFloatArray;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case T_DOUBLE:
|
|
|
|
constructor = makeDoubleArray;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case T_BYTE:
|
|
|
|
constructor = makeByteArray;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case T_SHORT:
|
|
|
|
constructor = makeShortArray;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case T_INT:
|
|
|
|
constructor = makeIntArray;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case T_LONG:
|
|
|
|
constructor = makeLongArray;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default: abort(t);
|
|
|
|
}
|
|
|
|
|
2009-03-04 03:05:48 +00:00
|
|
|
return reinterpret_cast<uintptr_t>(constructor(t, length));
|
2008-04-26 20:56:03 +00:00
|
|
|
} else {
|
|
|
|
object message = makeString(t, "%d", length);
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NegativeArraySizeExceptionType, message);
|
2008-04-26 20:56:03 +00:00
|
|
|
unwind(t);
|
|
|
|
}
|
2007-09-30 02:48:27 +00:00
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
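// Binary search over a table of (key, address) pairs stored as
// consecutive machine words starting at 'start', assumed to be sorted by
// key in ascending order (as used for the lookupswitch bytecode).
// Returns the address paired with 'key', or default_ if absent.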
uint64_t
|
2007-12-09 22:45:43 +00:00
|
|
|
lookUpAddress(int32_t key, uintptr_t* start, int32_t count,
|
2007-12-17 20:55:31 +00:00
|
|
|
uintptr_t default_)
|
2007-10-01 15:19:15 +00:00
|
|
|
{
|
2007-12-09 22:45:43 +00:00
|
|
|
int32_t bottom = 0;
|
|
|
|
int32_t top = count;
|
|
|
|
for (int32_t span = top - bottom; span; span = top - bottom) {
|
|
|
|
int32_t middle = bottom + (span / 2);
|
|
|
|
uintptr_t* p = start + (middle * 2);
|
|
|
|
int32_t k = *p;
|
|
|
|
|
|
|
|
if (key < k) {
|
|
|
|
top = middle;
|
|
|
|
} else if (key > k) {
|
|
|
|
bottom = middle + 1;
|
|
|
|
} else {
|
|
|
|
return p[1];
|
2007-10-01 15:19:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-12-17 20:55:31 +00:00
|
|
|
return default_;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-01 15:19:15 +00:00
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
void
|
2007-12-30 22:24:48 +00:00
|
|
|
setMaybeNull(MyThread* t, object o, unsigned offset, object value)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2007-12-31 22:40:56 +00:00
|
|
|
if (LIKELY(o)) {
|
2007-12-30 22:24:48 +00:00
|
|
|
set(t, o, offset, value);
|
|
|
|
} else {
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NullPointerExceptionType);
|
2007-12-30 22:24:48 +00:00
|
|
|
unwind(t);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
void
|
2007-12-30 22:24:48 +00:00
|
|
|
acquireMonitorForObject(MyThread* t, object o)
|
|
|
|
{
|
2007-12-31 22:40:56 +00:00
|
|
|
if (LIKELY(o)) {
|
2007-12-30 22:24:48 +00:00
|
|
|
acquire(t, o);
|
|
|
|
} else {
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NullPointerExceptionType);
|
2007-12-30 22:24:48 +00:00
|
|
|
unwind(t);
|
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-01 15:19:15 +00:00
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
void
|
2007-12-30 22:24:48 +00:00
|
|
|
releaseMonitorForObject(MyThread* t, object o)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2007-12-31 22:40:56 +00:00
|
|
|
if (LIKELY(o)) {
|
2007-12-30 22:24:48 +00:00
|
|
|
release(t, o);
|
|
|
|
} else {
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NullPointerExceptionType);
|
2007-12-30 22:24:48 +00:00
|
|
|
unwind(t);
|
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-01 15:19:15 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
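// Build a multidimensional array: the dimension counts are copied (in
// reverse) from the given stack slots, each is checked for a negative
// size, the outermost array is allocated with the given class, and
// populateMultiArray fills in the nested arrays.  Returns 0 with an
// exception pending if any count is negative.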
object
|
2008-11-11 00:07:44 +00:00
|
|
|
makeMultidimensionalArray2(MyThread* t, object class_, uintptr_t* countStack,
|
2007-12-09 22:45:43 +00:00
|
|
|
int32_t dimensions)
|
|
|
|
{
|
|
|
|
PROTECT(t, class_);
|
2007-10-01 15:19:15 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY(int32_t, counts, dimensions);
|
2007-12-09 22:45:43 +00:00
|
|
|
for (int i = dimensions - 1; i >= 0; --i) {
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(counts)[i] = countStack[dimensions - i - 1];
|
|
|
|
if (UNLIKELY(RUNTIME_ARRAY_BODY(counts)[i] < 0)) {
|
|
|
|
object message = makeString(t, "%d", RUNTIME_ARRAY_BODY(counts)[i]);
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NegativeArraySizeExceptionType, message);
|
2007-12-09 22:45:43 +00:00
|
|
|
return 0;
|
2007-10-01 15:19:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
object array = makeArray(t, RUNTIME_ARRAY_BODY(counts)[0]);
|
2007-12-09 22:45:43 +00:00
|
|
|
setObjectClass(t, array, class_);
|
|
|
|
PROTECT(t, array);
|
2007-10-01 15:19:15 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
populateMultiArray(t, array, RUNTIME_ARRAY_BODY(counts), 0, dimensions);
|
2007-10-01 15:19:15 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
return array;
|
2007-10-01 15:19:15 +00:00
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
2008-05-18 15:45:11 +00:00
|
|
|
makeMultidimensionalArray(MyThread* t, object class_, int32_t dimensions,
|
2008-11-11 00:07:44 +00:00
|
|
|
int32_t offset)
|
2007-10-01 15:19:15 +00:00
|
|
|
{
|
2008-11-11 00:07:44 +00:00
|
|
|
object r = makeMultidimensionalArray2
|
|
|
|
(t, class_, static_cast<uintptr_t*>(t->stack) + offset, dimensions);
|
|
|
|
|
2007-10-01 15:19:15 +00:00
|
|
|
if (UNLIKELY(t->exception)) {
|
|
|
|
unwind(t);
|
|
|
|
} else {
|
2009-02-28 21:20:43 +00:00
|
|
|
return reinterpret_cast<uintptr_t>(r);
|
2007-10-01 15:19:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-05-31 22:14:27 +00:00
|
|
|
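// Compute the heap space needed to record a stack trace for the current
// thread: a fixed array header plus one array element and one
// TraceElement per frame, counted by walking the stack.  Used below to
// reserve space before allocating an exception together with its trace.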
unsigned
|
|
|
|
traceSize(Thread* t)
|
2007-09-27 00:01:38 +00:00
|
|
|
{
|
2008-05-31 22:14:27 +00:00
|
|
|
class Counter: public Processor::StackVisitor {
|
|
|
|
public:
|
|
|
|
Counter(): count(0) { }
|
|
|
|
|
|
|
|
virtual bool visit(Processor::StackWalker*) {
|
|
|
|
++ count;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned count;
|
|
|
|
} counter;
|
|
|
|
|
|
|
|
t->m->processor->walkStack(t, &counter);
|
|
|
|
|
|
|
|
return FixedSizeOfArray + (counter.count * ArrayElementSizeOfArray)
|
|
|
|
+ (counter.count * FixedSizeOfTraceElement);
|
2008-01-03 18:37:00 +00:00
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
void NO_RETURN
|
2008-05-31 22:14:27 +00:00
|
|
|
throwArrayIndexOutOfBounds(MyThread* t)
|
2008-01-03 18:37:00 +00:00
|
|
|
{
|
2010-06-19 22:40:21 +00:00
|
|
|
if (ensure(t, FixedSizeOfArrayIndexOutOfBoundsException + traceSize(t))) {
|
2010-09-14 16:49:41 +00:00
|
|
|
atomicOr(&(t->flags), Thread::TracingFlag);
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
2010-09-14 16:49:41 +00:00
|
|
|
(t, Machine::ArrayIndexOutOfBoundsExceptionType);
|
|
|
|
atomicAnd(&(t->flags), ~Thread::TracingFlag);
|
2010-06-19 22:40:21 +00:00
|
|
|
} else {
|
|
|
|
// not enough memory available for a new exception and stack trace
|
|
|
|
// -- use a preallocated instance instead
|
2010-09-14 16:49:41 +00:00
|
|
|
t->exception = root(t, Machine::ArrayIndexOutOfBoundsException);
|
2010-06-19 22:40:21 +00:00
|
|
|
}
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
unwind(t);
|
2007-09-27 00:01:38 +00:00
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
void NO_RETURN
|
2007-12-09 22:45:43 +00:00
|
|
|
throw_(MyThread* t, object o)
|
2007-10-09 17:15:40 +00:00
|
|
|
{
|
2007-12-31 22:40:56 +00:00
|
|
|
if (LIKELY(o)) {
|
2007-12-09 22:45:43 +00:00
|
|
|
t->exception = o;
|
|
|
|
} else {
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NullPointerExceptionType);
|
2007-10-08 21:41:41 +00:00
|
|
|
}
|
2009-08-10 13:56:16 +00:00
|
|
|
|
2009-08-13 01:32:12 +00:00
|
|
|
// printTrace(t, t->exception);
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
unwind(t);
|
|
|
|
}
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
void
|
2007-12-23 00:00:35 +00:00
|
|
|
checkCast(MyThread* t, object class_, object o)
|
|
|
|
{
|
|
|
|
if (UNLIKELY(o and not isAssignableFrom(t, class_, objectClass(t, o)))) {
|
2008-01-15 23:33:20 +00:00
|
|
|
object message = makeString
|
|
|
|
(t, "%s as %s",
|
|
|
|
&byteArrayBody(t, className(t, objectClass(t, o)), 0),
|
|
|
|
&byteArrayBody(t, className(t, class_), 0));
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::ClassCastExceptionType, message);
|
2008-01-03 18:37:00 +00:00
|
|
|
unwind(t);
|
2007-12-23 00:00:35 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-28 21:20:43 +00:00
|
|
|
uint64_t
|
|
|
|
instanceOf64(Thread* t, object class_, object o)
|
|
|
|
{
|
|
|
|
return instanceOf(t, class_, o);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t
|
2009-07-22 00:57:55 +00:00
|
|
|
makeNewGeneral64(Thread* t, object class_)
|
2009-02-28 21:20:43 +00:00
|
|
|
{
|
2009-07-22 00:57:55 +00:00
|
|
|
return reinterpret_cast<uintptr_t>(makeNewGeneral(t, class_));
|
2009-02-28 21:20:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t
|
|
|
|
makeNew64(Thread* t, object class_)
|
|
|
|
{
|
|
|
|
return reinterpret_cast<uintptr_t>(makeNew(t, class_));
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
void
|
2008-04-09 19:08:13 +00:00
|
|
|
gcIfNecessary(MyThread* t)
|
|
|
|
{
|
2010-09-14 16:49:41 +00:00
|
|
|
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
|
2008-04-09 19:08:13 +00:00
|
|
|
collect(t, Heap::MinorCollection);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
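// Size in bytes of a method return value as handled by compiled code,
// keyed by the Java type code: 4 for sub-word integer and float values,
// a machine word for references, 8 for long and double, and 0 for void.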
unsigned
|
|
|
|
resultSize(MyThread* t, unsigned code)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
|
|
|
switch (code) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
case FloatField:
|
2008-02-11 17:21:41 +00:00
|
|
|
case IntField:
|
|
|
|
return 4;
|
|
|
|
|
|
|
|
case ObjectField:
|
|
|
|
return BytesPerWord;
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case LongField:
|
2008-02-11 17:21:41 +00:00
|
|
|
case DoubleField:
|
|
|
|
return 8;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case VoidField:
|
2008-02-11 17:21:41 +00:00
|
|
|
return 0;
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-10-17 01:21:35 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2008-03-11 16:57:35 +00:00
|
|
|
void
|
|
|
|
pushReturnValue(MyThread* t, Frame* frame, unsigned code,
|
|
|
|
Compiler::Operand* result)
|
|
|
|
{
|
|
|
|
switch (code) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
case FloatField:
|
|
|
|
case IntField:
|
|
|
|
return frame->pushInt(result);
|
|
|
|
|
|
|
|
case ObjectField:
|
|
|
|
return frame->pushObject(result);
|
|
|
|
|
|
|
|
case LongField:
|
|
|
|
case DoubleField:
|
|
|
|
return frame->pushLong(result);
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-10-17 01:21:35 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::OperandType
|
|
|
|
operandTypeForFieldCode(Thread* t, unsigned code)
|
|
|
|
{
|
|
|
|
switch (code) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
case IntField:
|
|
|
|
case LongField:
|
|
|
|
return Compiler::IntegerType;
|
|
|
|
|
|
|
|
case ObjectField:
|
|
|
|
return Compiler::ObjectType;
|
|
|
|
|
|
|
|
case FloatField:
|
|
|
|
case DoubleField:
|
|
|
|
return Compiler::FloatType;
|
|
|
|
|
|
|
|
case VoidField:
|
|
|
|
return Compiler::VoidType;
|
|
|
|
|
|
|
|
default:
|
|
|
|
abort(t);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
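// Return true when 'target' lies farther from the JIT code region than
// the architecture's maximum immediate call or jump displacement, in
// which case the compiler must emit a long (indirect) call instead.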
bool
|
|
|
|
useLongJump(MyThread* t, uintptr_t target)
|
|
|
|
{
|
|
|
|
uintptr_t reach = t->arch->maximumImmediateJump();
|
|
|
|
FixedAllocator* a = codeAllocator(t);
|
|
|
|
uintptr_t start = reinterpret_cast<uintptr_t>(a->base);
|
|
|
|
uintptr_t end = reinterpret_cast<uintptr_t>(a->base) + a->capacity;
|
|
|
|
assert(t, end - start < reach);
|
|
|
|
|
|
|
|
return (target > end && (target - start) > reach)
|
|
|
|
or (target < start && (end - target) > reach);
|
|
|
|
}
|
|
|
|
|
2009-03-31 20:15:08 +00:00
|
|
|
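// Emit a direct call to 'target'.  Depending on the arguments this is
// either a call through the default or native thunk at an aligned call
// site (so it can be patched later), a tail call that records the return
// address in t->tailAddress and exits through the appropriate thunk, or
// a plain call to the method's known or promised compiled address.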
Compiler::Operand*
|
|
|
|
compileDirectInvoke(MyThread* t, Frame* frame, object target, bool tailCall,
|
|
|
|
bool useThunk, unsigned rSize, Promise* addressPromise)
|
|
|
|
{
|
2009-04-07 00:34:12 +00:00
|
|
|
Compiler* c = frame->c;
|
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
unsigned flags = (TailCalls and tailCall ? Compiler::TailJump : 0);
|
|
|
|
unsigned traceFlags;
|
|
|
|
|
|
|
|
if (addressPromise == 0 and useLongJump(t, methodAddress(t, target))) {
|
|
|
|
flags |= Compiler::LongJumpOrCall;
|
|
|
|
traceFlags = TraceElement::LongCall;
|
|
|
|
} else {
|
|
|
|
traceFlags = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (useThunk
|
|
|
|
or (TailCalls and tailCall and (methodFlags(t, target) & ACC_NATIVE)))
|
|
|
|
{
|
|
|
|
flags |= Compiler::Aligned;
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2009-05-26 05:27:10 +00:00
|
|
|
if (TailCalls and tailCall) {
|
2009-10-18 00:18:03 +00:00
|
|
|
traceFlags |= TraceElement::TailCall;
|
|
|
|
|
|
|
|
TraceElement* trace = frame->trace(target, traceFlags);
|
2010-06-26 03:13:59 +00:00
|
|
|
|
|
|
|
Promise* returnAddressPromise = new
|
|
|
|
(frame->context->zone.allocate(sizeof(TraceElementPromise)))
|
|
|
|
TraceElementPromise(t->m->system, trace);
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2009-04-19 22:36:11 +00:00
|
|
|
Compiler::Operand* result = c->stackCall
|
2010-06-26 03:13:59 +00:00
|
|
|
(c->promiseConstant(returnAddressPromise, Compiler::AddressType),
|
2009-10-18 00:18:03 +00:00
|
|
|
flags,
|
2009-04-22 01:39:25 +00:00
|
|
|
trace,
|
2009-04-19 22:36:11 +00:00
|
|
|
rSize,
|
2009-09-20 21:43:32 +00:00
|
|
|
operandTypeForFieldCode(t, methodReturnCode(t, target)),
|
2009-04-19 22:36:11 +00:00
|
|
|
methodParameterFootprint(t, target));
|
|
|
|
|
2009-09-20 21:43:32 +00:00
|
|
|
c->store
|
2010-06-26 03:13:59 +00:00
|
|
|
(BytesPerWord, frame->addressOperand(returnAddressPromise),
|
|
|
|
BytesPerWord, c->memory
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->register_(t->arch->thread()), Compiler::AddressType,
|
|
|
|
difference(&(t->tailAddress), t)));
|
2009-04-19 22:36:11 +00:00
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
c->exit
|
|
|
|
(c->constant
|
|
|
|
((methodFlags(t, target) & ACC_NATIVE)
|
|
|
|
? nativeThunk(t) : defaultThunk(t),
|
|
|
|
Compiler::AddressType));
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2009-04-19 22:36:11 +00:00
|
|
|
return result;
|
|
|
|
} else {
|
2009-03-31 20:15:08 +00:00
|
|
|
return c->stackCall
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(defaultThunk(t), Compiler::AddressType),
|
2009-10-18 00:18:03 +00:00
|
|
|
flags,
|
|
|
|
frame->trace(target, traceFlags),
|
2009-03-31 20:15:08 +00:00
|
|
|
rSize,
|
2009-09-20 21:43:32 +00:00
|
|
|
operandTypeForFieldCode(t, methodReturnCode(t, target)),
|
2009-03-31 20:15:08 +00:00
|
|
|
methodParameterFootprint(t, target));
|
|
|
|
}
|
2009-04-19 22:36:11 +00:00
|
|
|
} else {
|
|
|
|
Compiler::Operand* address =
|
|
|
|
(addressPromise
|
2009-09-20 21:43:32 +00:00
|
|
|
? c->promiseConstant(addressPromise, Compiler::AddressType)
|
|
|
|
: c->constant(methodAddress(t, target), Compiler::AddressType));
|
2009-04-19 22:36:11 +00:00
|
|
|
|
|
|
|
return c->stackCall
|
|
|
|
(address,
|
|
|
|
flags,
|
|
|
|
tailCall ? 0 : frame->trace
|
|
|
|
((methodFlags(t, target) & ACC_NATIVE) ? target : 0, 0),
|
|
|
|
rSize,
|
2009-09-20 21:43:32 +00:00
|
|
|
operandTypeForFieldCode(t, methodReturnCode(t, target)),
|
2009-04-19 22:36:11 +00:00
|
|
|
methodParameterFootprint(t, target));
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-04-22 01:39:25 +00:00
|
|
|
bool
|
2009-03-31 20:15:08 +00:00
|
|
|
compileDirectInvoke(MyThread* t, Frame* frame, object target, bool tailCall)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2008-02-12 02:06:12 +00:00
|
|
|
unsigned rSize = resultSize(t, methodReturnCode(t, target));
|
|
|
|
|
2008-03-10 13:28:21 +00:00
|
|
|
Compiler::Operand* result = 0;
|
|
|
|
|
2009-04-22 01:39:25 +00:00
|
|
|
if (emptyMethod(t, target)) {
|
|
|
|
tailCall = false;
|
|
|
|
} else {
|
2008-11-30 04:58:09 +00:00
|
|
|
BootContext* bc = frame->context->bootContext;
|
|
|
|
if (bc) {
|
2010-06-26 03:13:59 +00:00
|
|
|
if ((methodClass(t, target) == methodClass(t, frame->context->method)
|
|
|
|
or (not classNeedsInit(t, methodClass(t, target))))
|
|
|
|
and (not (TailCalls and tailCall
|
|
|
|
and (methodFlags(t, target) & ACC_NATIVE))))
|
2008-11-30 04:58:09 +00:00
|
|
|
{
|
|
|
|
Promise* p = new (bc->zone->allocate(sizeof(ListenPromise)))
|
|
|
|
ListenPromise(t->m->system, bc->zone);
|
|
|
|
|
|
|
|
PROTECT(t, target);
|
|
|
|
object pointer = makePointer(t, p);
|
|
|
|
bc->calls = makeTriple(t, target, pointer, bc->calls);
|
|
|
|
|
2009-03-31 20:15:08 +00:00
|
|
|
result = compileDirectInvoke
|
|
|
|
(t, frame, target, tailCall, false, rSize, p);
|
2008-11-27 20:59:40 +00:00
|
|
|
} else {
|
2009-03-31 20:15:08 +00:00
|
|
|
result = compileDirectInvoke
|
|
|
|
(t, frame, target, tailCall, true, rSize, 0);
|
2008-11-27 20:59:40 +00:00
|
|
|
}
|
2009-10-18 00:18:03 +00:00
|
|
|
} else if (unresolved(t, methodAddress(t, target))
|
2008-11-30 04:58:09 +00:00
|
|
|
or classNeedsInit(t, methodClass(t, target)))
|
|
|
|
{
|
2009-03-31 20:15:08 +00:00
|
|
|
result = compileDirectInvoke
|
|
|
|
(t, frame, target, tailCall, true, rSize, 0);
|
2008-04-13 19:48:20 +00:00
|
|
|
} else {
|
2009-03-31 20:15:08 +00:00
|
|
|
result = compileDirectInvoke
|
|
|
|
(t, frame, target, tailCall, false, rSize, 0);
|
2008-04-13 19:48:20 +00:00
|
|
|
}
|
2008-04-09 19:08:13 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
frame->pop(methodParameterFootprint(t, target));
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-02-12 02:06:12 +00:00
|
|
|
if (rSize) {
|
2008-03-11 16:57:35 +00:00
|
|
|
pushReturnValue(t, frame, methodReturnCode(t, target), result);
|
2008-02-11 17:21:41 +00:00
|
|
|
}
|
2009-04-22 01:39:25 +00:00
|
|
|
|
|
|
|
return tailCall;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2007-12-28 00:02:05 +00:00
|
|
|
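// if the current method is synchronized, emit a call to the given
// thunk (monitor acquire or release), passing the class for static
// methods or the saved 'this' reference otherwise.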
void
|
|
|
|
handleMonitorEvent(MyThread* t, Frame* frame, intptr_t function)
|
|
|
|
{
|
|
|
|
Compiler* c = frame->c;
|
2007-12-31 22:40:56 +00:00
|
|
|
object method = frame->context->method;
|
2007-12-28 00:02:05 +00:00
|
|
|
|
|
|
|
if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* lock;
|
2007-12-28 00:02:05 +00:00
|
|
|
if (methodFlags(t, method) & ACC_STATIC) {
|
|
|
|
lock = frame->append(methodClass(t, method));
|
|
|
|
} else {
|
2009-05-03 20:57:11 +00:00
|
|
|
lock = loadLocal(frame->context, 1, savedTargetIndex(t, method));
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
|
|
|
|
2009-09-20 21:43:32 +00:00
|
|
|
c->call(c->constant(function, Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-02-11 17:21:41 +00:00
|
|
|
0,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::VoidType,
|
2009-04-07 00:34:12 +00:00
|
|
|
2, c->register_(t->arch->thread()), lock);
|
2008-01-11 22:16:24 +00:00
|
|
|
}
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
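// emitted at method entry: for synchronized instance methods, copy
// 'this' into a dedicated local (savedTargetIndex) so the monitor can
// still be released even if local 0 is later overwritten, then
// acquire the monitor.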
void
|
|
|
|
handleEntrance(MyThread* t, Frame* frame)
|
|
|
|
{
|
2008-01-20 18:55:08 +00:00
|
|
|
object method = frame->context->method;
|
|
|
|
|
2008-01-20 22:05:59 +00:00
|
|
|
if ((methodFlags(t, method) & (ACC_SYNCHRONIZED | ACC_STATIC))
|
|
|
|
== ACC_SYNCHRONIZED)
|
2008-01-20 18:55:08 +00:00
|
|
|
{
|
|
|
|
// save 'this' pointer in case it is overwritten.
|
|
|
|
unsigned index = savedTargetIndex(t, method);
|
2009-05-03 20:57:11 +00:00
|
|
|
storeLocal(frame->context, 1, loadLocal(frame->context, 1, 0), index);
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->set(index, Frame::Object);
|
2008-01-20 18:55:08 +00:00
|
|
|
}
|
|
|
|
|
2007-12-28 00:02:05 +00:00
|
|
|
handleMonitorEvent
|
2008-05-31 22:14:27 +00:00
|
|
|
(t, frame, getThunk(t, acquireMonitorForObjectThunk));
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
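// emitted on normal return paths: release the monitor for
// synchronized methods.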
void
|
|
|
|
handleExit(MyThread* t, Frame* frame)
|
|
|
|
{
|
|
|
|
handleMonitorEvent
|
2008-05-31 22:14:27 +00:00
|
|
|
(t, frame, getThunk(t, releaseMonitorForObjectThunk));
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
|
|
|
|
2008-11-25 17:34:48 +00:00
|
|
|
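// true if the given bytecode index falls within any exception handler
// range of this method.  callers use this to decide when to emit
// c->saveLocals() so that a handler sees consistent local values if
// the operation traps.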
bool
|
|
|
|
inTryBlock(MyThread* t, object code, unsigned ip)
|
|
|
|
{
|
|
|
|
object table = codeExceptionHandlerTable(t, code);
|
|
|
|
if (table) {
|
|
|
|
unsigned length = exceptionHandlerTableLength(t, table);
|
|
|
|
for (unsigned i = 0; i < length; ++i) {
|
|
|
|
ExceptionHandler* eh = exceptionHandlerTableBody(t, table, i);
|
|
|
|
if (ip >= exceptionHandlerStart(eh)
|
|
|
|
and ip < exceptionHandlerEnd(eh))
|
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-03-31 20:15:08 +00:00
|
|
|
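// constructors of classes with final fields need a memory barrier
// before returning so the final fields are safely published; such
// methods are also excluded from tail call optimization (see
// isTailCall).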
bool
|
|
|
|
needsReturnBarrier(MyThread* t, object method)
|
|
|
|
{
|
|
|
|
return (methodFlags(t, method) & ConstructorFlag)
|
2009-08-18 20:26:28 +00:00
|
|
|
and (classVmFlags(t, methodClass(t, method)) & HasFinalMemberFlag);
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
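// true if the instruction at ip is a return, possibly reached through
// a chain of goto/goto_w instructions, i.e. the current call's result
// would be returned immediately, making it a tail call candidate.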
bool
|
2009-04-26 01:51:33 +00:00
|
|
|
returnsNext(MyThread* t, object code, unsigned ip)
|
2009-03-31 20:15:08 +00:00
|
|
|
{
|
2009-04-26 01:51:33 +00:00
|
|
|
switch (codeBody(t, code, ip)) {
|
|
|
|
case return_:
|
|
|
|
case areturn:
|
|
|
|
case ireturn:
|
|
|
|
case freturn:
|
|
|
|
case lreturn:
|
|
|
|
case dreturn:
|
|
|
|
return true;
|
|
|
|
|
|
|
|
case goto_: {
|
|
|
|
uint32_t offset = codeReadInt16(t, code, ++ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
|
|
|
return returnsNext(t, code, newIp);
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
2009-04-26 01:51:33 +00:00
|
|
|
|
|
|
|
case goto_w: {
|
|
|
|
uint32_t offset = codeReadInt32(t, code, ++ip);
|
|
|
|
uint32_t newIp = (ip - 5) + offset;
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
|
|
|
return returnsNext(t, code, newIp);
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
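// a call may be compiled as a tail call only when tail calls are
// enabled, the caller is not synchronized, the call site is not
// inside a try block, the caller needs no return barrier, the return
// types are compatible (or the caller returns void), and the next
// instruction is a return.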
bool
|
|
|
|
isTailCall(MyThread* t, object code, unsigned ip, object caller, object callee)
|
|
|
|
{
|
2009-05-26 05:27:10 +00:00
|
|
|
return TailCalls
|
|
|
|
and ((methodFlags(t, caller) & ACC_SYNCHRONIZED) == 0)
|
|
|
|
and (not inTryBlock(t, code, ip - 1))
|
|
|
|
and (not needsReturnBarrier(t, caller))
|
|
|
|
and (methodReturnCode(t, caller) == VoidField
|
|
|
|
or methodReturnCode(t, caller) == methodReturnCode(t, callee))
|
|
|
|
and returnsNext(t, code, ip);
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
|
2008-09-20 23:42:46 +00:00
|
|
|
void
|
|
|
|
compile(MyThread* t, Frame* initialFrame, unsigned ip,
|
2008-09-25 00:48:32 +00:00
|
|
|
int exceptionHandlerStart = -1);
|
2008-09-20 23:42:46 +00:00
|
|
|
|
|
|
|
void
|
2008-09-25 00:48:32 +00:00
|
|
|
saveStateAndCompile(MyThread* t, Frame* initialFrame, unsigned ip)
|
2008-09-20 23:42:46 +00:00
|
|
|
{
|
2008-09-22 14:28:18 +00:00
|
|
|
Compiler::State* state = initialFrame->c->saveState();
|
2008-11-07 00:39:38 +00:00
|
|
|
compile(t, initialFrame, ip);
|
2008-09-22 14:28:18 +00:00
|
|
|
initialFrame->c->restoreState(state);
|
2008-09-20 23:42:46 +00:00
|
|
|
}
|
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
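// try to fuse an integer comparison with the conditional branch that
// follows it: if the next instruction is ifeq..ifle, emit the
// corresponding conditional jump on a and b, compile the branch
// target, and return true.  otherwise rewind ip and return false so
// the caller falls back to materializing a comparison result.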
bool
|
2009-10-10 21:03:23 +00:00
|
|
|
integerBranch(MyThread* t, Frame* frame, object code, unsigned& ip,
|
|
|
|
unsigned size, Compiler::Operand* a, Compiler::Operand* b)
|
2009-10-07 00:50:32 +00:00
|
|
|
{
|
|
|
|
if (ip + 3 > codeLength(t, code)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
unsigned instruction = codeBody(t, code, ip++);
|
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
|
|
|
Compiler::Operand* target = frame->machineIp(newIp);
|
|
|
|
|
|
|
|
switch (instruction) {
|
|
|
|
case ifeq:
|
|
|
|
c->jumpIfEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifne:
|
|
|
|
c->jumpIfNotEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifgt:
|
|
|
|
c->jumpIfGreater(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifge:
|
|
|
|
c->jumpIfGreaterOrEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case iflt:
|
2009-10-10 21:03:23 +00:00
|
|
|
c->jumpIfLess(size, a, b, target);
|
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifle:
|
2009-10-10 21:03:23 +00:00
|
|
|
c->jumpIfLessOrEqual(size, a, b, target);
|
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
ip -= 3;
|
|
|
|
return false;
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
|
|
|
|
saveStateAndCompile(t, frame, newIp);
|
|
|
|
return t->exception == 0;
|
2009-10-07 00:50:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
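// float/double analog of integerBranch, used for fcmpl/fcmpg and
// dcmpl/dcmpg: lessIfUnordered selects how an unordered (NaN)
// comparison is treated, matching the l and g variants of the compare
// opcode.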
bool
|
2009-10-10 21:03:23 +00:00
|
|
|
floatBranch(MyThread* t, Frame* frame, object code, unsigned& ip,
|
|
|
|
unsigned size, bool lessIfUnordered, Compiler::Operand* a,
|
|
|
|
Compiler::Operand* b)
|
2009-08-06 16:01:57 +00:00
|
|
|
{
|
2009-10-07 00:50:32 +00:00
|
|
|
if (ip + 3 > codeLength(t, code)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
unsigned instruction = codeBody(t, code, ip++);
|
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
|
|
|
Compiler::Operand* target = frame->machineIp(newIp);
|
|
|
|
|
|
|
|
switch (instruction) {
|
2009-08-06 16:01:57 +00:00
|
|
|
case ifeq:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfFloatEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
case ifne:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfFloatNotEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
case ifgt:
|
2009-10-07 00:50:32 +00:00
|
|
|
if (lessIfUnordered) {
|
|
|
|
c->jumpIfFloatGreater(size, a, b, target);
|
|
|
|
} else {
|
|
|
|
c->jumpIfFloatGreaterOrUnordered(size, a, b, target);
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
case ifge:
|
2009-10-07 00:50:32 +00:00
|
|
|
if (lessIfUnordered) {
|
|
|
|
c->jumpIfFloatGreaterOrEqual(size, a, b, target);
|
|
|
|
} else {
|
|
|
|
c->jumpIfFloatGreaterOrEqualOrUnordered(size, a, b, target);
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
case iflt:
|
2009-10-07 00:50:32 +00:00
|
|
|
if (lessIfUnordered) {
|
|
|
|
c->jumpIfFloatLessOrUnordered(size, a, b, target);
|
|
|
|
} else {
|
|
|
|
c->jumpIfFloatLess(size, a, b, target);
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifle:
|
|
|
|
if (lessIfUnordered) {
|
|
|
|
c->jumpIfFloatLessOrEqualOrUnordered(size, a, b, target);
|
|
|
|
} else {
|
|
|
|
c->jumpIfFloatLessOrEqual(size, a, b, target);
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
default:
|
2009-10-07 00:50:32 +00:00
|
|
|
ip -= 3;
|
2009-08-06 16:01:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
|
|
|
|
saveStateAndCompile(t, frame, newIp);
|
|
|
|
return t->exception == 0;
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
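// recognize a handful of java.lang.Math methods (sqrt(D)D and the
// int/long/float forms of abs) and compile them inline using the
// compiler's fsqrt/abs/fabs primitives instead of emitting a call.
// returns true if the call was handled here.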
bool
|
|
|
|
intrinsic(MyThread* t, Frame* frame, object target)
|
|
|
|
{
|
|
|
|
#define MATCH(name, constant) \
|
2009-11-28 04:01:27 +00:00
|
|
|
(byteArrayLength(t, name) == sizeof(constant) \
|
2009-12-02 15:49:10 +00:00
|
|
|
and ::strcmp(reinterpret_cast<char*>(&byteArrayBody(t, name, 0)), \
|
|
|
|
constant) == 0)
|
2009-10-18 01:26:14 +00:00
|
|
|
|
|
|
|
object className = vm::className(t, methodClass(t, target));
|
|
|
|
if (UNLIKELY(MATCH(className, "java/lang/Math"))) {
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
if (MATCH(methodName(t, target), "sqrt")
|
|
|
|
and MATCH(methodSpec(t, target), "(D)D"))
|
|
|
|
{
|
|
|
|
frame->pushLong(c->fsqrt(8, frame->popLong()));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodName(t, target), "abs")) {
|
|
|
|
if (MATCH(methodSpec(t, target), "(I)I")) {
|
|
|
|
frame->pushInt(c->abs(4, frame->popInt()));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodSpec(t, target), "(J)J")) {
|
|
|
|
frame->pushLong(c->abs(8, frame->popLong()));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodSpec(t, target), "(F)F")) {
|
|
|
|
frame->pushInt(c->fabs(4, frame->popInt()));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
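// the main bytecode-to-IR translation loop: starting at ip, walk the
// bytecode, using context->visitTable to avoid re-translating code
// already reached along another path, and dispatch on each opcode to
// emit the corresponding Compiler operations.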
void
|
2008-09-25 00:48:32 +00:00
|
|
|
compile(MyThread* t, Frame* initialFrame, unsigned ip,
|
|
|
|
int exceptionHandlerStart)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY(uint8_t, stackMap,
|
|
|
|
codeMaxStack(t, methodCode(t, initialFrame->context->method)));
|
|
|
|
Frame myFrame(initialFrame, RUNTIME_ARRAY_BODY(stackMap));
|
2007-12-09 22:45:43 +00:00
|
|
|
Frame* frame = &myFrame;
|
|
|
|
Compiler* c = frame->c;
|
2007-12-31 22:40:56 +00:00
|
|
|
Context* context = frame->context;
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-31 22:40:56 +00:00
|
|
|
object code = methodCode(t, context->method);
|
2007-12-09 22:45:43 +00:00
|
|
|
PROTECT(t, code);
|
2009-08-06 16:01:57 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
while (ip < codeLength(t, code)) {
|
2008-01-07 14:51:07 +00:00
|
|
|
if (context->visitTable[ip] ++) {
|
2007-12-09 22:45:43 +00:00
|
|
|
// we've already visited this part of the code
|
2008-04-20 05:23:08 +00:00
|
|
|
frame->visitLogicalIp(ip);
|
2007-12-09 22:45:43 +00:00
|
|
|
return;
|
2007-09-30 04:07:22 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
frame->startLogicalIp(ip);
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2008-11-08 23:21:30 +00:00
|
|
|
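// if we are generating the body of an exception handler, initialize
// the locals from the handler's entry state and note that the thrown
// exception now occupies the top of the stack.  the gcIfNecessary
// call gives the collector a chance to run, presumably in case the
// exception was allocated under memory pressure.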
if (exceptionHandlerStart >= 0) {
|
2008-09-25 00:48:32 +00:00
|
|
|
c->initLocalsFromLogicalIp(exceptionHandlerStart);
|
|
|
|
|
|
|
|
exceptionHandlerStart = -1;
|
2008-04-19 07:03:59 +00:00
|
|
|
|
|
|
|
frame->pushObject();
|
2008-04-09 19:08:13 +00:00
|
|
|
|
2008-04-17 22:07:32 +00:00
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(getThunk(t, gcIfNecessaryThunk), Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-04-17 22:07:32 +00:00
|
|
|
0,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::VoidType,
|
2009-04-07 00:34:12 +00:00
|
|
|
1, c->register_(t->arch->thread()));
|
2008-04-09 19:08:13 +00:00
|
|
|
}
|
2009-08-06 16:01:57 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
// fprintf(stderr, "ip: %d map: %ld\n", ip, *(frame->map));
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned instruction = codeBody(t, code, ip++);
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
switch (instruction) {
|
|
|
|
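// array loads: pop the index and array reference, save locals if we
// are inside a try block, optionally bounds-check the index against
// the array length, then load an element of the size and type implied
// by the opcode.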
case aaload:
|
|
|
|
case baload:
|
|
|
|
case caload:
|
|
|
|
case daload:
|
|
|
|
case faload:
|
|
|
|
case iaload:
|
|
|
|
case laload:
|
|
|
|
case saload: {
|
2008-04-19 20:41:31 +00:00
|
|
|
Compiler::Operand* index = frame->popInt();
|
|
|
|
Compiler::Operand* array = frame->popObject();
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2008-11-25 17:34:48 +00:00
|
|
|
if (inTryBlock(t, code, ip - 1)) {
|
|
|
|
c->saveLocals();
|
|
|
|
}
|
|
|
|
|
2008-01-08 17:10:24 +00:00
|
|
|
if (CheckArrayBounds) {
|
2008-11-23 23:58:01 +00:00
|
|
|
c->checkBounds(array, ArrayLength, index, aioobThunk(t));
|
2008-01-08 17:10:24 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case aaload:
|
|
|
|
frame->pushObject
|
|
|
|
(c->load
|
2009-09-20 21:43:32 +00:00
|
|
|
(BytesPerWord, BytesPerWord, c->memory
|
|
|
|
(array, Compiler::ObjectType, ArrayBody, index, BytesPerWord),
|
|
|
|
BytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case faload:
|
2009-08-10 19:20:23 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(4, 4, c->memory
|
|
|
|
(array, Compiler::FloatType, ArrayBody, index, 4), BytesPerWord));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-11-30 15:08:45 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case iaload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(4, 4, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 4),
|
|
|
|
BytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case baload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(1, 1, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 1),
|
|
|
|
BytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case caload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->loadz
|
|
|
|
(2, 2, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 2),
|
|
|
|
BytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case daload:
|
2009-08-10 19:20:23 +00:00
|
|
|
frame->pushLong
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(8, 8, c->memory
|
|
|
|
(array, Compiler::FloatType, ArrayBody, index, 8), 8));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case laload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushLong
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(8, 8, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 8), 8));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case saload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(2, 2, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 2),
|
|
|
|
BytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
} break;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
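// array stores mirror the loads above.  aastore is routed through the
// setMaybeNull thunk so that the null check and the collector's write
// barrier happen in the runtime; the primitive variants store directly
// to memory.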
case aastore:
|
|
|
|
case bastore:
|
|
|
|
case castore:
|
|
|
|
case dastore:
|
|
|
|
case fastore:
|
|
|
|
case iastore:
|
|
|
|
case lastore:
|
|
|
|
case sastore: {
|
2008-04-19 22:13:57 +00:00
|
|
|
Compiler::Operand* value;
|
2007-12-09 22:45:43 +00:00
|
|
|
if (instruction == dastore or instruction == lastore) {
|
|
|
|
value = frame->popLong();
|
|
|
|
} else if (instruction == aastore) {
|
|
|
|
value = frame->popObject();
|
|
|
|
} else {
|
|
|
|
value = frame->popInt();
|
|
|
|
}
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2008-04-19 22:13:57 +00:00
|
|
|
Compiler::Operand* index = frame->popInt();
|
|
|
|
Compiler::Operand* array = frame->popObject();
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2008-11-25 17:34:48 +00:00
|
|
|
if (inTryBlock(t, code, ip - 1)) {
|
|
|
|
c->saveLocals();
|
|
|
|
}
|
|
|
|
|
2008-01-08 17:10:24 +00:00
|
|
|
if (CheckArrayBounds) {
|
2008-11-23 23:58:01 +00:00
|
|
|
c->checkBounds(array, ArrayLength, index, aioobThunk(t));
|
2008-01-08 17:10:24 +00:00
|
|
|
}
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case aastore: {
|
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(getThunk(t, setMaybeNullThunk), Compiler::AddressType),
|
2008-06-10 14:49:13 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-06-10 14:49:13 +00:00
|
|
|
0,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::VoidType,
|
2009-04-07 00:34:12 +00:00
|
|
|
4, c->register_(t->arch->thread()), array,
|
2009-09-20 21:43:32 +00:00
|
|
|
c->add
|
|
|
|
(4, c->constant(ArrayBody, Compiler::IntegerType),
|
|
|
|
c->shl
|
|
|
|
(4, c->constant(log(BytesPerWord), Compiler::IntegerType), index)),
|
2008-06-10 14:49:13 +00:00
|
|
|
value);
|
|
|
|
} break;
|
|
|
|
|
|
|
|
case fastore:
|
2009-08-10 19:20:23 +00:00
|
|
|
c->store
|
2009-09-20 21:43:32 +00:00
|
|
|
(BytesPerWord, value, 4, c->memory
|
|
|
|
(array, Compiler::FloatType, ArrayBody, index, 4));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case iastore:
|
2009-02-28 23:17:24 +00:00
|
|
|
c->store
|
2009-09-20 21:43:32 +00:00
|
|
|
(BytesPerWord, value, 4, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 4));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case bastore:
|
2009-02-28 23:17:24 +00:00
|
|
|
c->store
|
2009-09-20 21:43:32 +00:00
|
|
|
(BytesPerWord, value, 1, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 1));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case castore:
|
|
|
|
case sastore:
|
2009-02-28 23:17:24 +00:00
|
|
|
c->store
|
2009-09-20 21:43:32 +00:00
|
|
|
(BytesPerWord, value, 2, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 2));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case dastore:
|
2009-09-20 21:43:32 +00:00
|
|
|
c->store
|
|
|
|
(8, value, 8, c->memory
|
|
|
|
(array, Compiler::FloatType, ArrayBody, index, 8));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case lastore:
|
2009-09-20 21:43:32 +00:00
|
|
|
c->store
|
|
|
|
(8, value, 8, c->memory
|
|
|
|
(array, Compiler::IntegerType, ArrayBody, index, 8));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
} break;
|
2007-09-30 15:52:21 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aconst_null:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushObject(c->constant(0, Compiler::ObjectType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload:
|
|
|
|
frame->loadObject(codeBody(t, code, ip++));
|
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload_0:
|
|
|
|
frame->loadObject(0);
|
|
|
|
break;
|
2007-10-08 23:13:55 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload_1:
|
|
|
|
frame->loadObject(1);
|
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload_2:
|
|
|
|
frame->loadObject(2);
|
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload_3:
|
|
|
|
frame->loadObject(3);
|
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case anewarray: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
|
|
|
|
2009-08-10 13:56:16 +00:00
|
|
|
object class_ = resolveClassInPool(t, context->method, index - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
if (UNLIKELY(t->exception)) return;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2008-04-26 20:56:03 +00:00
|
|
|
Compiler::Operand* length = frame->popInt();
|
2007-10-08 23:13:55 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->pushObject
|
|
|
|
(c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, makeBlankObjectArrayThunk), Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-02-11 17:21:41 +00:00
|
|
|
BytesPerWord,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::ObjectType,
|
2010-09-14 16:49:41 +00:00
|
|
|
3, c->register_(t->arch->thread()), frame->append(class_), length));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2007-12-12 22:19:13 +00:00
|
|
|
case areturn: {
|
2009-04-26 01:51:33 +00:00
|
|
|
handleExit(t, frame);
|
2009-05-31 20:15:45 +00:00
|
|
|
c->return_(BytesPerWord, frame->popObject());
|
2007-12-12 22:19:13 +00:00
|
|
|
} return;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2007-12-12 22:19:13 +00:00
|
|
|
case arraylength: {
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->pushInt
|
|
|
|
(c->load
|
2009-03-06 17:56:11 +00:00
|
|
|
(BytesPerWord, BytesPerWord,
|
2009-09-20 21:43:32 +00:00
|
|
|
c->memory
|
|
|
|
(frame->popObject(), Compiler::IntegerType, ArrayLength, 0, 1),
|
|
|
|
BytesPerWord));
|
2007-12-12 22:19:13 +00:00
|
|
|
} break;
|
2007-10-04 03:19:39 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore:
|
2007-12-26 23:59:55 +00:00
|
|
|
frame->storeObjectOrAddress(codeBody(t, code, ip++));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore_0:
|
2007-12-27 16:02:03 +00:00
|
|
|
frame->storeObjectOrAddress(0);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-17 17:22:09 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore_1:
|
2007-12-27 16:02:03 +00:00
|
|
|
frame->storeObjectOrAddress(1);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore_2:
|
2007-12-27 16:02:03 +00:00
|
|
|
frame->storeObjectOrAddress(2);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore_3:
|
2007-12-27 16:02:03 +00:00
|
|
|
frame->storeObjectOrAddress(3);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-12 22:19:13 +00:00
|
|
|
case athrow: {
|
2009-03-18 22:24:13 +00:00
|
|
|
Compiler::Operand* target = frame->popObject();
|
2008-02-11 17:21:41 +00:00
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(getThunk(t, throw_Thunk), Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
Compiler::NoReturn,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-02-11 17:21:41 +00:00
|
|
|
0,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::VoidType,
|
2009-04-07 00:34:12 +00:00
|
|
|
2, c->register_(t->arch->thread()), target);
|
2007-12-12 22:19:13 +00:00
|
|
|
} return;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case bipush:
|
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(static_cast<int8_t>(codeBody(t, code, ip++)),
|
|
|
|
Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case checkcast: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2009-08-10 13:56:16 +00:00
|
|
|
object class_ = resolveClassInPool(t, context->method, index - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
if (UNLIKELY(t->exception)) return;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2008-11-02 22:25:51 +00:00
|
|
|
Compiler::Operand* instance = c->peek(1, 0);
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(getThunk(t, checkCastThunk), Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-02-11 17:21:41 +00:00
|
|
|
0,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::VoidType,
|
2009-04-07 00:34:12 +00:00
|
|
|
3, c->register_(t->arch->thread()), frame->append(class_), instance);
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case d2f: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->f2f(8, 4, frame->popLong()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case d2i: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->f2i(8, 4, frame->popLong()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case d2l: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->f2i(8, 8, frame->popLong()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dadd: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->fadd(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dcmpg: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-10-10 21:03:23 +00:00
|
|
|
if (not floatBranch(t, frame, code, ip, 8, false, a, b)) {
|
|
|
|
if (UNLIKELY(t->exception)) return;
|
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt
|
|
|
|
(c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, compareDoublesGThunk), Compiler::AddressType),
|
|
|
|
0, 0, 4, Compiler::IntegerType, 4,
|
2009-08-06 16:01:57 +00:00
|
|
|
static_cast<Compiler::Operand*>(0), a,
|
|
|
|
static_cast<Compiler::Operand*>(0), b));
|
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dcmpl: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-10-10 21:03:23 +00:00
|
|
|
if (not floatBranch(t, frame, code, ip, 8, true, a, b)) {
|
|
|
|
if (UNLIKELY(t->exception)) return;
|
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt
|
|
|
|
(c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, compareDoublesLThunk), Compiler::AddressType),
|
|
|
|
0, 0, 4, Compiler::IntegerType, 4,
|
2009-08-06 16:01:57 +00:00
|
|
|
static_cast<Compiler::Operand*>(0), a,
|
|
|
|
static_cast<Compiler::Operand*>(0), b));
|
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dconst_0:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushLong(c->constant(doubleToBits(0.0), Compiler::FloatType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case dconst_1:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushLong(c->constant(doubleToBits(1.0), Compiler::FloatType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case ddiv: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->fdiv(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dmul: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->fmul(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dneg: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->fneg(8, frame->popLong()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case vm::drem: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->frem(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dsub: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->fsub(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup:
|
|
|
|
frame->dup();
|
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup_x1:
|
|
|
|
frame->dupX1();
|
|
|
|
break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup_x2:
|
|
|
|
frame->dupX2();
|
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup2:
|
|
|
|
frame->dup2();
|
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup2_x1:
|
|
|
|
frame->dup2X1();
|
|
|
|
break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup2_x2:
|
|
|
|
frame->dup2X2();
|
|
|
|
break;
|
2007-10-09 17:15:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case f2d: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->f2f(4, 8, frame->popInt()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case f2i: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->f2i(4, 4, frame->popInt()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case f2l: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->f2i(4, 8, frame->popInt()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fadd: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->fadd(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fcmpg: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-10-10 21:03:23 +00:00
|
|
|
if (not floatBranch(t, frame, code, ip, 4, false, a, b)) {
|
|
|
|
if (UNLIKELY(t->exception)) return;
|
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt
|
|
|
|
(c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, compareFloatsGThunk), Compiler::AddressType),
|
|
|
|
0, 0, 4, Compiler::IntegerType, 2, a, b));
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fcmpl: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-10-10 21:03:23 +00:00
|
|
|
if (not floatBranch(t, frame, code, ip, 4, true, a, b)) {
|
|
|
|
if (UNLIKELY(t->exception)) return;
|
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt
|
|
|
|
(c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, compareFloatsLThunk), Compiler::AddressType),
|
|
|
|
0, 0, 4, Compiler::IntegerType, 2, a, b));
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fconst_0:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(floatToBits(0.0), Compiler::FloatType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case fconst_1:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(floatToBits(1.0), Compiler::FloatType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case fconst_2:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(floatToBits(2.0), Compiler::FloatType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fdiv: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->fdiv(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fmul: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->fmul(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fneg: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->fneg(4, frame->popInt()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case vm::frem: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->frem(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fsub: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->fsub(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
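// field reads: resolve the field, then load a value of the field's
// type from either the class static table (getstatic) or the popped
// instance (getfield).  on 32-bit targets a volatile long or double
// read is bracketed by monitor acquire/release on the field object,
// since the load is not atomic; other volatile reads are followed by
// a load barrier.  getstatic may also emit a tryInitClass call first.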
case getfield:
|
|
|
|
case getstatic: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
|
|
|
|
2009-08-10 13:56:16 +00:00
|
|
|
object field = resolveField(t, context->method, index - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
if (UNLIKELY(t->exception)) return;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2010-03-02 01:24:25 +00:00
|
|
|
if ((fieldFlags(t, field) & ACC_VOLATILE)
|
|
|
|
and BytesPerWord == 4
|
|
|
|
and (fieldCode(t, field) == DoubleField
|
|
|
|
or fieldCode(t, field) == LongField))
|
|
|
|
{
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, acquireMonitorForObjectThunk), Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
|
|
|
|
c->register_(t->arch->thread()),
|
|
|
|
frame->append(field));
|
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* table;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
if (instruction == getstatic) {
|
2009-01-10 19:25:52 +00:00
|
|
|
assert(t, fieldFlags(t, field) & ACC_STATIC);
|
|
|
|
|
2008-11-30 04:58:09 +00:00
|
|
|
if (fieldClass(t, field) != methodClass(t, context->method)
|
2008-12-02 02:38:00 +00:00
|
|
|
and classNeedsInit(t, fieldClass(t, field)))
|
2008-04-23 22:56:02 +00:00
|
|
|
{
|
2008-03-16 19:38:43 +00:00
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, tryInitClassThunk), Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-03-16 19:38:43 +00:00
|
|
|
0,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::VoidType,
|
2009-04-07 00:34:12 +00:00
|
|
|
2, c->register_(t->arch->thread()),
|
|
|
|
frame->append(fieldClass(t, field)));
|
2008-04-23 22:56:02 +00:00
|
|
|
}
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
table = frame->append(classStaticTable(t, fieldClass(t, field)));
|
|
|
|
} else {
|
2009-01-10 19:25:52 +00:00
|
|
|
assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
table = frame->popObject();
|
2008-11-25 17:34:48 +00:00
|
|
|
|
|
|
|
if (inTryBlock(t, code, ip - 3)) {
|
|
|
|
c->saveLocals();
|
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
switch (fieldCode(t, field)) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(1, 1, c->memory
|
|
|
|
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1),
|
|
|
|
BytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case CharField:
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->loadz
|
|
|
|
(2, 2, c->memory
|
|
|
|
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1),
|
|
|
|
BytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case ShortField:
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(2, 2, c->memory
|
|
|
|
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1),
|
|
|
|
BytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case FloatField:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt
|
|
|
|
(c->load
|
|
|
|
(4, 4, c->memory
|
|
|
|
(table, Compiler::FloatType, fieldOffset(t, field), 0, 1),
|
|
|
|
BytesPerWord));
|
|
|
|
break;
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case IntField:
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(4, 4, c->memory
|
|
|
|
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1),
|
|
|
|
BytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case DoubleField:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushLong
|
|
|
|
(c->load
|
|
|
|
(8, 8, c->memory
|
|
|
|
(table, Compiler::FloatType, fieldOffset(t, field), 0, 1), 8));
|
|
|
|
break;
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case LongField:
|
2007-12-30 22:24:48 +00:00
|
|
|
frame->pushLong
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(8, 8, c->memory
|
|
|
|
(table, Compiler::IntegerType, fieldOffset(t, field), 0, 1), 8));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case ObjectField:
|
2007-12-30 22:24:48 +00:00
|
|
|
frame->pushObject
|
2008-02-12 02:06:12 +00:00
|
|
|
(c->load
|
2009-03-06 17:56:11 +00:00
|
|
|
(BytesPerWord, BytesPerWord,
|
2009-09-20 21:43:32 +00:00
|
|
|
c->memory
|
|
|
|
(table, Compiler::ObjectType, fieldOffset(t, field), 0, 1),
|
|
|
|
BytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
|
|
|
}
|
2009-03-03 03:18:15 +00:00
|
|
|
|
|
|
|
if (fieldFlags(t, field) & ACC_VOLATILE) {
|
2009-03-04 01:02:11 +00:00
|
|
|
if (BytesPerWord == 4
|
|
|
|
and (fieldCode(t, field) == DoubleField
|
|
|
|
or fieldCode(t, field) == LongField))
|
|
|
|
{
|
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, releaseMonitorForObjectThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
|
|
|
|
c->register_(t->arch->thread()),
|
2010-01-28 00:46:04 +00:00
|
|
|
frame->append(field));
|
2009-03-04 01:02:11 +00:00
|
|
|
} else {
|
|
|
|
c->loadBarrier();
|
|
|
|
}
|
2009-03-03 03:18:15 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case goto_: {
|
2008-11-11 15:20:49 +00:00
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
2007-12-09 22:45:43 +00:00
|
|
|
assert(t, newIp < codeLength(t, code));
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-16 00:24:15 +00:00
|
|
|
c->jmp(frame->machineIp(newIp));
|
2007-12-09 22:45:43 +00:00
|
|
|
ip = newIp;
|
|
|
|
} break;
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case goto_w: {
|
2008-11-11 15:20:49 +00:00
|
|
|
uint32_t offset = codeReadInt32(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 5) + offset;
|
2007-12-09 22:45:43 +00:00
|
|
|
assert(t, newIp < codeLength(t, code));
|
2007-09-28 14:45:26 +00:00
|
|
|
|
2007-12-16 00:24:15 +00:00
|
|
|
c->jmp(frame->machineIp(newIp));
|
2007-12-09 22:45:43 +00:00
|
|
|
ip = newIp;
|
|
|
|
} break;
|
2007-09-28 14:45:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case i2b: {
|
2009-03-06 17:56:11 +00:00
|
|
|
frame->pushInt(c->load(BytesPerWord, 1, frame->popInt(), BytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-28 14:45:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case i2c: {
|
2009-03-06 17:56:11 +00:00
|
|
|
frame->pushInt(c->loadz(BytesPerWord, 2, frame->popInt(), BytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case i2d: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushLong(c->i2f(4, 8, frame->popInt()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-28 14:45:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case i2f: {
|
2009-08-06 16:01:57 +00:00
|
|
|
frame->pushInt(c->i2f(4, 4, frame->popInt()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-28 14:45:26 +00:00
|
|
|
|
2007-12-26 23:59:55 +00:00
|
|
|
case i2l:
|
2009-03-06 17:56:11 +00:00
|
|
|
frame->pushLong(c->load(BytesPerWord, 4, frame->popInt(), 8));
|
2007-12-26 23:59:55 +00:00
|
|
|
break;
|
2007-10-04 03:19:39 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case i2s: {
|
2009-03-06 17:56:11 +00:00
|
|
|
frame->pushInt(c->load(BytesPerWord, 2, frame->popInt(), BytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case iadd: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushInt(c->add(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case iand: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushInt(c->and_(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-04 00:41:54 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iconst_m1:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(-1, Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-04 00:41:54 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iconst_0:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(0, Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-04 00:41:54 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iconst_1:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(1, Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iconst_2:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(2, Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iconst_3:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(3, Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iconst_4:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(4, Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iconst_5:
|
2009-09-20 21:43:32 +00:00
|
|
|
frame->pushInt(c->constant(5, Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case idiv: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushInt(c->div(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case if_acmpeq:
|
|
|
|
case if_acmpne: {
|
2008-11-11 15:20:49 +00:00
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
2007-12-09 22:45:43 +00:00
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popObject();
|
|
|
|
Compiler::Operand* b = frame->popObject();
|
|
|
|
Compiler::Operand* target = frame->machineIp(newIp);
|
2007-09-28 14:45:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
if (instruction == if_acmpeq) {
|
2009-10-10 21:03:23 +00:00
|
|
|
c->jumpIfEqual(BytesPerWord, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
} else {
|
2009-10-10 21:03:23 +00:00
|
|
|
c->jumpIfNotEqual(BytesPerWord, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2008-09-13 21:09:26 +00:00
|
|
|
|
2008-09-20 23:42:46 +00:00
|
|
|
saveStateAndCompile(t, frame, newIp);
|
2007-12-09 22:45:43 +00:00
|
|
|
if (UNLIKELY(t->exception)) return;
|
|
|
|
} break;
|
2007-09-28 14:45:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case if_icmpeq:
|
|
|
|
case if_icmpne:
|
|
|
|
case if_icmpgt:
|
|
|
|
case if_icmpge:
|
|
|
|
case if_icmplt:
|
|
|
|
case if_icmple: {
|
2008-11-11 15:20:49 +00:00
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
2007-12-09 22:45:43 +00:00
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
|
|
|
Compiler::Operand* target = frame->machineIp(newIp);
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case if_icmpeq:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfEqual(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case if_icmpne:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfNotEqual(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case if_icmpgt:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfGreater(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case if_icmpge:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfGreaterOrEqual(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case if_icmplt:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfLess(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case if_icmple:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfLessOrEqual(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
|
2008-09-20 23:42:46 +00:00
|
|
|
saveStateAndCompile(t, frame, newIp);
|
2007-12-09 22:45:43 +00:00
|
|
|
if (UNLIKELY(t->exception)) return;
|
|
|
|
} break;
|
2007-10-03 00:22:48 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case ifeq:
|
|
|
|
case ifne:
|
|
|
|
case ifgt:
|
|
|
|
case ifge:
|
|
|
|
case iflt:
|
|
|
|
case ifle: {
|
2008-11-11 15:20:49 +00:00
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
2007-12-09 22:45:43 +00:00
|
|
|
assert(t, newIp < codeLength(t, code));
|
2007-10-03 00:22:48 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* target = frame->machineIp(newIp);
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2009-10-07 00:50:32 +00:00
|
|
|
Compiler::Operand* a = c->constant(0, Compiler::IntegerType);
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case ifeq:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfEqual(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case ifne:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfNotEqual(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case ifgt:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfGreater(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case ifge:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfGreaterOrEqual(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case iflt:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfLess(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
|
|
|
case ifle:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfLessOrEqual(4, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2008-06-10 14:49:13 +00:00
|
|
|
|
2008-09-20 23:42:46 +00:00
|
|
|
saveStateAndCompile(t, frame, newIp);
|
2007-12-09 22:45:43 +00:00
|
|
|
if (UNLIKELY(t->exception)) return;
|
|
|
|
} break;
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case ifnull:
|
|
|
|
case ifnonnull: {
|
2008-11-11 15:20:49 +00:00
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
2007-12-09 22:45:43 +00:00
|
|
|
assert(t, newIp < codeLength(t, code));
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2009-10-07 00:50:32 +00:00
|
|
|
Compiler::Operand* a = c->constant(0, Compiler::ObjectType);
|
|
|
|
Compiler::Operand* b = frame->popObject();
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* target = frame->machineIp(newIp);
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
if (instruction == ifnull) {
|
2009-10-10 21:03:23 +00:00
|
|
|
c->jumpIfEqual(BytesPerWord, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
} else {
|
2009-10-10 21:03:23 +00:00
|
|
|
c->jumpIfNotEqual(BytesPerWord, a, b, target);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2008-09-13 21:09:26 +00:00
|
|
|
|
2008-09-20 23:42:46 +00:00
|
|
|
saveStateAndCompile(t, frame, newIp);
|
2007-12-09 22:45:43 +00:00
|
|
|
if (UNLIKELY(t->exception)) return;
|
|
|
|
} break;
|
2007-09-29 21:08:29 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iinc: {
|
|
|
|
uint8_t index = codeBody(t, code, ip++);
|
|
|
|
int8_t count = codeBody(t, code, ip++);
|
2007-09-29 21:08:29 +00:00
|
|
|
|
2009-05-15 02:08:01 +00:00
|
|
|
storeLocal
|
|
|
|
(context, 1,
|
2009-09-20 21:43:32 +00:00
|
|
|
c->add
|
|
|
|
(4, c->constant(count, Compiler::IntegerType),
|
|
|
|
loadLocal(context, 1, index)),
|
2009-05-03 20:57:11 +00:00
|
|
|
index);
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iload:
|
|
|
|
case fload:
|
|
|
|
frame->loadInt(codeBody(t, code, ip++));
|
|
|
|
break;
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iload_0:
|
|
|
|
case fload_0:
|
|
|
|
frame->loadInt(0);
|
|
|
|
break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iload_1:
|
|
|
|
case fload_1:
|
|
|
|
frame->loadInt(1);
|
|
|
|
break;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iload_2:
|
|
|
|
case fload_2:
|
|
|
|
frame->loadInt(2);
|
|
|
|
break;
|
2007-10-03 00:22:48 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iload_3:
|
|
|
|
case fload_3:
|
|
|
|
frame->loadInt(3);
|
|
|
|
break;
|
2007-10-03 00:22:48 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case imul: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
|
|
|
Compiler::Operand* b = frame->popInt();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushInt(c->mul(4, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-26 19:19:45 +00:00
|
|
|
case ineg: {
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushInt(c->neg(4, frame->popInt()));
|
2007-12-26 19:19:45 +00:00
|
|
|
} break;
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case instanceof: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
2007-09-30 04:07:22 +00:00
|
|
|
|
2009-08-10 13:56:16 +00:00
|
|
|
object class_ = resolveClassInPool(t, context->method, index - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
if (UNLIKELY(t->exception)) return;
|
2007-09-30 04:07:22 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->pushInt
|
|
|
|
(c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(getThunk(t, instanceOf64Thunk), Compiler::AddressType),
|
|
|
|
0, 0, 4, Compiler::IntegerType,
|
2009-04-07 00:34:12 +00:00
|
|
|
3, c->register_(t->arch->thread()), frame->append(class_),
|
|
|
|
frame->popObject()));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
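// interface dispatch: the concrete implementation is looked up at run
// time by calling findInterfaceMethodFromInstance on the receiver,
// and the returned address is then invoked via a stack call.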
    case invokeinterface: {
      uint16_t index = codeReadInt16(t, code, ip);
      ip += 2;

      object target = resolveMethod(t, context->method, index - 1);
      if (UNLIKELY(t->exception)) return;

      assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);

      unsigned parameterFootprint = methodParameterFootprint(t, target);

      unsigned instance = parameterFootprint - 1;

      unsigned rSize = resultSize(t, methodReturnCode(t, target));

      Compiler::Operand* result = c->stackCall
        (c->call
         (c->constant
          (getThunk(t, findInterfaceMethodFromInstanceThunk),
           Compiler::AddressType),
          0,
          frame->trace(0, 0),
          BytesPerWord,
          Compiler::AddressType,
          3, c->register_(t->arch->thread()), frame->append(target),
          c->peek(1, instance)),
         0,
         frame->trace(0, 0),
         rSize,
         operandTypeForFieldCode(t, methodReturnCode(t, target)),
         parameterFootprint);

      frame->pop(parameterFootprint);

      if (rSize) {
        pushReturnValue(t, frame, methodReturnCode(t, target), result);
      }
    } break;

    case invokespecial: {
      uint16_t index = codeReadInt16(t, code, ip);

      object target = resolveMethod(t, context->method, index - 1);
      if (UNLIKELY(t->exception)) return;

      object class_ = methodClass(t, context->method);
      if (isSpecialMethod(t, target, class_)) {
        target = findVirtualMethod(t, target, classSuper(t, class_));
      }

      assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);

      bool tailCall = isTailCall(t, code, ip, context->method, target);

      compileDirectInvoke(t, frame, target, tailCall);
    } break;

    case invokestatic: {
      uint16_t index = codeReadInt16(t, code, ip);

      object target = resolveMethod(t, context->method, index - 1);
      if (UNLIKELY(t->exception)) return;

      assert(t, methodFlags(t, target) & ACC_STATIC);

      if (not intrinsic(t, frame, target)) {
        bool tailCall = isTailCall(t, code, ip, context->method, target);
        compileDirectInvoke(t, frame, target, tailCall);
      }
    } break;

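    // invokevirtual dispatches through the receiver's vtable: the object's
    // header word is ANDed with PointerMask to recover the class pointer,
    // and the call goes indirectly through the vtable slot at
    // ClassVtable + methodOffset * BytesPerWord.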
    case invokevirtual: {
      uint16_t index = codeReadInt16(t, code, ip);

      object target = resolveMethod(t, context->method, index - 1);
      if (UNLIKELY(t->exception)) return;

      assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);

      unsigned parameterFootprint = methodParameterFootprint(t, target);

      unsigned offset = ClassVtable + (methodOffset(t, target) * BytesPerWord);

      Compiler::Operand* instance = c->peek(1, parameterFootprint - 1);

      unsigned rSize = resultSize(t, methodReturnCode(t, target));

      bool tailCall = isTailCall(t, code, ip, context->method, target);

      Compiler::Operand* result = c->stackCall
        (c->memory
         (c->and_
          (BytesPerWord, c->constant(PointerMask, Compiler::IntegerType),
           c->memory(instance, Compiler::ObjectType, 0, 0, 1)),
          Compiler::ObjectType, offset, 0, 1),
         tailCall ? Compiler::TailJump : 0,
         frame->trace(0, 0),
         rSize,
         operandTypeForFieldCode(t, methodReturnCode(t, target)),
         parameterFootprint);

      frame->pop(parameterFootprint);

      if (rSize) {
        pushReturnValue(t, frame, methodReturnCode(t, target), result);
      }
    } break;

    case ior: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->or_(4, a, b));
    } break;

    case irem: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->rem(4, a, b));
    } break;

    case ireturn:
    case freturn: {
      handleExit(t, frame);
      c->return_(4, frame->popInt());
    } return;

    case ishl: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->shl(4, a, b));
    } break;

    case ishr: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->shr(4, a, b));
    } break;

    case istore:
    case fstore:
      frame->storeInt(codeBody(t, code, ip++));
      break;

    case istore_0:
    case fstore_0:
      frame->storeInt(0);
      break;

    case istore_1:
    case fstore_1:
      frame->storeInt(1);
      break;

    case istore_2:
    case fstore_2:
      frame->storeInt(2);
      break;

    case istore_3:
    case fstore_3:
      frame->storeInt(3);
      break;

    case isub: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->sub(4, a, b));
    } break;

    case iushr: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->ushr(4, a, b));
    } break;

    case ixor: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->xor_(4, a, b));
    } break;

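    // jsr/jsr_w branch into a subroutine (typically a compiled "finally"
    // block); frame->startSubroutine/endSubroutine bracket the recursive
    // compilation of the subroutine body so its stack and root state can be
    // tracked separately from the calling path.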
    case jsr:
    case jsr_w: {
      uint32_t thisIp;
      uint32_t newIp;

      if (instruction == jsr) {
        uint32_t offset = codeReadInt16(t, code, ip);
        thisIp = ip - 3;
        newIp = thisIp + offset;
      } else {
        uint32_t offset = codeReadInt32(t, code, ip);
        thisIp = ip - 5;
        newIp = thisIp + offset;
      }

      assert(t, newIp < codeLength(t, code));

      unsigned start = frame->startSubroutine(newIp, c->machineIp(ip));

      c->jmp(frame->machineIp(newIp));

      saveStateAndCompile(t, frame, newIp);
      if (UNLIKELY(t->exception)) return;

      frame->endSubroutine(start);
    } break;

    case l2d: {
      frame->pushLong(c->i2f(8, 8, frame->popLong()));
    } break;

    case l2f: {
      frame->pushInt(c->i2f(8, 4, frame->popLong()));
    } break;

    case l2i:
      frame->pushInt(c->load(8, 8, frame->popLong(), BytesPerWord));
      break;

    case ladd: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->add(8, a, b));
    } break;

    case land: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->and_(8, a, b));
    } break;

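    // lcmp: integerBranch tries to fuse the comparison with an immediately
    // following conditional branch; when that is not possible, the -1/0/1
    // result is computed by the compareLongs thunk instead.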
    case lcmp: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      if (not integerBranch(t, frame, code, ip, 8, a, b)) {
        if (UNLIKELY(t->exception)) return;

        frame->pushInt
          (c->call
           (c->constant
            (getThunk(t, compareLongsThunk), Compiler::AddressType),
            0, 0, 4, Compiler::IntegerType, 4,
            static_cast<Compiler::Operand*>(0), a,
            static_cast<Compiler::Operand*>(0), b));
      }
    } break;

    case lconst_0:
      frame->pushLong(c->constant(0, Compiler::IntegerType));
      break;

    case lconst_1:
      frame->pushLong(c->constant(1, Compiler::IntegerType));
      break;

    case ldc:
    case ldc_w: {
      uint16_t index;

      if (instruction == ldc) {
        index = codeBody(t, code, ip++);
      } else {
        index = codeReadInt16(t, code, ip);
      }

      object pool = codePool(t, code);

      if (singletonIsObject(t, pool, index - 1)) {
        object v = singletonObject(t, pool, index - 1);

        if (objectClass(t, v) == type(t, Machine::ReferenceType)) {
          object class_ = resolveClassInPool(t, context->method, index - 1);
          if (UNLIKELY(t->exception)) return;

          frame->pushObject(frame->append(getJClass(t, class_)));
        } else if (objectClass(t, v) == type(t, Machine::ClassType)) {
          frame->pushObject(frame->append(getJClass(t, v)));
        } else {
          frame->pushObject(frame->append(v));
        }
      } else {
        frame->pushInt
          (c->constant
           (singletonValue(t, pool, index - 1),
            singletonBit(t, pool, poolSize(t, pool), index - 1)
            ? Compiler::FloatType : Compiler::IntegerType));
      }
    } break;

    case ldc2_w: {
      uint16_t index = codeReadInt16(t, code, ip);

      object pool = codePool(t, code);

      uint64_t v;
      memcpy(&v, &singletonValue(t, pool, index - 1), 8);
      frame->pushLong
        (c->constant
         (v, singletonBit(t, pool, poolSize(t, pool), index - 1)
          ? Compiler::FloatType : Compiler::IntegerType));
    } break;

    case ldiv_: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->div(8, a, b));
    } break;

    case lload:
    case dload:
      frame->loadLong(codeBody(t, code, ip++));
      break;

    case lload_0:
    case dload_0:
      frame->loadLong(0);
      break;

    case lload_1:
    case dload_1:
      frame->loadLong(1);
      break;

    case lload_2:
    case dload_2:
      frame->loadLong(2);
      break;

    case lload_3:
    case dload_3:
      frame->loadLong(3);
      break;

    case lmul: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->mul(8, a, b));
    } break;

    case lneg:
      frame->pushLong(c->neg(8, frame->popLong()));
      break;

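    // lookupswitch: the key/target pairs are emitted into the constant pool
    // and the lookUpAddress thunk searches them at run time (presumably
    // exploiting the fact that the keys are required to be sorted),
    // returning the branch target. javac typically emits lookupswitch for
    // sparse switches, e.g. cases 1, 100 and 10000.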
    case lookupswitch: {
      int32_t base = ip - 1;

      ip = (ip + 3) & ~3; // pad to four byte boundary

      Compiler::Operand* key = frame->popInt();

      uint32_t defaultIp = base + codeReadInt32(t, code, ip);
      assert(t, defaultIp < codeLength(t, code));

      Compiler::Operand* default_ = frame->addressOperand
        (c->machineIp(defaultIp));

      int32_t pairCount = codeReadInt32(t, code, ip);

      if (pairCount) {
        Compiler::Operand* start = 0;
        RUNTIME_ARRAY(uint32_t, ipTable, pairCount);
        for (int32_t i = 0; i < pairCount; ++i) {
          unsigned index = ip + (i * 8);
          int32_t key = codeReadInt32(t, code, index);
          uint32_t newIp = base + codeReadInt32(t, code, index);
          assert(t, newIp < codeLength(t, code));

          RUNTIME_ARRAY_BODY(ipTable)[i] = newIp;

          Promise* p = c->poolAppend(key);
          if (i == 0) {
            start = frame->addressOperand(p);
          }
          c->poolAppendPromise(frame->addressPromise(c->machineIp(newIp)));
        }
        assert(t, start);

        c->jmp
          (c->call
           (c->constant
            (getThunk(t, lookUpAddressThunk), Compiler::AddressType),
            0, 0, BytesPerWord, Compiler::AddressType,
            4, key, start, c->constant(pairCount, Compiler::IntegerType),
            default_));

        Compiler::State* state = c->saveState();

        for (int32_t i = 0; i < pairCount; ++i) {
          compile(t, frame, RUNTIME_ARRAY_BODY(ipTable)[i]);
          if (UNLIKELY(t->exception)) return;

          c->restoreState(state);
        }
      } else {
        // a switch statement with no cases, apparently
        c->jmp(default_);
      }

      ip = defaultIp;
    } break;

    case lor: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->or_(8, a, b));
    } break;

    case lrem: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->rem(8, a, b));
    } break;

    case lreturn:
    case dreturn: {
      handleExit(t, frame);
      c->return_(8, frame->popLong());
    } return;

    case lshl: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->shl(8, a, b));
    } break;

    case lshr: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->shr(8, a, b));
    } break;

    case lstore:
    case dstore:
      frame->storeLong(codeBody(t, code, ip++));
      break;

    case lstore_0:
    case dstore_0:
      frame->storeLong(0);
      break;

    case lstore_1:
    case dstore_1:
      frame->storeLong(1);
      break;

    case lstore_2:
    case dstore_2:
      frame->storeLong(2);
      break;

    case lstore_3:
    case dstore_3:
      frame->storeLong(3);
      break;

    case lsub: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->sub(8, a, b));
    } break;

    case lushr: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->ushr(8, a, b));
    } break;

    case lxor: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->xor_(8, a, b));
    } break;

    case monitorenter: {
      Compiler::Operand* target = frame->popObject();
      c->call
        (c->constant
         (getThunk(t, acquireMonitorForObjectThunk), Compiler::AddressType),
         0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
         c->register_(t->arch->thread()), target);
    } break;

    case monitorexit: {
      Compiler::Operand* target = frame->popObject();
      c->call
        (c->constant
         (getThunk(t, releaseMonitorForObjectThunk), Compiler::AddressType),
         0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
         c->register_(t->arch->thread()), target);
    } break;

    case multianewarray: {
      uint16_t index = codeReadInt16(t, code, ip);
      uint8_t dimensions = codeBody(t, code, ip++);

      object class_ = resolveClassInPool(t, context->method, index - 1);
      if (UNLIKELY(t->exception)) return;
      PROTECT(t, class_);

      unsigned offset
        = localOffset
        (t, localSize(t, context->method) + c->topOfStack(), context->method)
        + t->arch->frameReturnAddressSize();

      Compiler::Operand* result = c->call
        (c->constant
         (getThunk(t, makeMultidimensionalArrayThunk), Compiler::AddressType),
         0,
         frame->trace(0, 0),
         BytesPerWord,
         Compiler::ObjectType,
         4, c->register_(t->arch->thread()), frame->append(class_),
         c->constant(dimensions, Compiler::IntegerType),
         c->constant(offset, Compiler::IntegerType));

      frame->pop(dimensions);
      frame->pushObject(result);
    } break;

    case new_: {
      uint16_t index = codeReadInt16(t, code, ip);

      object class_ = resolveClassInPool(t, context->method, index - 1);
      if (UNLIKELY(t->exception)) return;

      if (classVmFlags(t, class_) & (WeakReferenceFlag | HasFinalizerFlag)) {
        frame->pushObject
          (c->call
           (c->constant
            (getThunk(t, makeNewGeneral64Thunk), Compiler::AddressType),
            0,
            frame->trace(0, 0),
            BytesPerWord,
            Compiler::ObjectType,
            2, c->register_(t->arch->thread()), frame->append(class_)));
      } else {
        frame->pushObject
          (c->call
           (c->constant(getThunk(t, makeNew64Thunk), Compiler::AddressType),
            0,
            frame->trace(0, 0),
            BytesPerWord,
            Compiler::ObjectType,
            2, c->register_(t->arch->thread()), frame->append(class_)));
      }
    } break;

    case newarray: {
      uint8_t type = codeBody(t, code, ip++);

      Compiler::Operand* length = frame->popInt();

      frame->pushObject
        (c->call
         (c->constant(getThunk(t, makeBlankArrayThunk), Compiler::AddressType),
          0,
          frame->trace(0, 0),
          BytesPerWord,
          Compiler::ObjectType,
          3, c->register_(t->arch->thread()),
          c->constant(type, Compiler::IntegerType), length));
    } break;

    case nop: break;

    case pop_:
      frame->pop(1);
      break;

    case pop2:
      frame->pop(2);
      break;

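    // putfield/putstatic: putstatic may first call tryInitClass to run the
    // declaring class's static initializer. Volatile fields get a
    // store-store barrier before and a store-load barrier after the write,
    // except that 64-bit volatile writes on 32-bit targets are bracketed by
    // acquiring and releasing the field's monitor instead.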
    case putfield:
    case putstatic: {
      uint16_t index = codeReadInt16(t, code, ip);

      object field = resolveField(t, context->method, index - 1);
      if (UNLIKELY(t->exception)) return;

      object staticTable = 0;

      if (instruction == putstatic) {
        assert(t, fieldFlags(t, field) & ACC_STATIC);

        if (fieldClass(t, field) != methodClass(t, context->method)
            and classNeedsInit(t, fieldClass(t, field)))
        {
          c->call
            (c->constant
             (getThunk(t, tryInitClassThunk), Compiler::AddressType),
             0,
             frame->trace(0, 0),
             0,
             Compiler::VoidType,
             2, c->register_(t->arch->thread()),
             frame->append(fieldClass(t, field)));
        }

        staticTable = classStaticTable(t, fieldClass(t, field));
      } else {
        assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);

        if (inTryBlock(t, code, ip - 3)) {
          c->saveLocals();
        }
      }

      if (fieldFlags(t, field) & ACC_VOLATILE) {
        if (BytesPerWord == 4
            and (fieldCode(t, field) == DoubleField
                 or fieldCode(t, field) == LongField))
        {
          c->call
            (c->constant
             (getThunk(t, acquireMonitorForObjectThunk),
              Compiler::AddressType),
             0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
             c->register_(t->arch->thread()), frame->append(field));
        } else {
          c->storeStoreBarrier();
        }
      }

      Compiler::Operand* value;
      switch (fieldCode(t, field)) {
      case ByteField:
      case BooleanField:
      case CharField:
      case ShortField:
      case FloatField:
      case IntField: {
        value = frame->popInt();
      } break;

      case DoubleField:
      case LongField: {
        value = frame->popLong();
      } break;

      case ObjectField: {
        value = frame->popObject();
      } break;

      default: abort(t);
      }

      Compiler::Operand* table;

      if (instruction == putstatic) {
        table = frame->append(staticTable);
      } else {
        table = frame->popObject();
      }

      switch (fieldCode(t, field)) {
      case ByteField:
      case BooleanField:
        c->store
          (BytesPerWord, value, 1, c->memory
           (table, Compiler::IntegerType, fieldOffset(t, field), 0, 1));
        break;

      case CharField:
      case ShortField:
        c->store
          (BytesPerWord, value, 2, c->memory
           (table, Compiler::IntegerType, fieldOffset(t, field), 0, 1));
        break;

      case FloatField:
        c->store
          (BytesPerWord, value, 4, c->memory
           (table, Compiler::FloatType, fieldOffset(t, field), 0, 1));
        break;

      case IntField:
        c->store
          (BytesPerWord, value, 4, c->memory
           (table, Compiler::IntegerType, fieldOffset(t, field), 0, 1));
        break;

      case DoubleField:
        c->store
          (8, value, 8, c->memory
           (table, Compiler::FloatType, fieldOffset(t, field), 0, 1));
        break;

      case LongField:
        c->store
          (8, value, 8, c->memory
           (table, Compiler::IntegerType, fieldOffset(t, field), 0, 1));
        break;

      case ObjectField:
        if (instruction == putfield) {
          c->call
            (c->constant
             (getThunk(t, setMaybeNullThunk), Compiler::AddressType),
             0,
             frame->trace(0, 0),
             0,
             Compiler::VoidType,
             4, c->register_(t->arch->thread()), table,
             c->constant(fieldOffset(t, field), Compiler::IntegerType), value);
        } else {
          c->call
            (c->constant(getThunk(t, setThunk), Compiler::AddressType),
             0, 0, 0, Compiler::VoidType,
             4, c->register_(t->arch->thread()), table,
             c->constant(fieldOffset(t, field), Compiler::IntegerType), value);
        }
        break;

      default: abort(t);
      }

      if (fieldFlags(t, field) & ACC_VOLATILE) {
        if (BytesPerWord == 4
            and (fieldCode(t, field) == DoubleField
                 or fieldCode(t, field) == LongField))
        {
          c->call
            (c->constant
             (getThunk(t, releaseMonitorForObjectThunk),
              Compiler::AddressType),
             0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
             c->register_(t->arch->thread()), frame->append(field));
        } else {
          c->storeLoadBarrier();
        }
      }
    } break;

    case ret: {
      unsigned index = codeBody(t, code, ip);
      frame->returnFromSubroutine(index);
    } return;

    case return_:
      if (needsReturnBarrier(t, context->method)) {
        c->storeStoreBarrier();
      }

      handleExit(t, frame);
      c->return_(0, 0);
      return;

    case sipush:
      frame->pushInt
        (c->constant
         (static_cast<int16_t>(codeReadInt16(t, code, ip)),
          Compiler::IntegerType));
      break;

    case swap:
      frame->swap();
      break;

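    // tableswitch: the dense case range [bottom, top] becomes a jump table
    // in the constant pool. After bounds checks against bottom and top fall
    // through to the default target, the key (normalized to a zero-based
    // index) selects the table entry to jump to. javac typically emits
    // tableswitch for dense switches, e.g. cases 0, 1 and 2.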
    case tableswitch: {
      int32_t base = ip - 1;

      ip = (ip + 3) & ~3; // pad to four byte boundary

      uint32_t defaultIp = base + codeReadInt32(t, code, ip);
      assert(t, defaultIp < codeLength(t, code));

      int32_t bottom = codeReadInt32(t, code, ip);
      int32_t top = codeReadInt32(t, code, ip);

      Compiler::Operand* start = 0;
      RUNTIME_ARRAY(uint32_t, ipTable, top - bottom + 1);
      for (int32_t i = 0; i < top - bottom + 1; ++i) {
        unsigned index = ip + (i * 4);
        uint32_t newIp = base + codeReadInt32(t, code, index);
        assert(t, newIp < codeLength(t, code));

        RUNTIME_ARRAY_BODY(ipTable)[i] = newIp;

        Promise* p = c->poolAppendPromise
          (frame->addressPromise(c->machineIp(newIp)));
        if (i == 0) {
          start = frame->addressOperand(p);
        }
      }
      assert(t, start);

      Compiler::Operand* key = frame->popInt();

      c->jumpIfLess(4, c->constant(bottom, Compiler::IntegerType), key,
                    frame->machineIp(defaultIp));

      c->save(1, key);

      saveStateAndCompile(t, frame, defaultIp);

      c->jumpIfGreater(4, c->constant(top, Compiler::IntegerType), key,
                       frame->machineIp(defaultIp));

      c->save(1, key);

      saveStateAndCompile(t, frame, defaultIp);

      Compiler::Operand* normalizedKey
        = (bottom
           ? c->sub(4, c->constant(bottom, Compiler::IntegerType), key) : key);

      c->jmp
        (c->load
         (BytesPerWord, BytesPerWord, c->memory
          (start, Compiler::AddressType, 0, normalizedKey, BytesPerWord),
          BytesPerWord));

      Compiler::State* state = c->saveState();

      for (int32_t i = 0; i < top - bottom + 1; ++i) {
        compile(t, frame, RUNTIME_ARRAY_BODY(ipTable)[i]);
        if (UNLIKELY(t->exception)) return;

        c->restoreState(state);
      }

      ip = defaultIp;
    } break;

    case wide: {
      switch (codeBody(t, code, ip++)) {
      case aload: {
        frame->loadObject(codeReadInt16(t, code, ip));
      } break;

      case astore: {
        frame->storeObject(codeReadInt16(t, code, ip));
      } break;

      case iinc: {
        uint16_t index = codeReadInt16(t, code, ip);
        uint16_t count = codeReadInt16(t, code, ip);

        storeLocal
          (context, 1,
           c->add
           (4, c->constant(count, Compiler::IntegerType),
            loadLocal(context, 1, index)),
           index);
      } break;

      case iload: {
        frame->loadInt(codeReadInt16(t, code, ip));
      } break;

      case istore: {
        frame->storeInt(codeReadInt16(t, code, ip));
      } break;

      case lload: {
        frame->loadLong(codeReadInt16(t, code, ip));
      } break;

      case lstore: {
        frame->storeLong(codeReadInt16(t, code, ip));
      } break;

      case ret: {
        unsigned index = codeReadInt16(t, code, ip);
        c->jmp(loadLocal(context, 1, index));
        frame->returnFromSubroutine(index);
      } return;

      default: abort(t);
      }
    } break;

    default: abort(t);
    }
  }
}

FILE* compileLog = 0;

void
logCompile(MyThread* t, const void* code, unsigned size, const char* class_,
           const char* name, const char* spec)
{
  static bool open = false;
  if (not open) {
    open = true;
    const char* path = findProperty(t, "avian.jit.log");
    if (path) {
      compileLog = vm::fopen(path, "wb");
    } else if (DebugCompile) {
      compileLog = stderr;
    }
  }

  if (compileLog) {
    fprintf(compileLog, "%p %p %s.%s%s\n",
            code, static_cast<const uint8_t*>(code) + size,
            class_, name, spec);
  }
}

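// translateExceptionHandlerTable rewrites the method's exception handler
// table from bytecode indices into machine-code offsets relative to `start`:
// three ints per handler (start, end, handler entry) plus the resolved catch
// type, and installs the new table on the method's code object.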
void
translateExceptionHandlerTable(MyThread* t, Compiler* c, object method,
                               intptr_t start)
{
  object oldTable = codeExceptionHandlerTable(t, methodCode(t, method));

  if (oldTable) {
    PROTECT(t, method);
    PROTECT(t, oldTable);

    unsigned length = exceptionHandlerTableLength(t, oldTable);

    object newIndex = makeIntArray(t, length * 3);
    PROTECT(t, newIndex);

    object newTable = makeArray(t, length + 1);
    PROTECT(t, newTable);

    set(t, newTable, ArrayBody, newIndex);

    for (unsigned i = 0; i < length; ++i) {
      ExceptionHandler* oldHandler = exceptionHandlerTableBody
        (t, oldTable, i);

      intArrayBody(t, newIndex, i * 3)
        = c->machineIp(exceptionHandlerStart(oldHandler))->value() - start;

      intArrayBody(t, newIndex, (i * 3) + 1)
        = c->machineIp(exceptionHandlerEnd(oldHandler))->value() - start;

      intArrayBody(t, newIndex, (i * 3) + 2)
        = c->machineIp(exceptionHandlerIp(oldHandler))->value() - start;

      object type;
      if (exceptionHandlerCatchType(oldHandler)) {
        type = resolveClassInPool
          (t, method, exceptionHandlerCatchType(oldHandler) - 1);
        if (UNLIKELY(t->exception)) return;
      } else {
        type = 0;
      }

      set(t, newTable, ArrayBody + ((i + 1) * BytesPerWord), type);
    }

    set(t, methodCode(t, method), CodeExceptionHandlerTable, newTable);
  }
}

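// translateLineNumberTable performs the same translation for the line number
// table, mapping each entry's bytecode ip to a machine-code offset so stack
// traces can report source lines.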
void
translateLineNumberTable(MyThread* t, Compiler* c, object code, intptr_t start)
{
  object oldTable = codeLineNumberTable(t, code);
  if (oldTable) {
    PROTECT(t, code);
    PROTECT(t, oldTable);

    unsigned length = lineNumberTableLength(t, oldTable);
    object newTable = makeLineNumberTable(t, length);
    for (unsigned i = 0; i < length; ++i) {
      LineNumber* oldLine = lineNumberTableBody(t, oldTable, i);
      LineNumber* newLine = lineNumberTableBody(t, newTable, i);

      lineNumberIp(newLine)
        = c->machineIp(lineNumberIp(oldLine))->value() - start;

      lineNumberLine(newLine) = lineNumberLine(oldLine);
    }

    set(t, code, CodeLineNumberTable, newTable);
  }
}

void
printSet(uintptr_t m, unsigned limit)
{
  if (limit) {
    for (unsigned i = 0; i < 16; ++i) {
      if ((m >> i) & 1) {
        fprintf(stderr, "1");
      } else {
        fprintf(stderr, "_");
      }
    }
  }
}

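// calculateTryCatchRoots computes the GC root map on entry to an exception
// handler by ANDing together the root maps recorded at every GC point
// (trace element) inside the corresponding try block: a stack slot is
// treated as a root only if it holds a reference on every such path, which
// is conservative but safe even when a slot that held a reference on entry
// to the block is later overwritten with a primitive.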
void
calculateTryCatchRoots(Context* context, SubroutinePath* subroutinePath,
                       uintptr_t* roots, unsigned mapSize, unsigned start,
                       unsigned end)
{
  memset(roots, 0xFF, mapSize * BytesPerWord);

  if (DebugFrameMaps) {
    fprintf(stderr, "calculate try/catch roots from %d to %d", start, end);
    if (subroutinePath) {
      fprintf(stderr, " ");
      print(subroutinePath);
    }
    fprintf(stderr, "\n");
  }

  for (TraceElement* te = context->traceLog; te; te = te->next) {
    if (te->ip >= start and te->ip < end) {
      uintptr_t* traceRoots = 0;
      if (subroutinePath == 0) {
        traceRoots = te->map;
        te->watch = true;
      } else {
        for (SubroutineTrace* t = te->subroutineTrace; t; t = t->next) {
          if (t->path == subroutinePath) {
            traceRoots = t->map;
            t->watch = true;
            break;
          }
        }
      }

      if (traceRoots) {
        if (DebugFrameMaps) {
          fprintf(stderr, " use roots at ip %3d: ", te->ip);
          printSet(*traceRoots, mapSize);
          fprintf(stderr, "\n");
        }

        for (unsigned wi = 0; wi < mapSize; ++wi) {
          roots[wi] &= traceRoots[wi];
        }
      } else {
        if (DebugFrameMaps) {
          fprintf(stderr, " skip roots at ip %3d\n", te->ip);
        }
      }
    }
  }

  if (DebugFrameMaps) {
    fprintf(stderr, "result roots : ");
    printSet(*roots, mapSize);
    fprintf(stderr, "\n");
  }
}

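// calculateFrameMaps replays the event log recorded during compilation
// (mark/clear of stack slots, ip changes, context pushes and pops) to build
// the per-instruction root tables; it returns the index of the next
// unconsumed event so recursive calls for nested contexts can resume where
// they left off.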
unsigned
calculateFrameMaps(MyThread* t, Context* context, uintptr_t* originalRoots,
                   unsigned eventIndex, SubroutinePath* subroutinePath = 0)
{
  // for each instruction with more than one predecessor, and for each
  // stack position, determine if there exists a path to that
  // instruction such that there is not an object pointer left at that
  // stack position (i.e. it is uninitialized or contains primitive
  // data).

  unsigned mapSize = frameMapSizeInWords(t, context->method);

  RUNTIME_ARRAY(uintptr_t, roots, mapSize);
  if (originalRoots) {
    memcpy(RUNTIME_ARRAY_BODY(roots), originalRoots, mapSize * BytesPerWord);
  } else {
    memset(RUNTIME_ARRAY_BODY(roots), 0, mapSize * BytesPerWord);
  }

  int32_t ip = -1;

  // invariant: for each stack position, roots contains a zero at that
  // position if there exists some path to the current instruction
  // such that there is definitely not an object pointer at that
  // position. Otherwise, roots contains a one at that position,
  // meaning either all known paths result in an object pointer at
  // that position, or the contents of that position are as yet
  // unknown.

  unsigned length = context->eventLog.length();
  while (eventIndex < length) {
    Event e = static_cast<Event>(context->eventLog.get(eventIndex++));
    switch (e) {
    case PushContextEvent: {
      eventIndex = calculateFrameMaps
        (t, context, RUNTIME_ARRAY_BODY(roots), eventIndex, subroutinePath);
    } break;

    case PopContextEvent:
      return eventIndex;

    case IpEvent: {
      ip = context->eventLog.get2(eventIndex);
      eventIndex += 2;

      if (DebugFrameMaps) {
        fprintf(stderr, " roots at ip %3d: ", ip);
        printSet(*RUNTIME_ARRAY_BODY(roots), mapSize);
        fprintf(stderr, "\n");
      }

      uintptr_t* tableRoots
        = (subroutinePath ? subroutinePath->rootTable : context->rootTable)
        + (ip * mapSize);

      if (context->visitTable[ip] > 1) {
        for (unsigned wi = 0; wi < mapSize; ++wi) {
          uintptr_t newRoots = tableRoots[wi] & RUNTIME_ARRAY_BODY(roots)[wi];

          if ((eventIndex == length
               or context->eventLog.get(eventIndex) == PopContextEvent)
              and newRoots != tableRoots[wi])
          {
            if (DebugFrameMaps) {
              fprintf(stderr, "dirty roots!\n");
            }

            context->dirtyRoots = true;
          }

          tableRoots[wi] = newRoots;
          RUNTIME_ARRAY_BODY(roots)[wi] &= tableRoots[wi];
        }

        if (DebugFrameMaps) {
          fprintf(stderr, " table roots at ip %3d: ", ip);
          printSet(*tableRoots, mapSize);
          fprintf(stderr, "\n");
        }
      } else {
        memcpy(tableRoots, RUNTIME_ARRAY_BODY(roots), mapSize * BytesPerWord);
      }
    } break;

    case MarkEvent: {
      unsigned i = context->eventLog.get2(eventIndex);
      eventIndex += 2;

      markBit(RUNTIME_ARRAY_BODY(roots), i);
    } break;

    case ClearEvent: {
      unsigned i = context->eventLog.get2(eventIndex);
      eventIndex += 2;

      clearBit(RUNTIME_ARRAY_BODY(roots), i);
    } break;

    case PushExceptionHandlerEvent: {
      unsigned start = context->eventLog.get2(eventIndex);
      eventIndex += 2;
      unsigned end = context->eventLog.get2(eventIndex);
      eventIndex += 2;

      if (context->subroutineTable and context->subroutineTable[start]) {
        Subroutine* s = context->subroutineTable[start];
        unsigned originalEventIndex = eventIndex;

        for (SubroutineCall* c = s->calls; c; c = c->next) {
          for (SubroutinePath* p = c->paths; p; p = p->listNext) {
|
fix stack frame mapping code for exception handlers
Previously, the stack frame mapping code (responsible for statically
calculating the map of GC roots for a method's stack frame during JIT
compilation) would assume that the map of GC roots on entry to an
exception handler is the same as on entry to the "try" block which the
handler is attached to. Technically, this is true, but the algorithm
we use does not consider whether a local variable is still "live"
(i.e. will be read later) when calculating the map - only whether we
can expect to find a reference there via normal (non-exceptional)
control flow. This can backfire if, within a "try" block, the stack
location which held an object reference on entry to the block gets
overwritten with a non-reference (i.e. a primitive). If an exception
is later thrown from such a block, we might end up trying to treat
that non-reference as a reference during GC, which will crash the VM.
The ideal way to fix this is to calculate the true interval for which
each value is live and use that to produce the stack frame maps. This
would provide the added benefit of ensuring that the garbage collector
does not visit references which, although still present on the stack,
will not be used again.
However, this commit uses the less invasive strategy of ANDing
together the root maps at each GC point within a "try" block and using
the result as the map on entry to the corresponding exception
handler(s). This should give us safe, if not optimal, results. Later
on, we can refine it as described above.
2010-02-05 01:03:32 +00:00
|
|
|
calculateTryCatchRoots
|
|
|
|
(context, p, RUNTIME_ARRAY_BODY(roots), mapSize, start, end);
|
2009-07-13 23:49:15 +00:00
|
|
|
|
|
|
|
eventIndex = calculateFrameMaps
|
2009-08-27 00:26:44 +00:00
|
|
|
(t, context, RUNTIME_ARRAY_BODY(roots), originalEventIndex, p);
|
2009-07-13 23:49:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
fix stack frame mapping code for exception handlers
Previously, the stack frame mapping code (responsible for statically
calculating the map of GC roots for a method's stack frame during JIT
compilation) would assume that the map of GC roots on entry to an
exception handler is the same as on entry to the "try" block which the
handler is attached to. Technically, this is true, but the algorithm
we use does not consider whether a local variable is still "live"
(i.e. will be read later) when calculating the map - only whether we
can expect to find a reference there via normal (non-exceptional)
control flow. This can backfire if, within a "try" block, the stack
location which held an object reference on entry to the block gets
overwritten with a non-reference (i.e. a primitive). If an exception
is later thrown from such a block, we might end up trying to treat
that non-reference as a reference during GC, which will crash the VM.
The ideal way to fix this is to calculate the true interval for which
each value is live and use that to produce the stack frame maps. This
would provide the added benefit of ensuring that the garbage collector
does not visit references which, although still present on the stack,
will not be used again.
However, this commit uses the less invasive strategy of ANDing
together the root maps at each GC point within a "try" block and using
the result as the map on entry to the corresponding exception
handler(s). This should give us safe, if not optimal, results. Later
on, we can refine it as described above.
2010-02-05 01:03:32 +00:00
|
|
|
calculateTryCatchRoots
|
|
|
|
(context, 0, RUNTIME_ARRAY_BODY(roots), mapSize, start, end);
|
2009-07-13 23:49:15 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
eventIndex = calculateFrameMaps
|
|
|
|
(t, context, RUNTIME_ARRAY_BODY(roots), eventIndex, 0);
|
2009-07-13 23:49:15 +00:00
|
|
|
}
|
2009-06-16 19:41:31 +00:00
|
|
|
} break;
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
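    // TraceEvent: a GC point (typically a call site).  Copy the current
    // root set into the trace element's map - or, when walking a JSR
    // subroutine, into the map for the current subroutine path - and mark
    // the roots dirty if the element is being watched and a previously
    // recorded map disagrees, so that the maps will be recomputed.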
    case TraceEvent: {
      TraceElement* te; context->eventLog.get(eventIndex, &te, BytesPerWord);
      if (DebugFrameMaps) {
        fprintf(stderr, " trace roots at ip %3d: ", ip);
        printSet(*RUNTIME_ARRAY_BODY(roots), mapSize);
        if (subroutinePath) {
          fprintf(stderr, " ");
          print(subroutinePath);
        }
        fprintf(stderr, "\n");
      }

      uintptr_t* map;
      bool watch;
      if (subroutinePath == 0) {
        map = te->map;
        watch = te->watch;
      } else {
        SubroutineTrace* trace = 0;
        for (SubroutineTrace* t = te->subroutineTrace; t; t = t->next) {
          if (t->path == subroutinePath) {
            trace = t;
            break;
          }
        }

        if (trace == 0) {
          te->subroutineTrace = trace = new
            (context->zone.allocate
             (sizeof(SubroutineTrace) + (mapSize * BytesPerWord)))
            SubroutineTrace(subroutinePath, te->subroutineTrace, mapSize);

          ++ te->subroutineTraceCount;
        }

        map = trace->map;
        watch = trace->watch;
      }

      for (unsigned wi = 0; wi < mapSize; ++wi) {
        uintptr_t v = RUNTIME_ARRAY_BODY(roots)[wi];

        if (watch and map[wi] != v) {
          if (DebugFrameMaps) {
            fprintf(stderr, "dirty roots due to trace watch!\n");
          }

          context->dirtyRoots = true;
        }

        map[wi] = v;
      }

      eventIndex += BytesPerWord;
    } break;

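    // PushSubroutineEvent: entry into a JSR subroutine.  Find (or create)
    // the SubroutinePath describing the current chain of subroutine
    // calls, walk the subroutine's portion of the event log under that
    // path, and continue at the log index recorded just after the call.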
    case PushSubroutineEvent: {
      SubroutineCall* call;
      context->eventLog.get(eventIndex, &call, BytesPerWord);
      eventIndex += BytesPerWord;

      unsigned nextIndex = context->eventLog.get2(eventIndex);

      eventIndex = nextIndex;

      SubroutinePath* path = 0;
      for (SubroutinePath* p = call->paths; p; p = p->listNext) {
        if (p->stackNext == subroutinePath) {
          path = p;
          break;
        }
      }

      if (path == 0) {
        path = new (context->zone.allocate(sizeof(SubroutinePath)))
          SubroutinePath(call, subroutinePath,
                         makeRootTable(t, &(context->zone), context->method));
      }

      calculateFrameMaps
        (t, context, RUNTIME_ARRAY_BODY(roots), call->subroutine->logIndex,
         path);
    } break;

    case PopSubroutineEvent:
      return static_cast<unsigned>(-1);

    default: abort(t);
    }
  }

  return eventIndex;
}

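// qsort comparator: orders trace elements by the machine-code address of
// the GC point they describe.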
int
compareTraceElementPointers(const void* va, const void* vb)
{
  TraceElement* a = *static_cast<TraceElement* const*>(va);
  TraceElement* b = *static_cast<TraceElement* const*>(vb);
  if (a->address->value() > b->address->value()) {
    return 1;
  } else if (a->address->value() < b->address->value()) {
    return -1;
  } else {
    return 0;
  }
}

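// Given a "simple" frame map table (an int array holding one code-offset
// entry per trace point followed by the packed frame maps), recover the
// number of 32-bit words occupied by the packed maps from the array's
// total length; by construction this matches the ceiling used when the
// table was allocated.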
unsigned
simpleFrameMapTableSize(MyThread* t, object method, object map)
{
  int size = frameMapSizeInBits(t, method);
  return ceiling(intArrayLength(t, map) * size, 32 + size);
}

unsigned
codeSingletonSizeInBytes(MyThread*, unsigned codeSizeInBytes)
{
  unsigned count = ceiling(codeSizeInBytes, BytesPerWord);
  unsigned size = count + singletonMaskSize(count);
  return pad(SingletonBody + (size * BytesPerWord));
}

uint8_t*
finish(MyThread* t, Allocator* allocator, Assembler* a, const char* name)
{
  uint8_t* start = static_cast<uint8_t*>
    (allocator->allocate(pad(a->length())));

  a->writeTo(start);

  logCompile(t, start, a->length(), 0, name, 0);

  return start;
}

void
setBit(int32_t* dst, unsigned index)
{
  dst[index / 32] |= static_cast<int32_t>(1) << (index % 32);
}

void
clearBit(int32_t* dst, unsigned index)
{
  dst[index / 32] &= ~(static_cast<int32_t>(1) << (index % 32));
}

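// Copy the bits of a source frame map (up to the trace element's
// argumentIndex) into the packed destination table starting at the given
// bit offset, printing the original and final maps when DebugFrameMaps is
// enabled.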
void
copyFrameMap(int32_t* dst, uintptr_t* src, unsigned mapSizeInBits,
             unsigned offset, TraceElement* p,
             SubroutinePath* subroutinePath)
{
  if (DebugFrameMaps) {
    fprintf(stderr, "  orig roots at ip %3d: ", p->ip);
    printSet(src[0], ceiling(mapSizeInBits, BitsPerWord));
    print(subroutinePath);
    fprintf(stderr, "\n");

    fprintf(stderr, " final roots at ip %3d: ", p->ip);
  }

  for (unsigned j = 0; j < p->argumentIndex; ++j) {
    if (getBit(src, j)) {
      if (DebugFrameMaps) {
        fprintf(stderr, "1");
      }
      setBit(dst, offset + j);
    } else {
      if (DebugFrameMaps) {
        fprintf(stderr, "_");
      }
      clearBit(dst, offset + j);
    }
  }

  if (DebugFrameMaps) {
    print(subroutinePath);
    fprintf(stderr, "\n");
  }
}

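// Layout of the "general" frame map table used when a method contains JSR
// subroutines: a FrameMapTableHeader giving the number of index entries,
// then one FrameMapTableIndexElement per trace point (its code offset,
// the index of its first packed map, and the offset of its
// subroutine-path record), then the packed frame maps themselves, and
// finally FrameMapTablePath records describing each chain of subroutine
// calls.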
class FrameMapTableHeader {
 public:
  FrameMapTableHeader(unsigned indexCount):
    indexCount(indexCount)
  { }

  unsigned indexCount;
};

class FrameMapTableIndexElement {
 public:
  FrameMapTableIndexElement(int offset, unsigned base, unsigned path):
    offset(offset),
    base(base),
    path(path)
  { }

  int offset;
  unsigned base;
  unsigned path;
};

class FrameMapTablePath {
 public:
  FrameMapTablePath(unsigned stackIndex, unsigned elementCount, unsigned next):
    stackIndex(stackIndex),
    elementCount(elementCount),
    next(next)
  { }

  unsigned stackIndex;
  unsigned elementCount;
  unsigned next;
  int32_t elements[0];
};

int
compareInt32s(const void* va, const void* vb)
{
  return *static_cast<int32_t const*>(va) - *static_cast<int32_t const*>(vb);
}

int
compare(SubroutinePath* a, SubroutinePath* b)
{
  if (a->stackNext) {
    int d = compare(a->stackNext, b->stackNext);
    if (d) return d;
  }
  int64_t av = a->call->returnAddress->value();
  int64_t bv = b->call->returnAddress->value();
  if (av > bv) {
    return 1;
  } else if (av < bv) {
    return -1;
  } else {
    return 0;
  }
}

int
compareSubroutineTracePointers(const void* va, const void* vb)
{
  return compare((*static_cast<SubroutineTrace* const*>(va))->path,
                 (*static_cast<SubroutineTrace* const*>(vb))->path);
}

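// Build the general (subroutine-aware) frame map table for a method: for
// each trace point, emit one packed map per subroutine path that can
// reach it, lay out FrameMapTablePath records for every subroutine on the
// call chain (with return addresses sorted so they can be searched), and
// record an index element mapping the trace point's code offset to its
// maps and path.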
object
makeGeneralFrameMapTable(MyThread* t, Context* context, uint8_t* start,
                         TraceElement** elements, unsigned pathFootprint,
                         unsigned mapCount)
{
  unsigned mapSize = frameMapSizeInBits(t, context->method);
  unsigned indexOffset = sizeof(FrameMapTableHeader);
  unsigned mapsOffset = indexOffset
    + (context->traceLogCount * sizeof(FrameMapTableIndexElement));
  unsigned pathsOffset = mapsOffset + (ceiling(mapCount * mapSize, 32) * 4);

  object table = makeByteArray(t, pathsOffset + pathFootprint);

  int8_t* body = &byteArrayBody(t, table, 0);
  new (body) FrameMapTableHeader(context->traceLogCount);

  unsigned nextTableIndex = pathsOffset;
  unsigned nextMapIndex = 0;
  for (unsigned i = 0; i < context->traceLogCount; ++i) {
    TraceElement* p = elements[i];
    unsigned mapBase = nextMapIndex;

    unsigned pathIndex;
    if (p->subroutineTrace) {
      FrameMapTablePath* previous = 0;
      Subroutine* subroutine = p->subroutineTrace->path->call->subroutine;
      for (Subroutine* s = subroutine; s; s = s->stackNext) {
        if (s->tableIndex == 0) {
          unsigned pathObjectSize = sizeof(FrameMapTablePath)
            + (sizeof(int32_t) * s->callCount);

          assert(t, nextTableIndex + pathObjectSize
                 <= byteArrayLength(t, table));

          s->tableIndex = nextTableIndex;

          nextTableIndex += pathObjectSize;

          FrameMapTablePath* current = new (body + s->tableIndex)
            FrameMapTablePath
            (s->stackIndex, s->callCount,
             s->stackNext ? s->stackNext->tableIndex : 0);

          unsigned i = 0;
          for (SubroutineCall* c = subroutine->calls; c; c = c->next) {
            assert(t, i < s->callCount);

            current->elements[i++]
              = static_cast<intptr_t>(c->returnAddress->value())
              - reinterpret_cast<intptr_t>(start);
          }
          assert(t, i == s->callCount);

          qsort(current->elements, s->callCount, sizeof(int32_t),
                compareInt32s);

          if (previous) {
            previous->next = s->tableIndex;
          }

          previous = current;
        } else {
          break;
        }
      }

      pathIndex = subroutine->tableIndex;

      RUNTIME_ARRAY(SubroutineTrace*, traces, p->subroutineTraceCount);
      unsigned i = 0;
      for (SubroutineTrace* trace = p->subroutineTrace;
           trace; trace = trace->next)
      {
        assert(t, i < p->subroutineTraceCount);
        RUNTIME_ARRAY_BODY(traces)[i++] = trace;
      }
      assert(t, i == p->subroutineTraceCount);

      qsort(RUNTIME_ARRAY_BODY(traces), p->subroutineTraceCount,
            sizeof(SubroutineTrace*), compareSubroutineTracePointers);

      for (unsigned i = 0; i < p->subroutineTraceCount; ++i) {
        assert(t, mapsOffset + ceiling(nextMapIndex + mapSize, 32) * 4
               <= pathsOffset);

        copyFrameMap(reinterpret_cast<int32_t*>(body + mapsOffset),
                     RUNTIME_ARRAY_BODY(traces)[i]->map, mapSize,
                     nextMapIndex, p, RUNTIME_ARRAY_BODY(traces)[i]->path);

        nextMapIndex += mapSize;
      }
    } else {
      pathIndex = 0;

      assert(t, mapsOffset + ceiling(nextMapIndex + mapSize, 32) * 4
             <= pathsOffset);

      copyFrameMap(reinterpret_cast<int32_t*>(body + mapsOffset), p->map,
                   mapSize, nextMapIndex, p, 0);

      nextMapIndex += mapSize;
    }

    unsigned elementIndex = indexOffset
      + (i * sizeof(FrameMapTableIndexElement));

    assert(t, elementIndex + sizeof(FrameMapTableIndexElement) <= mapsOffset);

    new (body + elementIndex) FrameMapTableIndexElement
      (static_cast<intptr_t>(p->address->value())
       - reinterpret_cast<intptr_t>(start), mapBase, pathIndex);
  }

  assert(t, nextMapIndex == mapCount * mapSize);

  return table;
}

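// Build the simple frame map table used when a method contains no JSR
// subroutines: an int array with one machine-code offset per trace point
// followed by the packed GC root maps.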
object
makeSimpleFrameMapTable(MyThread* t, Context* context, uint8_t* start,
                        TraceElement** elements)
{
  unsigned mapSize = frameMapSizeInBits(t, context->method);
  object table = makeIntArray
    (t, context->traceLogCount
     + ceiling(context->traceLogCount * mapSize, 32));

  assert(t, intArrayLength(t, table) == context->traceLogCount
         + simpleFrameMapTableSize(t, context->method, table));

  for (unsigned i = 0; i < context->traceLogCount; ++i) {
    TraceElement* p = elements[i];

    intArrayBody(t, table, i) = static_cast<intptr_t>(p->address->value())
      - reinterpret_cast<intptr_t>(start);

    assert(t, context->traceLogCount + ceiling((i + 1) * mapSize, 32)
           <= intArrayLength(t, table));

    if (mapSize) {
      copyFrameMap(&intArrayBody(t, table, context->traceLogCount), p->map,
                   mapSize, i * mapSize, p, 0);
    }
  }

  return table;
}

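// Finish compilation of a method: allocate executable memory and emit the
// machine code, link the method's constant pool objects into the GC root
// list, resolve any delayed address promises recorded in the boot context
// (if present), translate the exception handler and line number tables
// into machine-code offsets, build the frame map table used for stack
// walking, and flush the instruction cache.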
uint8_t*
finish(MyThread* t, Allocator* allocator, Context* context)
{
  Compiler* c = context->compiler;

  if (false) {
    logCompile
      (t, 0, 0,
       reinterpret_cast<const char*>
       (&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
       reinterpret_cast<const char*>
       (&byteArrayBody(t, methodName(t, context->method), 0)),
       reinterpret_cast<const char*>
       (&byteArrayBody(t, methodSpec(t, context->method), 0)));
  }

  // for debugging:
  if (false and
      ::strcmp
      (reinterpret_cast<const char*>
       (&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
       "java/lang/System") == 0 and
      ::strcmp
      (reinterpret_cast<const char*>
       (&byteArrayBody(t, methodName(t, context->method), 0)),
       "<clinit>") == 0)
  {
    trap();
  }

  unsigned codeSize = c->compile();
  uintptr_t* code = static_cast<uintptr_t*>
    (allocator->allocate(pad(codeSize) + pad(c->poolSize()) + BytesPerWord));
  code[0] = codeSize;
  uint8_t* start = reinterpret_cast<uint8_t*>(code + 1);

  if (context->objectPool) {
    object pool = allocate3
      (t, allocator, Machine::ImmortalAllocation,
       FixedSizeOfArray + ((context->objectPoolCount + 1) * BytesPerWord),
       true);

    initArray(t, pool, context->objectPoolCount + 1);
    mark(t, pool, 0);

    set(t, pool, ArrayBody, root(t, ObjectPools));
    setRoot(t, ObjectPools, pool);

    unsigned i = 1;
    for (PoolElement* p = context->objectPool; p; p = p->next) {
      unsigned offset = ArrayBody + ((i++) * BytesPerWord);

      p->address = reinterpret_cast<uintptr_t>(pool) + offset;

      set(t, pool, offset, p->target);
    }
  }

  c->writeTo(start);

  BootContext* bc = context->bootContext;
  if (bc) {
    for (DelayedPromise* p = bc->addresses;
         p != bc->addressSentinal;
         p = p->next)
    {
      p->basis = new (bc->zone->allocate(sizeof(ResolvedPromise)))
        ResolvedPromise(p->basis->value());
    }
  }

  translateExceptionHandlerTable
    (t, c, context->method, reinterpret_cast<intptr_t>(start));
  if (UNLIKELY(t->exception)) return 0;

  translateLineNumberTable(t, c, methodCode(t, context->method),
                           reinterpret_cast<intptr_t>(start));

  { object code = methodCode(t, context->method);

    code = makeCode(t, 0,
                    codeExceptionHandlerTable(t, code),
                    codeLineNumberTable(t, code),
                    codeMaxStack(t, code),
                    codeMaxLocals(t, code),
                    0);

    set(t, context->method, MethodCode, code);
  }

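  // If any GC points were recorded, gather the trace elements, register
  // call nodes for direct calls, sort the elements by address, and build
  // either the general (subroutine-aware) or the simple frame map table,
  // storing it in the method's code pool slot.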
  if (context->traceLogCount) {
    RUNTIME_ARRAY(TraceElement*, elements, context->traceLogCount);
    unsigned index = 0;
    unsigned pathFootprint = 0;
    unsigned mapCount = 0;
    for (TraceElement* p = context->traceLog; p; p = p->next) {
      assert(t, index < context->traceLogCount);

      SubroutineTrace* trace = p->subroutineTrace;
      unsigned myMapCount = 1;
      if (trace) {
        for (Subroutine* s = trace->path->call->subroutine;
             s; s = s->stackNext)
        {
          unsigned callCount = s->callCount;
          myMapCount *= callCount;
          if (not s->visited) {
            s->visited = true;
            pathFootprint += sizeof(FrameMapTablePath)
              + (sizeof(int32_t) * callCount);
          }
        }
      }

      mapCount += myMapCount;

      RUNTIME_ARRAY_BODY(elements)[index++] = p;

      if (p->target) {
        insertCallNode
          (t, makeCallNode
           (t, p->address->value(), p->target, p->flags, 0));
      }
    }

    qsort(RUNTIME_ARRAY_BODY(elements), context->traceLogCount,
          sizeof(TraceElement*), compareTraceElementPointers);

    object map;
    if (pathFootprint) {
      map = makeGeneralFrameMapTable
        (t, context, start, RUNTIME_ARRAY_BODY(elements), pathFootprint,
         mapCount);
    } else {
      map = makeSimpleFrameMapTable
        (t, context, start, RUNTIME_ARRAY_BODY(elements));
    }

    set(t, methodCode(t, context->method), CodePool, map);
  }

  logCompile
    (t, start, codeSize,
     reinterpret_cast<const char*>
     (&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
     reinterpret_cast<const char*>
     (&byteArrayBody(t, methodName(t, context->method), 0)),
     reinterpret_cast<const char*>
     (&byteArrayBody(t, methodSpec(t, context->method), 0)));

  // for debugging:
  if (false and
      ::strcmp
      (reinterpret_cast<const char*>
       (&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
       "java/lang/System") == 0 and
      ::strcmp
      (reinterpret_cast<const char*>
       (&byteArrayBody(t, methodName(t, context->method), 0)),
       "<clinit>") == 0)
  {
    trap();
  }

  syncInstructionCache(start, codeSize);

  return start;
}

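// Compile a method's bytecode: initialize the frame from the parameter
// types in the method spec, compile the main body, then compile each
// reachable exception handler (bracketing it in the event log so that
// calculateFrameMaps can AND together the root maps of the protected
// range), and recompute frame maps until no roots remain dirty.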
uint8_t*
compile(MyThread* t, Allocator* allocator, Context* context)
{
  Compiler* c = context->compiler;

  // fprintf(stderr, "compiling %s.%s%s\n",
  //         &byteArrayBody(t, className(t, methodClass(t, context->method)), 0),
  //         &byteArrayBody(t, methodName(t, context->method), 0),
  //         &byteArrayBody(t, methodSpec(t, context->method), 0));

  unsigned footprint = methodParameterFootprint(t, context->method);
  unsigned locals = localSize(t, context->method);
  c->init(codeLength(t, methodCode(t, context->method)), footprint, locals,
          alignedFrameSize(t, context->method));

  RUNTIME_ARRAY(uint8_t, stackMap,
                codeMaxStack(t, methodCode(t, context->method)));
  Frame frame(context, RUNTIME_ARRAY_BODY(stackMap));

  unsigned index = methodParameterFootprint(t, context->method);
  if ((methodFlags(t, context->method) & ACC_STATIC) == 0) {
    frame.set(--index, Frame::Object);
    c->initLocal(1, index, Compiler::ObjectType);
  }

  for (MethodSpecIterator it
         (t, reinterpret_cast<const char*>
          (&byteArrayBody(t, methodSpec(t, context->method), 0)));
       it.hasNext();)
  {
    switch (*it.next()) {
    case 'L':
    case '[':
      frame.set(--index, Frame::Object);
      c->initLocal(1, index, Compiler::ObjectType);
      break;

    case 'J':
      frame.set(--index, Frame::Long);
      frame.set(--index, Frame::Long);
      c->initLocal(2, index, Compiler::IntegerType);
      break;

    case 'D':
      frame.set(--index, Frame::Long);
      frame.set(--index, Frame::Long);
      c->initLocal(2, index, Compiler::FloatType);
      break;

    case 'F':
      frame.set(--index, Frame::Integer);
      c->initLocal(1, index, Compiler::FloatType);
      break;

    default:
      frame.set(--index, Frame::Integer);
      c->initLocal(1, index, Compiler::IntegerType);
      break;
    }
  }

  handleEntrance(t, &frame);

  Compiler::State* state = c->saveState();

  compile(t, &frame, 0);
  if (UNLIKELY(t->exception)) return 0;

  context->dirtyRoots = false;
  unsigned eventIndex = calculateFrameMaps(t, context, 0, 0);

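  // Compile every exception handler whose protected range was actually
  // visited, restoring the compiler to its saved state for each one.  The
  // outer loop repeats because compiling one handler can make another
  // handler's protected range reachable.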
object eht = codeExceptionHandlerTable(t, methodCode(t, context->method));
|
2007-12-09 22:45:43 +00:00
|
|
|
if (eht) {
|
|
|
|
PROTECT(t, eht);
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2008-01-26 00:17:27 +00:00
|
|
|
unsigned visitCount = exceptionHandlerTableLength(t, eht);
|
2009-03-08 01:23:28 +00:00
|
|
    RUNTIME_ARRAY(bool, visited, visitCount);
    memset(RUNTIME_ARRAY_BODY(visited), 0, visitCount * sizeof(bool));

    while (visitCount) {
      bool progress = false;

      for (unsigned i = 0; i < exceptionHandlerTableLength(t, eht); ++i) {
        c->restoreState(state);

        ExceptionHandler* eh = exceptionHandlerTableBody(t, eht, i);
        unsigned start = exceptionHandlerStart(eh);

        if ((not RUNTIME_ARRAY_BODY(visited)[i])
            and context->visitTable[start])
        {
          -- visitCount;
          RUNTIME_ARRAY_BODY(visited)[i] = true;

          progress = true;

          RUNTIME_ARRAY(uint8_t, stackMap,
                        codeMaxStack(t, methodCode(t, context->method)));
          Frame frame2(&frame, RUNTIME_ARRAY_BODY(stackMap));

          unsigned end = exceptionHandlerEnd(eh);
          if (exceptionHandlerIp(eh) >= start
              and exceptionHandlerIp(eh) < end)
          {
            end = exceptionHandlerIp(eh);
          }

          context->eventLog.append(PushExceptionHandlerEvent);
          context->eventLog.append2(start);
          context->eventLog.append2(end);

          for (unsigned i = 1;
               i < codeMaxStack(t, methodCode(t, context->method));
               ++i)
          {
            frame2.set(localSize(t, context->method) + i, Frame::Integer);
          }

          compile(t, &frame2, exceptionHandlerIp(eh), start);
          if (UNLIKELY(t->exception)) return 0;

          context->eventLog.append(PopContextEvent);

          eventIndex = calculateFrameMaps(t, context, 0, eventIndex);
        }
      }

      assert(t, progress);
    }
  }

  while (context->dirtyRoots) {
    context->dirtyRoots = false;
    calculateFrameMaps(t, context, 0, 0);
  }

  return finish(t, allocator, context);
}
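
// Note on the exception-handler loop in compile() above: handlers are
// compiled via a worklist.  A handler is compiled only once some
// instruction in its protected range has been visited
// (context->visitTable[start] is nonzero), since until then the frame
// state on entry is unknown.  Compiling one handler may visit code
// protected by another, so the outer loop repeats until every handler
// has been processed; assert(t, progress) presumes each pass compiles
// at least one new handler.  The handler frame's operand-stack slots
// are conservatively marked Frame::Integer (non-reference) before
// compilation resumes at exceptionHandlerIp(eh).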

void
updateCall(MyThread* t, UnaryOperation op, void* returnAddress, void* target)
{
  t->arch->updateCall(op, returnAddress, target);
}

void*
compileMethod2(MyThread* t, void* ip)
{
  object node = findCallNode(t, ip);
  object target = callNodeTarget(t, node);

  if (LIKELY(t->exception == 0)) {
    PROTECT(t, node);
    PROTECT(t, target);

    t->trace->targetMethod = target;

    compile(t, codeAllocator(t), 0, target);

    t->trace->targetMethod = 0;
  }

  if (UNLIKELY(t->exception)) {
    return 0;
  } else {
    uintptr_t address;
    if ((methodFlags(t, target) & ACC_NATIVE)
        and useLongJump(t, reinterpret_cast<uintptr_t>(ip)))
    {
      address = bootNativeThunk(t);
    } else {
      address = methodAddress(t, target);
    }
    uint8_t* updateIp = static_cast<uint8_t*>(ip);

    UnaryOperation op;
    if (callNodeFlags(t, node) & TraceElement::LongCall) {
      if (callNodeFlags(t, node) & TraceElement::TailCall) {
        op = AlignedLongJump;
      } else {
        op = AlignedLongCall;
      }
    } else if (callNodeFlags(t, node) & TraceElement::TailCall) {
      op = AlignedJump;
    } else {
      op = AlignedCall;
    }

    updateCall(t, op, updateIp, reinterpret_cast<void*>(address));

    return reinterpret_cast<void*>(address);
  }
}
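
// Note on compileMethod2 above: once the target has been compiled, the
// call site that reached the compile thunk is rewritten to reach the
// generated code directly, so the thunk is hit at most once per call
// site.  The Aligned* operation chosen mirrors how the call was
// originally emitted (long vs. short form, tail call vs. ordinary
// call); the aligned forms allow the operand to be patched in place,
// presumably so the update is atomic with respect to threads that are
// concurrently executing the same call instruction.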

uint64_t
compileMethod(MyThread* t)
{
  void* ip;
  if (t->tailAddress) {
    ip = t->tailAddress;
    t->tailAddress = 0;
  } else {
    ip = t->arch->frameIp(t->stack);
  }

  void* r = compileMethod2(t, ip);

  if (UNLIKELY(t->exception)) {
    unwind(t);
  } else {
    return reinterpret_cast<uintptr_t>(r);
  }
}

void*
compileVirtualMethod2(MyThread* t, object class_, unsigned index)
{
  // If class_ has BootstrapFlag set, that means its vtable is not yet
  // available.  However, we must set t->trace->targetMethod to an
  // appropriate method to ensure we can accurately scan the stack for
  // GC roots.  We find such a method by looking for a superclass with
  // a vtable and using it instead:
  object c = class_;
  while (classVmFlags(t, c) & BootstrapFlag) {
    c = classSuper(t, c);
  }
  t->trace->targetMethod = arrayBody(t, classVirtualTable(t, c), index);

  PROTECT(t, class_);

  object target = resolveTarget(t, class_, index);
  PROTECT(t, target);

  if (LIKELY(t->exception == 0)) {
    compile(t, codeAllocator(t), 0, target);
  }

  t->trace->targetMethod = 0;

  if (UNLIKELY(t->exception)) {
    return 0;
  } else {
    void* address = reinterpret_cast<void*>(methodAddress(t, target));
    if (methodFlags(t, target) & ACC_NATIVE) {
      t->trace->nativeMethod = target;
    } else {
      classVtable(t, class_, methodOffset(t, target)) = address;
    }
    return address;
  }
}
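
// Note on compileVirtualMethod2 above: for ordinary Java methods the
// vtable slot (class_, index) is patched to point at the compiled code,
// so later virtual calls dispatch directly.  Native methods are not
// patched into the vtable; the target is recorded in
// t->trace->nativeMethod and its methodAddress (presumably a thunk that
// ends up in invokeNative) is returned instead.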

uint64_t
compileVirtualMethod(MyThread* t)
{
  object class_ = objectClass(t, static_cast<object>(t->virtualCallTarget));
  t->virtualCallTarget = 0;

  unsigned index = t->virtualCallIndex;
  t->virtualCallIndex = 0;

  void* r = compileVirtualMethod2(t, class_, index);

  if (UNLIKELY(t->exception)) {
    unwind(t);
  } else {
    return reinterpret_cast<uintptr_t>(r);
  }
}

void
resolveNative(MyThread* t, object method)
{
  PROTECT(t, method);

  assert(t, methodFlags(t, method) & ACC_NATIVE);

  initClass(t, methodClass(t, method));

  if (LIKELY(t->exception == 0) and methodCode(t, method) == 0) {
    object native = resolveNativeMethod(t, method);
    if (UNLIKELY(native == 0)) {
      object message = makeString
        (t, "%s.%s%s",
         &byteArrayBody(t, className(t, methodClass(t, method)), 0),
         &byteArrayBody(t, methodName(t, method), 0),
         &byteArrayBody(t, methodSpec(t, method), 0));

      t->exception = t->m->classpath->makeThrowable
        (t, Machine::UnsatisfiedLinkErrorType, message);
      return;
    }

    // ensure other threads only see the methodCode field populated
    // once the object it points to has been populated:
    storeStoreMemoryBarrier();

    set(t, method, MethodCode, native);
  }
}
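
// Note on resolveNative above: this follows the usual safe-publication
// pattern.  The native-method object is fully constructed first, a
// store-store barrier is issued, and only then is it published through
// the method's MethodCode field, so another thread that observes a
// non-null methodCode should also observe a fully initialized object.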

uint64_t
invokeNativeFast(MyThread* t, object method, void* function)
{
  return reinterpret_cast<FastNativeFunction>(function)
    (t, method,
     static_cast<uintptr_t*>(t->stack)
     + t->arch->frameFooterSize()
     + t->arch->frameReturnAddressSize());
}
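
// The cast above assumes FastNativeFunction is a function taking the
// thread, the method being invoked, and a pointer to the first argument
// word on the Java stack (just past the frame footer and saved return
// address), and returning the result as a uint64_t.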

uint64_t
invokeNativeSlow(MyThread* t, object method, void* function)
{
  PROTECT(t, method);

  unsigned footprint = methodParameterFootprint(t, method) + 1;
  if (methodFlags(t, method) & ACC_STATIC) {
    ++ footprint;
  }
  unsigned count = methodParameterCount(t, method) + 2;

  RUNTIME_ARRAY(uintptr_t, args, footprint);
  unsigned argOffset = 0;
  RUNTIME_ARRAY(uint8_t, types, count);
  unsigned typeOffset = 0;

  RUNTIME_ARRAY_BODY(args)[argOffset++] = reinterpret_cast<uintptr_t>(t);
  RUNTIME_ARRAY_BODY(types)[typeOffset++] = POINTER_TYPE;

  uintptr_t* sp = static_cast<uintptr_t*>(t->stack)
    + t->arch->frameFooterSize()
    + t->arch->frameReturnAddressSize();

  object jclass = 0;
  PROTECT(t, jclass);

  if (methodFlags(t, method) & ACC_STATIC) {
    jclass = getJClass(t, methodClass(t, method));
    RUNTIME_ARRAY_BODY(args)[argOffset++]
      = reinterpret_cast<uintptr_t>(&jclass);
  } else {
    RUNTIME_ARRAY_BODY(args)[argOffset++]
      = reinterpret_cast<uintptr_t>(sp++);
  }
  RUNTIME_ARRAY_BODY(types)[typeOffset++] = POINTER_TYPE;

  MethodSpecIterator it
    (t, reinterpret_cast<const char*>
     (&byteArrayBody(t, methodSpec(t, method), 0)));

  while (it.hasNext()) {
    unsigned type = RUNTIME_ARRAY_BODY(types)[typeOffset++]
      = fieldType(t, fieldCode(t, *it.next()));

    switch (type) {
    case INT8_TYPE:
    case INT16_TYPE:
    case INT32_TYPE:
    case FLOAT_TYPE:
      RUNTIME_ARRAY_BODY(args)[argOffset++] = *(sp++);
      break;

    case INT64_TYPE:
    case DOUBLE_TYPE: {
      memcpy(RUNTIME_ARRAY_BODY(args) + argOffset, sp, 8);
      argOffset += (8 / BytesPerWord);
      sp += 2;
    } break;

    case POINTER_TYPE: {
      if (*sp) {
        RUNTIME_ARRAY_BODY(args)[argOffset++]
          = reinterpret_cast<uintptr_t>(sp);
      } else {
        RUNTIME_ARRAY_BODY(args)[argOffset++] = 0;
      }
      ++ sp;
    } break;

    default: abort(t);
    }
  }

  unsigned returnCode = methodReturnCode(t, method);
  unsigned returnType = fieldType(t, returnCode);
  uint64_t result;

  if (DebugNatives) {
    fprintf(stderr, "invoke native method %s.%s\n",
            &byteArrayBody(t, className(t, methodClass(t, method)), 0),
            &byteArrayBody(t, methodName(t, method), 0));
  }

  if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
    if (methodFlags(t, method) & ACC_STATIC) {
      acquire(t, methodClass(t, method));
    } else {
      acquire(t, *reinterpret_cast<object*>(RUNTIME_ARRAY_BODY(args)[1]));
    }
  }

  Reference* reference = t->reference;

  { ENTER(t, Thread::IdleState);

    result = t->m->system->call
      (function,
       RUNTIME_ARRAY_BODY(args),
       RUNTIME_ARRAY_BODY(types),
       count,
       footprint * BytesPerWord,
       returnType);
  }

  if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
    if (methodFlags(t, method) & ACC_STATIC) {
      release(t, methodClass(t, method));
    } else {
      release(t, *reinterpret_cast<object*>(RUNTIME_ARRAY_BODY(args)[1]));
    }
  }

  if (DebugNatives) {
    fprintf(stderr, "return from native method %s.%s\n",
            &byteArrayBody(t, className(t, methodClass(t, method)), 0),
            &byteArrayBody(t, methodName(t, method), 0));
  }

  if (LIKELY(t->exception == 0)) {
    switch (returnCode) {
    case ByteField:
    case BooleanField:
      result = static_cast<int8_t>(result);
      break;

    case CharField:
      result = static_cast<uint16_t>(result);
      break;

    case ShortField:
      result = static_cast<int16_t>(result);
      break;

    case FloatField:
    case IntField:
      result = static_cast<int32_t>(result);
      break;

    case LongField:
    case DoubleField:
      break;

    case ObjectField:
      result = static_cast<uintptr_t>(result) ? *reinterpret_cast<uintptr_t*>
        (static_cast<uintptr_t>(result)) : 0;
      break;

    case VoidField:
      result = 0;
      break;

    default: abort(t);
    }
  } else {
    result = 0;
  }

  while (t->reference != reference) {
    dispose(t, t->reference);
  }

  return result;
}
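
// Marshaling example for invokeNativeSlow above (illustrative only):
// for a non-static method with spec "(JLjava/lang/String;)I" on a
// 32-bit target the arrays end up as
//
//   args  = { t, &thisSlot, <J word 0>, <J word 1>, &stringSlot or 0 }
//   types = { POINTER_TYPE, POINTER_TYPE, INT64_TYPE, POINTER_TYPE }
//
// On a 64-bit target the long occupies a single args slot but still two
// Java stack slots, hence "sp += 2".  Reference arguments are passed as
// the address of the stack slot holding them (or 0 for null),
// presumably so the native code receives stable handles rather than raw
// object pointers.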

uint64_t
invokeNative2(MyThread* t, object method)
{
  object native = methodCode(t, method);
  if (nativeFast(t, native)) {
    return invokeNativeFast(t, method, nativeFunction(t, native));
  } else {
    return invokeNativeSlow(t, method, nativeFunction(t, native));
  }
}

uint64_t
invokeNative(MyThread* t)
{
  if (t->trace->nativeMethod == 0) {
    void* ip;
    if (t->tailAddress) {
      ip = t->tailAddress;
      t->tailAddress = 0;
    } else {
      ip = t->arch->frameIp(t->stack);
    }

    object node = findCallNode(t, ip);
    object target = callNodeTarget(t, node);
    if (callNodeFlags(t, node) & TraceElement::VirtualCall) {
      target = resolveTarget(t, t->stack, target);
    }
    t->trace->nativeMethod = target;
  }

  assert(t, t->tailAddress == 0);

  uint64_t result = 0;

  t->trace->targetMethod = t->trace->nativeMethod;

  if (LIKELY(t->exception == 0)) {
    resolveNative(t, t->trace->nativeMethod);

    if (LIKELY(t->exception == 0)) {
      result = invokeNative2(t, t->trace->nativeMethod);
    }
  }

  unsigned parameterFootprint = methodParameterFootprint
    (t, t->trace->targetMethod);

  t->trace->targetMethod = 0;
  t->trace->nativeMethod = 0;

  if (UNLIKELY(t->exception)) {
    unwind(t);
  } else {
    uintptr_t* stack = static_cast<uintptr_t*>(t->stack);

    if (TailCalls
        and t->arch->argumentFootprint(parameterFootprint)
        > t->arch->stackAlignmentInWords())
    {
      stack += t->arch->argumentFootprint(parameterFootprint)
        - t->arch->stackAlignmentInWords();
    }

    stack += t->arch->frameReturnAddressSize();

    transition(t, t->arch->frameIp(t->stack), stack, t->base, t->continuation,
               t->trace);

    return result;
  }
}
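
// Note on the stack arithmetic above: the value passed to transition()
// skips the saved return address and, when tail calls are enabled, any
// argument words beyond the stackAlignmentInWords() region.  The
// apparent intent is that a tail-called native method is responsible
// for popping the argument area its caller pushed on its behalf, since
// that caller's frame no longer exists when the native call returns;
// transition() then publishes the adjusted stack pointer so a
// concurrent Thread.getStackTrace sees a consistent frame.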

void
findFrameMapInSimpleTable(MyThread* t, object method, object table,
                          int32_t offset, int32_t** map, unsigned* start)
{
  unsigned tableSize = simpleFrameMapTableSize(t, method, table);
  unsigned indexSize = intArrayLength(t, table) - tableSize;

  *map = &intArrayBody(t, table, indexSize);

  unsigned bottom = 0;
  unsigned top = indexSize;
  for (unsigned span = top - bottom; span; span = top - bottom) {
    unsigned middle = bottom + (span / 2);
    int32_t v = intArrayBody(t, table, middle);

    if (offset == v) {
      *start = frameMapSizeInBits(t, method) * middle;
      return;
    } else if (offset < v) {
      top = middle;
    } else {
      bottom = middle + 1;
    }
  }

  abort(t);
}
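
// Table layout assumed by findFrameMapInSimpleTable above: the table is
// a single int array of the form
//
//   [ ip offset 0 ... ip offset N-1 | packed frame-map bits ]
//
// where the first indexSize entries are sorted instruction-pointer
// offsets, one per GC point, and the rest holds the frame maps,
// frameMapSizeInBits(t, method) bits per GC point.  The binary search
// finds the GC point matching "offset" and returns the bit index at
// which its map begins.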

unsigned
findFrameMap(MyThread* t, void* stack, object method, object table,
             unsigned pathIndex)
{
  if (pathIndex) {
    FrameMapTablePath* path = reinterpret_cast<FrameMapTablePath*>
      (&byteArrayBody(t, table, pathIndex));

    void* address = static_cast<void**>(stack)[path->stackIndex];
    uint8_t* base = reinterpret_cast<uint8_t*>(methodAddress(t, method));
    for (unsigned i = 0; i < path->elementCount; ++i) {
      if (address == base + path->elements[i]) {
        return i + (path->elementCount * findFrameMap
                    (t, stack, method, table, path->next));
      }
    }

    abort(t);
  } else {
    return 0;
  }
}

void
findFrameMapInGeneralTable(MyThread* t, void* stack, object method,
                           object table, int32_t offset, int32_t** map,
                           unsigned* start)
{
  FrameMapTableHeader* header = reinterpret_cast<FrameMapTableHeader*>
    (&byteArrayBody(t, table, 0));

  FrameMapTableIndexElement* index
    = reinterpret_cast<FrameMapTableIndexElement*>
    (&byteArrayBody(t, table, sizeof(FrameMapTableHeader)));

  *map = reinterpret_cast<int32_t*>(index + header->indexCount);

  unsigned bottom = 0;
  unsigned top = header->indexCount;
  for (unsigned span = top - bottom; span; span = top - bottom) {
    unsigned middle = bottom + (span / 2);
    FrameMapTableIndexElement* v = index + middle;

    if (offset == v->offset) {
      *start = v->base + (findFrameMap(t, stack, method, table, v->path)
                          * frameMapSizeInBits(t, method));
      return;
    } else if (offset < v->offset) {
      top = middle;
    } else {
      bottom = middle + 1;
    }
  }

  abort(t);
}

void
findFrameMap(MyThread* t, void* stack, object method, int32_t offset,
             int32_t** map, unsigned* start)
{
  object table = codePool(t, methodCode(t, method));
  if (objectClass(t, table) == type(t, Machine::IntArrayType)) {
    findFrameMapInSimpleTable(t, method, table, offset, map, start);
  } else {
    findFrameMapInGeneralTable(t, stack, method, table, offset, map, start);
  }
}

void
visitStackAndLocals(MyThread* t, Heap::Visitor* v, void* frame, object method,
                    void* ip)
{
  unsigned count = frameMapSizeInBits(t, method);

  if (count) {
    void* stack = stackForFrame(t, frame, method);

    int32_t* map;
    unsigned offset;
    findFrameMap
      (t, stack, method, difference
       (ip, reinterpret_cast<void*>(methodAddress(t, method))), &map, &offset);

    for (unsigned i = 0; i < count; ++i) {
      int j = offset + i;
      if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) {
        v->visit(localObject(t, stack, method, i));
      }
    }
  }
}
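
// The frame map is a packed bit vector: bit j lives in word map[j / 32]
// at position (j % 32).  For example, to test whether slot 5 at the
// current GC point holds a reference:
//
//   int j = offset + 5;
//   if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) { ... }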

void
visitArgument(MyThread* t, Heap::Visitor* v, void* stack, unsigned index)
{
  v->visit(static_cast<object*>(stack)
           + index
           + t->arch->frameReturnAddressSize()
           + t->arch->frameFooterSize());
}

void
visitArguments(MyThread* t, Heap::Visitor* v, void* stack, object method)
{
  unsigned index = 0;

  if ((methodFlags(t, method) & ACC_STATIC) == 0) {
    visitArgument(t, v, stack, index++);
  }

  for (MethodSpecIterator it
         (t, reinterpret_cast<const char*>
          (&byteArrayBody(t, methodSpec(t, method), 0)));
       it.hasNext();)
  {
    switch (*it.next()) {
    case 'L':
    case '[':
      visitArgument(t, v, stack, index++);
      break;

    case 'J':
    case 'D':
      index += 2;
      break;

    default:
      ++ index;
      break;
    }
  }
}
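
// Example: for a non-static method with spec "(ILjava/lang/Object;J[B)V"
// visitArguments visits argument words 0 (the receiver), 2 (the Object),
// and 5 (the byte array); ints advance the index by one, longs and
// doubles by two, and only 'L' and '[' parameters are visited since
// only those words can hold references.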

void
visitStack(MyThread* t, Heap::Visitor* v)
{
  void* ip = t->arch->frameIp(t->stack);
  void* base = t->base;
  void* stack = t->stack;

  MyThread::CallTrace* trace = t->trace;
  object targetMethod = (trace ? trace->targetMethod : 0);

  while (stack) {
    if (targetMethod) {
      visitArguments(t, v, stack, targetMethod);
      targetMethod = 0;
    }

    object method = methodForIp(t, ip);
    if (method) {
      PROTECT(t, method);

      t->arch->nextFrame(&stack, &base);

      visitStackAndLocals(t, v, stack, method, ip);

      ip = t->arch->frameIp(stack);
    } else if (trace) {
      stack = trace->stack;
      base = trace->base;
      ip = t->arch->frameIp(stack);
      trace = trace->next;

      if (trace) {
        targetMethod = trace->targetMethod;
      }
    } else {
      break;
    }
  }
}

void
walkContinuationBody(MyThread* t, Heap::Walker* w, object c, int start)
{
  const int BodyOffset = ContinuationBody / BytesPerWord;

  object method = static_cast<object>
    (t->m->heap->follow(continuationMethod(t, c)));
  int count = frameMapSizeInBits(t, method);

  if (count) {
    int stack = BodyOffset
      + (continuationFramePointerOffset(t, c) / BytesPerWord)
      - t->arch->framePointerOffset()
      - stackOffsetFromFrame(t, method);

    int first = stack + localOffsetFromStack(t, count - 1, method);
    if (start > first) {
      count -= start - first;
    }

    int32_t* map;
    unsigned offset;
    findFrameMap
      (t, reinterpret_cast<uintptr_t*>(c) + stack, method, difference
       (continuationAddress(t, c),
        reinterpret_cast<void*>(methodAddress(t, method))), &map, &offset);

    for (int i = count - 1; i >= 0; --i) {
      int j = offset + i;
      if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) {
        if (not w->visit(stack + localOffsetFromStack(t, i, method))) {
          return;
        }
      }
    }
  }
}

void
callContinuation(MyThread* t, object continuation, object result,
                 object exception, void* ip, void* base, void* stack)
{
  assert(t, t->exception == 0);

  if (exception) {
    t->exception = exception;

    MyThread::TraceContext c(t, ip, stack, base, continuation, t->trace);

    findUnwindTarget(t, &ip, &base, &stack, &continuation);
  }

  t->trace->nativeMethod = 0;
  t->trace->targetMethod = 0;

  transition(t, ip, stack, base, continuation, t->trace);

  vmJump(ip, base, stack, t, reinterpret_cast<uintptr_t>(result), 0);
}

int8_t*
returnSpec(MyThread* t, object method)
{
  int8_t* s = &byteArrayBody(t, methodSpec(t, method), 0);
  while (*s and *s != ')') ++ s;
  expect(t, *s == ')');
  return s + 1;
}

object
returnClass(MyThread* t, object method)
{
  PROTECT(t, method);

  int8_t* spec = returnSpec(t, method);
  unsigned length = strlen(reinterpret_cast<char*>(spec));
  object name;
  if (*spec == '[') {
    name = makeByteArray(t, length + 1);
    memcpy(&byteArrayBody(t, name, 0), spec, length);
  } else {
    assert(t, *spec == 'L');
    assert(t, spec[length - 1] == ';');
    name = makeByteArray(t, length - 1);
    memcpy(&byteArrayBody(t, name, 0), spec + 1, length - 2);
  }

  return resolveClass(t, classLoader(t, methodClass(t, method)), name);
}

bool
compatibleReturnType(MyThread* t, object oldMethod, object newMethod)
{
  if (oldMethod == newMethod) {
    return true;
  } else if (methodReturnCode(t, oldMethod) == methodReturnCode(t, newMethod))
  {
    if (methodReturnCode(t, oldMethod) == ObjectField) {
      PROTECT(t, newMethod);

      object oldClass = returnClass(t, oldMethod);
      PROTECT(t, oldClass);

      object newClass = returnClass(t, newMethod);

      return isAssignableFrom(t, oldClass, newClass);
    } else {
      return true;
    }
  } else {
    return methodReturnCode(t, oldMethod) == VoidField;
  }
}

void
jumpAndInvoke(MyThread* t, object method, void* base, void* stack, ...)
{
  t->trace->targetMethod = 0;

  if (methodFlags(t, method) & ACC_NATIVE) {
    t->trace->nativeMethod = method;
  } else {
    t->trace->nativeMethod = 0;
  }

  unsigned argumentCount = methodParameterFootprint(t, method);
  RUNTIME_ARRAY(uintptr_t, arguments, argumentCount);
  va_list a; va_start(a, stack);
  for (unsigned i = 0; i < argumentCount; ++i) {
    RUNTIME_ARRAY_BODY(arguments)[i] = va_arg(a, uintptr_t);
  }
  va_end(a);

  vmJumpAndInvoke
    (t, reinterpret_cast<void*>(methodAddress(t, method)),
     base,
     stack,
     argumentCount * BytesPerWord,
     RUNTIME_ARRAY_BODY(arguments),
     (t->arch->alignFrameSize(t->arch->argumentFootprint(argumentCount))
      + t->arch->frameReturnAddressSize())
     * BytesPerWord);
}
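
// jumpAndInvoke is used by the continuation support below: callers
// capture a target frame (base and stack, e.g. via
// makeCurrentContinuation or findUnwindTarget) and then pass the Java
// arguments as varargs, which are copied out here and handed to
// vmJumpAndInvoke along with the callee's frame size in bytes (aligned
// argument footprint plus the return-address slot).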

void
callContinuation(MyThread* t, object continuation, object result,
                 object exception)
{
  enum {
    Call,
    Unwind,
    Rewind,
    Throw
  } action;

  object nextContinuation = 0;

  if (t->continuation == 0
      or continuationContext(t, t->continuation)
      != continuationContext(t, continuation))
  {
    PROTECT(t, continuation);
    PROTECT(t, result);
    PROTECT(t, exception);

    if (compatibleReturnType
        (t, t->trace->originalMethod, continuationContextMethod
         (t, continuationContext(t, continuation))))
    {
      object oldContext;
      object unwindContext;

      if (t->continuation) {
        oldContext = continuationContext(t, t->continuation);
        unwindContext = oldContext;
      } else {
        oldContext = 0;
        unwindContext = 0;
      }

      object rewindContext = 0;

      for (object newContext = continuationContext(t, continuation);
           newContext; newContext = continuationContextNext(t, newContext))
      {
        if (newContext == oldContext) {
          unwindContext = 0;
          break;
        } else {
          rewindContext = newContext;
        }
      }

      if (unwindContext
          and continuationContextContinuation(t, unwindContext))
      {
        nextContinuation = continuationContextContinuation(t, unwindContext);
        result = makeUnwindResult(t, continuation, result, exception);
        action = Unwind;
      } else if (rewindContext
                 and continuationContextContinuation(t, rewindContext))
      {
        nextContinuation = continuationContextContinuation(t, rewindContext);
        action = Rewind;

        if (root(t, RewindMethod) == 0) {
          PROTECT(t, nextContinuation);

          object method = resolveMethod
            (t, root(t, Machine::BootLoader), "avian/Continuations", "rewind",
             "(Ljava/lang/Runnable;Lavian/Callback;Ljava/lang/Object;"
             "Ljava/lang/Throwable;)V");

          if (method) {
            setRoot(t, RewindMethod, method);

            compile(t, local::codeAllocator(t), 0, method);

            if (UNLIKELY(t->exception)) {
              action = Throw;
            }
          } else {
            action = Throw;
          }
        }
      } else {
        action = Call;
      }
    } else {
      t->exception = t->m->classpath->makeThrowable
        (t, Machine::IncompatibleContinuationExceptionType);
      action = Throw;
    }
  } else {
    action = Call;
  }

  void* ip;
  void* base;
  void* stack;
  object threadContinuation;
  findUnwindTarget(t, &ip, &base, &stack, &threadContinuation);

  switch (action) {
  case Call: {
    callContinuation(t, continuation, result, exception, ip, base, stack);
  } break;

  case Unwind: {
    callContinuation(t, nextContinuation, result, 0, ip, base, stack);
  } break;

  case Rewind: {
    transition(t, 0, 0, 0, nextContinuation, t->trace);

    jumpAndInvoke
      (t, root(t, RewindMethod), base, stack,
       continuationContextBefore(t, continuationContext(t, nextContinuation)),
       continuation, result, exception);
  } break;

  case Throw: {
    transition(t, ip, stack, base, threadContinuation, t->trace);

    vmJump(ip, base, stack, t, 0, 0);
  } break;

  default:
    abort(t);
  }
}
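
// Rough summary of the four actions chosen above (an interpretation of
// the logic, not authoritative):
//
//   Call   - the target continuation shares the current context, so we
//            can jump straight into it.
//   Unwind - the current context is not an ancestor of the target, so
//            dynamicWind frames must be left first; control goes to the
//            context's unwind continuation with an UnwindResult that
//            records where to continue afterwards.
//   Rewind - the target lies under dynamicWind frames that are not
//            currently active, so avian/Continuations.rewind is invoked
//            to run the relevant "before" thunk on the way back in.
//   Throw  - the return types are incompatible or the rewind helper
//            could not be resolved; t->exception is set and we unwind
//            to the normal handler.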

void
callWithCurrentContinuation(MyThread* t, object receiver)
{
  object method = 0;
  void* ip = 0;
  void* base = 0;
  void* stack = 0;

  { PROTECT(t, receiver);

    if (root(t, ReceiveMethod) == 0) {
      object m = resolveMethod
        (t, root(t, Machine::BootLoader), "avian/CallbackReceiver", "receive",
         "(Lavian/Callback;)Ljava/lang/Object;");

      if (m) {
        setRoot(t, ReceiveMethod, m);

        object continuationClass = type(t, Machine::ContinuationType);

        if (classVmFlags(t, continuationClass) & BootstrapFlag) {
          resolveSystemClass
            (t, root(t, Machine::BootLoader),
             vm::className(t, continuationClass));
        }
      }
    }

    if (LIKELY(t->exception == 0)) {
      method = findInterfaceMethod
        (t, root(t, ReceiveMethod), objectClass(t, receiver));
      PROTECT(t, method);

      compile(t, local::codeAllocator(t), 0, method);

      if (LIKELY(t->exception == 0)) {
        t->continuation = makeCurrentContinuation(t, &ip, &base, &stack);
      }
    }
  }

  if (LIKELY(t->exception == 0)) {
    jumpAndInvoke(t, method, base, stack, receiver, t->continuation);
  } else {
    unwind(t);
  }
}
|
|
|
|
|
|
|
|
void
|
|
|
|
dynamicWind(MyThread* t, object before, object thunk, object after)
|
|
|
|
{
|
|
|
|
void* ip = 0;
|
|
|
|
void* base = 0;
|
|
|
|
void* stack = 0;
|
|
|
|
|
|
|
|
{ PROTECT(t, before);
|
|
|
|
PROTECT(t, thunk);
|
|
|
|
PROTECT(t, after);
|
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
if (root(t, WindMethod) == 0) {
|
2009-06-03 00:55:12 +00:00
|
|
|
object method = resolveMethod
|
2010-09-14 16:49:41 +00:00
|
|
|
(t, root(t, Machine::BootLoader), "avian/Continuations", "wind",
|
2009-06-03 00:55:12 +00:00
|
|
|
"(Ljava/lang/Runnable;Ljava/util/concurrent/Callable;"
|
|
|
|
"Ljava/lang/Runnable;)Lavian/Continuations$UnwindResult;");
|
2009-05-26 05:27:10 +00:00
|
|
|
|
|
|
|
if (method) {
|
2010-09-14 16:49:41 +00:00
|
|
|
setRoot(t, WindMethod, method);
|
2009-08-27 00:26:44 +00:00
|
|
|
compile(t, local::codeAllocator(t), 0, method);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (LIKELY(t->exception == 0)) {
|
2009-05-29 00:56:05 +00:00
|
|
|
t->continuation = makeCurrentContinuation(t, &ip, &base, &stack);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
|
|
|
object newContext = makeContinuationContext
|
|
|
|
(t, continuationContext(t, t->continuation), before, after,
|
|
|
|
t->continuation, t->trace->originalMethod);
|
|
|
|
|
|
|
|
set(t, t->continuation, ContinuationContext, newContext);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (LIKELY(t->exception == 0)) {
|
2010-09-14 16:49:41 +00:00
|
|
|
jumpAndInvoke(t, root(t, WindMethod), base, stack, before, thunk, after);
|
2009-05-26 05:27:10 +00:00
|
|
|
} else {
|
|
|
|
unwind(t);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
class ArgumentList {
|
|
|
|
public:
|
2009-02-17 02:49:28 +00:00
|
|
|
ArgumentList(Thread* t, uintptr_t* array, unsigned size, bool* objectMask,
|
|
|
|
object this_, const char* spec, bool indirectObjects,
|
|
|
|
va_list arguments):
|
2007-09-25 23:53:11 +00:00
|
|
|
t(static_cast<MyThread*>(t)),
|
|
|
|
array(array),
|
|
|
|
objectMask(objectMask),
|
2009-02-17 02:49:28 +00:00
|
|
|
size(size),
|
2009-05-03 20:57:11 +00:00
|
|
|
position(0),
|
2007-10-12 17:56:43 +00:00
|
|
|
protector(this)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
|
|
|
if (this_) {
|
|
|
|
addObject(this_);
|
|
|
|
}
|
|
|
|
|
2007-10-12 17:56:43 +00:00
|
|
|
for (MethodSpecIterator it(t, spec); it.hasNext();) {
|
|
|
|
switch (*it.next()) {
|
2007-09-25 23:53:11 +00:00
|
|
|
case 'L':
|
|
|
|
case '[':
|
|
|
|
if (indirectObjects) {
|
|
|
|
object* v = va_arg(arguments, object*);
|
|
|
|
addObject(v ? *v : 0);
|
|
|
|
} else {
|
|
|
|
addObject(va_arg(arguments, object));
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'J':
|
|
|
|
case 'D':
|
|
|
|
addLong(va_arg(arguments, uint64_t));
|
|
|
|
break;
|
2007-10-12 17:56:43 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
default:
|
|
|
|
addInt(va_arg(arguments, uint32_t));
|
2007-10-12 17:56:43 +00:00
|
|
|
break;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2009-02-17 02:49:28 +00:00
|
|
|
ArgumentList(Thread* t, uintptr_t* array, unsigned size, bool* objectMask,
|
|
|
|
object this_, const char* spec, object arguments):
|
2007-09-25 23:53:11 +00:00
|
|
|
t(static_cast<MyThread*>(t)),
|
|
|
|
array(array),
|
|
|
|
objectMask(objectMask),
|
2009-02-17 02:49:28 +00:00
|
|
|
size(size),
|
2009-05-03 20:57:11 +00:00
|
|
|
position(0),
|
2007-10-12 17:56:43 +00:00
|
|
|
protector(this)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
|
|
|
if (this_) {
|
|
|
|
addObject(this_);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned index = 0;
|
2007-10-12 17:56:43 +00:00
|
|
|
for (MethodSpecIterator it(t, spec); it.hasNext();) {
|
|
|
|
switch (*it.next()) {
|
2007-09-25 23:53:11 +00:00
|
|
|
case 'L':
|
|
|
|
case '[':
|
|
|
|
addObject(objectArrayBody(t, arguments, index++));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'J':
|
|
|
|
case 'D':
|
|
|
|
addLong(cast<int64_t>(objectArrayBody(t, arguments, index++),
|
|
|
|
BytesPerWord));
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
addInt(cast<int32_t>(objectArrayBody(t, arguments, index++),
|
|
|
|
BytesPerWord));
|
2007-10-12 17:56:43 +00:00
|
|
|
break;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void addObject(object v) {
|
2009-05-03 20:57:11 +00:00
|
|
|
assert(t, position < size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
array[position] = reinterpret_cast<uintptr_t>(v);
|
|
|
|
objectMask[position] = true;
|
2009-05-03 20:57:11 +00:00
|
|
|
++ position;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-10-12 22:06:33 +00:00
|
|
|
void addInt(uintptr_t v) {
|
2009-05-03 20:57:11 +00:00
|
|
|
assert(t, position < size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
array[position] = v;
|
|
|
|
objectMask[position] = false;
|
2009-05-03 20:57:11 +00:00
|
|
|
++ position;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void addLong(uint64_t v) {
|
2009-05-03 20:57:11 +00:00
|
|
|
assert(t, position < size - 1);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
memcpy(array + position, &v, 8);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
objectMask[position] = false;
|
2007-12-23 20:06:24 +00:00
|
|
|
objectMask[position + 1] = false;
|
2009-05-03 20:57:11 +00:00
|
|
|
|
|
|
|
position += 2;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
MyThread* t;
|
|
|
|
uintptr_t* array;
|
|
|
|
bool* objectMask;
|
2009-02-17 02:49:28 +00:00
|
|
|
unsigned size;
|
2007-09-25 23:53:11 +00:00
|
|
|
unsigned position;
|
2007-10-12 17:56:43 +00:00
|
|
|
|
|
|
|
class MyProtector: public Thread::Protector {
|
|
|
|
public:
|
|
|
|
MyProtector(ArgumentList* list): Protector(list->t), list(list) { }
|
|
|
|
|
|
|
|
virtual void visit(Heap::Visitor* v) {
|
2009-05-17 23:43:48 +00:00
|
|
|
for (unsigned i = 0; i < list->position; ++i) {
|
2007-10-12 17:56:43 +00:00
|
|
|
if (list->objectMask[i]) {
|
2007-10-28 19:14:53 +00:00
|
|
|
v->visit(reinterpret_cast<object*>(list->array + i));
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ArgumentList* list;
|
|
|
|
} protector;
|
2007-09-25 23:53:11 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
object
|
|
|
|
invoke(Thread* thread, object method, ArgumentList* arguments)
|
|
|
|
{
|
|
|
|
MyThread* t = static_cast<MyThread*>(thread);
|
|
|
|
|
2007-09-26 23:23:03 +00:00
|
|
|
unsigned returnCode = methodReturnCode(t, method);
|
2007-09-25 23:53:11 +00:00
|
|
|
unsigned returnType = fieldType(t, returnCode);
|
|
|
|
|
2007-12-30 22:24:48 +00:00
|
|
|
uint64_t result;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
{ MyThread::CallTrace trace(t, method);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2009-05-15 02:08:01 +00:00
|
|
|
assert(t, arguments->position == arguments->size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-12-30 22:24:48 +00:00
|
|
|
result = vmInvoke
|
2008-12-02 02:38:00 +00:00
|
|
|
(t, reinterpret_cast<void*>(methodAddress(t, method)),
|
2009-05-15 02:08:01 +00:00
|
|
|
arguments->array,
|
|
|
|
arguments->position * BytesPerWord,
|
|
|
|
t->arch->alignFrameSize
|
2009-05-17 00:39:08 +00:00
|
|
|
(t->arch->argumentFootprint(arguments->position))
|
2009-04-25 23:33:42 +00:00
|
|
|
* BytesPerWord,
|
2009-02-17 02:49:28 +00:00
|
|
|
returnType);
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-04-20 16:21:32 +00:00
|
|
|
if (t->exception) {
|
2010-09-14 16:49:41 +00:00
|
|
|
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
|
2008-04-20 16:21:32 +00:00
|
|
|
collect(t, Heap::MinorCollection);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
object r;
|
|
|
|
switch (returnCode) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
case FloatField:
|
|
|
|
case IntField:
|
|
|
|
r = makeInt(t, result);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LongField:
|
|
|
|
case DoubleField:
|
|
|
|
r = makeLong(t, result);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ObjectField:
|
2007-12-16 00:24:15 +00:00
|
|
|
r = reinterpret_cast<object>(result);
|
2007-09-25 23:53:11 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VoidField:
|
|
|
|
r = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
abort(t);
|
2009-09-19 00:01:54 +00:00
|
|
|
}
|
2007-09-24 01:39:03 +00:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
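For illustration, consider a hypothetical instance method with spec "(ILjava/lang/String;J)V" invoked through the ArgumentList and invoke code above (the spec and values here are assumptions, not taken from this file). The list is packed one word at a time, and objectMask records which slots the garbage collector must treat as references:
/* slot 0: the receiver (this)        objectMask[0] = true
   slot 1: the int argument           objectMask[1] = false
   slot 2: the String argument        objectMask[2] = true
   slot 3: first half of the long     objectMask[3] = false
   slot 4: second half of the long    objectMask[4] = false

   position ends at 5, matching methodParameterFootprint for this spec, and
   vmInvoke receives the array together with its footprint in bytes
   (arguments->position * BytesPerWord). */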
|
|
|
|
|
2007-12-30 22:24:48 +00:00
|
|
|
class SegFaultHandler: public System::SignalHandler {
|
|
|
|
public:
|
|
|
|
SegFaultHandler(): m(0) { }
|
|
|
|
|
2008-01-01 17:08:47 +00:00
|
|
|
virtual bool handleSignal(void** ip, void** base, void** stack,
|
|
|
|
void** thread)
|
|
|
|
{
|
2007-12-30 22:24:48 +00:00
|
|
|
MyThread* t = static_cast<MyThread*>(m->localThread->get());
|
2009-08-18 21:47:08 +00:00
|
|
|
if (t and t->state == Thread::ActiveState) {
|
2008-04-07 23:51:32 +00:00
|
|
|
object node = methodForIp(t, *ip);
|
2008-01-02 01:07:12 +00:00
|
|
|
if (node) {
|
2009-09-04 23:08:45 +00:00
|
|
|
// add one to the IP since findLineNumber will subtract one
|
2009-09-04 21:09:40 +00:00
|
|
|
// when we make the trace:
|
2010-06-16 01:10:48 +00:00
|
|
|
MyThread::TraceContext context
|
|
|
|
(t, static_cast<uint8_t*>(*ip) + 1,
|
|
|
|
static_cast<void**>(*stack) - t->arch->frameReturnAddressSize(),
|
|
|
|
*base, t->continuation, t->trace);
|
2008-04-09 19:08:13 +00:00
|
|
|
|
2010-06-19 22:40:21 +00:00
|
|
|
if (ensure(t, FixedSizeOfNullPointerException + traceSize(t))) {
|
2010-09-14 16:49:41 +00:00
|
|
|
atomicOr(&(t->flags), Thread::TracingFlag);
|
2010-09-10 21:05:29 +00:00
|
|
|
t->exception = t->m->classpath->makeThrowable
|
|
|
|
(t, Machine::NullPointerExceptionType);
|
2010-09-14 16:49:41 +00:00
|
|
|
atomicAnd(&(t->flags), ~Thread::TracingFlag);
|
2010-06-19 22:40:21 +00:00
|
|
|
} else {
|
|
|
|
// not enough memory available for a new NPE and stack trace
|
|
|
|
// -- use a preallocated instance instead
|
2010-09-14 16:49:41 +00:00
|
|
|
t->exception = root(t, Machine::NullPointerException);
|
2010-06-19 22:40:21 +00:00
|
|
|
}
|
2008-01-02 01:07:12 +00:00
|
|
|
|
2009-08-13 15:17:05 +00:00
|
|
|
// printTrace(t, t->exception);
|
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
object continuation;
|
|
|
|
findUnwindTarget(t, ip, base, stack, &continuation);
|
2008-04-23 16:33:31 +00:00
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
transition(t, ip, stack, base, continuation, t->trace);
|
2008-04-23 16:33:31 +00:00
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
*thread = t;
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
return true;
|
|
|
|
}
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
2009-06-11 23:14:54 +00:00
|
|
|
|
|
|
|
if (compileLog) {
|
|
|
|
fflush(compileLog);
|
|
|
|
}
|
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
return false;
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Machine* m;
|
|
|
|
};
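The segfault handler above publishes the unwind target via transition(t, ip, stack, base, continuation, t->trace) before jumping, so that a thread grabbing a stack trace never observes a half-updated context. A minimal, hypothetical sketch of such a staged update follows, assuming a simplified Context struct and standard C++ fences rather than the VM's own primitives; it illustrates the idea, not the actual implementation.

#include <atomic>

namespace sketch {

struct Context {
  void* ip;
  void* stack;
  void* base;
};

struct ThreadState {
  Context context;                              // the "real" fields
  std::atomic<Context*> transition { nullptr }; // non-null while updating
};

inline void doTransitionSketch(ThreadState* t, Context next)
{
  Context staging = next;                       // 1. fill a staging object

  t->transition.store(&staging, std::memory_order_release); // 2. publish it
  std::atomic_thread_fence(std::memory_order_seq_cst);

  t->context = next;                            // 3. update the real fields
  std::atomic_thread_fence(std::memory_order_seq_cst);

  t->transition.store(nullptr, std::memory_order_release);  // 4. clear it
}

} // namespace sketch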
|
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
bool
|
|
|
|
isThunk(MyThread* t, void* ip);
|
|
|
|
|
2010-07-06 22:13:11 +00:00
|
|
|
bool
|
|
|
|
isVirtualThunk(MyThread* t, void* ip);
|
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
bool
|
|
|
|
isThunkUnsafeStack(MyThread* t, void* ip);
|
|
|
|
|
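isThunk, isVirtualThunk, and isThunkUnsafeStack, declared above, are defined later in the file; conceptually each asks whether an instruction pointer falls inside one of the generated thunk regions. A minimal sketch of such a range check, using a hypothetical bounds record rather than the VM's own thunk bookkeeping:

#include <cstdint>

namespace sketch {

struct ThunkBounds {       // hypothetical stand-in for the VM's thunk records
  std::uint8_t* start;     // first byte of the generated code
  unsigned length;         // size of the region in bytes
};

inline bool ipWithinThunk(const ThunkBounds& thunk, void* ip)
{
  std::uint8_t* p = static_cast<std::uint8_t*>(ip);
  return p >= thunk.start and p < thunk.start + thunk.length;
}

} // namespace sketch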
2008-12-02 02:38:00 +00:00
|
|
|
void
|
|
|
|
boot(MyThread* t, BootImage* image);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2008-04-22 15:31:40 +00:00
|
|
|
class MyProcessor;
|
|
|
|
|
|
|
|
MyProcessor*
|
|
|
|
processor(MyThread* t);
|
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
void
|
|
|
|
compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p);
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
class MyProcessor: public Processor {
|
2007-09-24 01:39:03 +00:00
|
|
|
public:
|
2010-06-16 01:10:48 +00:00
|
|
|
class Thunk {
|
|
|
|
public:
|
|
|
|
Thunk():
|
|
|
|
start(0), frameSavedOffset(0), length(0)
|
|
|
|
{ }
|
|
|
|
|
|
|
|
Thunk(uint8_t* start, unsigned frameSavedOffset, unsigned length):
|
|
|
|
start(start), frameSavedOffset(frameSavedOffset), length(length)
|
|
|
|
{ }
|
|
|
|
|
|
|
|
uint8_t* start;
|
|
|
|
unsigned frameSavedOffset;
|
|
|
|
unsigned length;
|
|
|
|
};
|
|
|
|
|
|
|
|
class ThunkCollection {
|
|
|
|
public:
|
|
|
|
Thunk default_;
|
|
|
|
Thunk defaultVirtual;
|
|
|
|
Thunk native;
|
|
|
|
Thunk aioob;
|
|
|
|
Thunk table;
|
|
|
|
};
|
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
MyProcessor(System* s, Allocator* allocator, bool useNativeFeatures):
|
2007-09-25 23:53:11 +00:00
|
|
|
s(s),
|
2008-01-13 22:05:08 +00:00
|
|
|
allocator(allocator),
|
2010-09-14 16:49:41 +00:00
|
|
|
roots(0),
|
2009-10-10 23:46:43 +00:00
|
|
|
bootImage(0),
|
2009-06-01 03:16:58 +00:00
|
|
|
codeAllocator(s, 0, 0),
|
2010-06-25 01:09:50 +00:00
|
|
|
callTableSize(0),
|
2009-10-10 23:46:43 +00:00
|
|
|
useNativeFeatures(useNativeFeatures)
|
2009-04-19 22:36:11 +00:00
|
|
|
{ }
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
virtual Thread*
|
|
|
|
makeThread(Machine* m, object javaThread, Thread* parent)
|
|
|
|
{
|
2008-04-13 18:15:04 +00:00
|
|
|
MyThread* t = new (m->heap->allocate(sizeof(MyThread)))
|
2009-10-10 23:46:43 +00:00
|
|
|
MyThread(m, javaThread, static_cast<MyThread*>(parent),
|
|
|
|
useNativeFeatures);
|
2007-10-25 22:06:05 +00:00
|
|
|
t->init();
|
2009-05-05 01:04:17 +00:00
|
|
|
|
2009-05-25 00:22:36 +00:00
|
|
|
if (false) {
|
2010-09-10 21:05:29 +00:00
|
|
|
fprintf(stderr, "%d\n", difference(&(t->stack), t));
|
2009-05-29 01:50:44 +00:00
|
|
|
fprintf(stderr, "%d\n", difference(&(t->continuation), t));
|
|
|
|
fprintf(stderr, "%d\n", difference(&(t->exception), t));
|
|
|
|
fprintf(stderr, "%d\n", difference(&(t->exceptionStackAdjustment), t));
|
|
|
|
fprintf(stderr, "%d\n", difference(&(t->exceptionOffset), t));
|
|
|
|
fprintf(stderr, "%d\n", difference(&(t->exceptionHandler), t));
|
2009-05-25 00:22:36 +00:00
|
|
|
exit(0);
|
|
|
|
}
|
|
|
|
|
2007-10-25 22:06:05 +00:00
|
|
|
return t;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
virtual object
|
|
|
|
makeMethod(vm::Thread* t,
|
|
|
|
uint8_t vmFlags,
|
|
|
|
uint8_t returnCode,
|
|
|
|
uint8_t parameterCount,
|
|
|
|
uint8_t parameterFootprint,
|
|
|
|
uint16_t flags,
|
|
|
|
uint16_t offset,
|
|
|
|
object name,
|
|
|
|
object spec,
|
2009-09-19 00:01:54 +00:00
|
|
|
object addendum,
|
2007-12-09 22:45:43 +00:00
|
|
|
object class_,
|
|
|
|
object code)
|
2007-10-04 03:19:39 +00:00
|
|
|
{
|
2007-12-09 22:45:43 +00:00
|
|
|
return vm::makeMethod
|
|
|
|
(t, vmFlags, returnCode, parameterCount, parameterFootprint, flags,
|
2009-09-19 00:01:54 +00:00
|
|
|
offset, 0, name, spec, addendum, class_, code,
|
2009-08-27 00:26:44 +00:00
|
|
|
local::defaultThunk(static_cast<MyThread*>(t)));
|
2007-10-04 03:19:39 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
virtual object
|
|
|
|
makeClass(vm::Thread* t,
|
|
|
|
uint16_t flags,
|
2009-08-18 20:26:28 +00:00
|
|
|
uint16_t vmFlags,
|
2007-12-09 22:45:43 +00:00
|
|
|
uint16_t fixedSize,
|
2009-08-18 20:26:28 +00:00
|
|
|
uint8_t arrayElementSize,
|
|
|
|
uint8_t arrayDimensions,
|
2007-12-09 22:45:43 +00:00
|
|
|
object objectMask,
|
|
|
|
object name,
|
2009-08-27 22:26:25 +00:00
|
|
|
object sourceFile,
|
2007-12-09 22:45:43 +00:00
|
|
|
object super,
|
|
|
|
object interfaceTable,
|
|
|
|
object virtualTable,
|
|
|
|
object fieldTable,
|
|
|
|
object methodTable,
|
|
|
|
object staticTable,
|
2009-09-19 00:01:54 +00:00
|
|
|
object addendum,
|
2007-12-09 22:45:43 +00:00
|
|
|
object loader,
|
|
|
|
unsigned vtableLength)
|
2007-09-26 23:23:03 +00:00
|
|
|
{
|
2007-12-11 21:26:59 +00:00
|
|
|
return vm::makeClass
|
2009-08-18 20:26:28 +00:00
|
|
|
(t, flags, vmFlags, fixedSize, arrayElementSize, arrayDimensions,
|
2009-08-27 22:26:25 +00:00
|
|
|
objectMask, name, sourceFile, super, interfaceTable, virtualTable,
|
2009-09-19 00:01:54 +00:00
|
|
|
fieldTable, methodTable, staticTable, addendum, loader, vtableLength);
|
2007-12-11 21:26:59 +00:00
|
|
|
}
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-11 21:26:59 +00:00
|
|
|
virtual void
|
|
|
|
initVtable(Thread* t, object c)
|
|
|
|
{
|
2009-06-04 23:20:55 +00:00
|
|
|
PROTECT(t, c);
|
2009-04-19 22:36:11 +00:00
|
|
|
for (int i = classLength(t, c) - 1; i >= 0; --i) {
|
2009-06-04 23:20:55 +00:00
|
|
|
void* thunk = reinterpret_cast<void*>
|
2009-04-07 00:34:12 +00:00
|
|
|
(virtualThunk(static_cast<MyThread*>(t), i));
|
2009-06-04 23:20:55 +00:00
|
|
|
classVtable(t, c, i) = thunk;
|
2007-09-26 23:23:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-08-05 00:27:54 +00:00
|
|
|
virtual bool
|
|
|
|
isInitializing(Thread* t, object c)
|
|
|
|
{
|
|
|
|
for (Thread::ClassInitStack* s = t->classInitStack; s; s = s->next) {
|
|
|
|
if (s->class_ == c) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
virtual void
|
2007-10-12 17:56:43 +00:00
|
|
|
visitObjects(Thread* vmt, Heap::Visitor* v)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
2007-10-12 17:56:43 +00:00
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
if (t == t->m->rootThread) {
|
2010-09-14 16:49:41 +00:00
|
|
|
v->visit(&roots);
|
2007-10-14 01:18:25 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-04-07 23:47:41 +00:00
|
|
|
for (MyThread::CallTrace* trace = t->trace; trace; trace = trace->next) {
|
2009-05-03 20:57:11 +00:00
|
|
|
v->visit(&(trace->continuation));
|
2008-04-07 23:47:41 +00:00
|
|
|
v->visit(&(trace->nativeMethod));
|
2009-04-27 01:53:42 +00:00
|
|
|
v->visit(&(trace->targetMethod));
|
2009-05-24 01:49:14 +00:00
|
|
|
v->visit(&(trace->originalMethod));
|
2008-04-07 23:47:41 +00:00
|
|
|
}
|
2008-04-01 17:37:59 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
v->visit(&(t->continuation));
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
for (Reference* r = t->reference; r; r = r->next) {
|
|
|
|
v->visit(&(r->target));
|
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
visitStack(t, v);
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
virtual void
|
|
|
|
walkStack(Thread* vmt, StackVisitor* v)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
2007-12-09 22:45:43 +00:00
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
MyStackWalker walker(t);
|
|
|
|
walker.walk(v);
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-10-04 22:41:19 +00:00
|
|
|
virtual int
|
2007-12-09 22:45:43 +00:00
|
|
|
lineNumber(Thread* vmt, object method, int ip)
|
2007-10-04 22:41:19 +00:00
|
|
|
{
|
2007-12-09 22:45:43 +00:00
|
|
|
return findLineNumber(static_cast<MyThread*>(vmt), method, ip);
|
2007-10-04 22:41:19 +00:00
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
virtual object*
|
2007-09-30 03:33:38 +00:00
|
|
|
makeLocalReference(Thread* vmt, object o)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
2007-09-30 03:33:38 +00:00
|
|
|
if (o) {
|
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
2009-12-17 02:16:51 +00:00
|
|
|
|
|
|
|
for (Reference* r = t->reference; r; r = r->next) {
|
|
|
|
if (r->target == o) {
|
|
|
|
acquire(t, r);
|
|
|
|
|
|
|
|
return &(r->target);
|
|
|
|
}
|
|
|
|
}
|
2007-09-30 03:33:38 +00:00
|
|
|
|
2008-04-13 18:15:04 +00:00
|
|
|
Reference* r = new (t->m->heap->allocate(sizeof(Reference)))
|
2007-09-30 03:33:38 +00:00
|
|
|
Reference(o, &(t->reference));
|
|
|
|
|
2009-12-17 02:16:51 +00:00
|
|
|
acquire(t, r);
|
|
|
|
|
2007-09-30 03:33:38 +00:00
|
|
|
return &(r->target);
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual void
|
2007-09-30 03:33:38 +00:00
|
|
|
disposeLocalReference(Thread* t, object* r)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
2007-09-30 03:33:38 +00:00
|
|
|
if (r) {
|
2009-12-17 02:16:51 +00:00
|
|
|
release(t, reinterpret_cast<Reference*>(r));
|
2007-09-30 03:33:38 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-09-24 01:39:03 +00:00
|
|
|
virtual object
|
|
|
|
invokeArray(Thread* t, object method, object this_, object arguments)
|
|
|
|
{
|
2008-04-01 17:37:59 +00:00
|
|
|
if (UNLIKELY(t->exception)) return 0;
|
|
|
|
|
2007-09-24 01:39:03 +00:00
|
|
|
assert(t, t->state == Thread::ActiveState
|
|
|
|
or t->state == Thread::ExclusiveState);
|
|
|
|
|
|
|
|
assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));
|
|
|
|
|
2009-08-13 15:17:05 +00:00
|
|
|
method = findMethod(t, method, this_);
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
const char* spec = reinterpret_cast<char*>
|
|
|
|
(&byteArrayBody(t, methodSpec(t, method), 0));
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned size = methodParameterFootprint(t, method);
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY(uintptr_t, array, size);
|
|
|
|
RUNTIME_ARRAY(bool, objectMask, size);
|
|
|
|
ArgumentList list
|
|
|
|
(t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
|
|
|
|
this_, spec, arguments);
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
PROTECT(t, method);
|
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
compile(static_cast<MyThread*>(t),
|
2009-08-27 00:26:44 +00:00
|
|
|
local::codeAllocator(static_cast<MyThread*>(t)), 0, method);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
if (LIKELY(t->exception == 0)) {
|
2009-08-27 00:26:44 +00:00
|
|
|
return local::invoke(t, method, &list);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2007-09-24 01:39:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual object
|
|
|
|
invokeList(Thread* t, object method, object this_, bool indirectObjects,
|
|
|
|
va_list arguments)
|
|
|
|
{
|
2008-04-01 17:37:59 +00:00
|
|
|
if (UNLIKELY(t->exception)) return 0;
|
|
|
|
|
2007-09-24 01:39:03 +00:00
|
|
|
assert(t, t->state == Thread::ActiveState
|
|
|
|
or t->state == Thread::ExclusiveState);
|
|
|
|
|
|
|
|
assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));
|
|
|
|
|
2009-08-13 15:17:05 +00:00
|
|
|
method = findMethod(t, method, this_);
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
const char* spec = reinterpret_cast<char*>
|
|
|
|
(&byteArrayBody(t, methodSpec(t, method), 0));
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned size = methodParameterFootprint(t, method);
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY(uintptr_t, array, size);
|
|
|
|
RUNTIME_ARRAY(bool, objectMask, size);
|
2007-09-25 23:53:11 +00:00
|
|
|
ArgumentList list
|
2009-08-27 00:26:44 +00:00
|
|
|
(t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
|
|
|
|
this_, spec, indirectObjects, arguments);
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
PROTECT(t, method);
|
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
compile(static_cast<MyThread*>(t),
|
2009-08-27 00:26:44 +00:00
|
|
|
local::codeAllocator(static_cast<MyThread*>(t)), 0, method);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
if (LIKELY(t->exception == 0)) {
|
2009-08-27 00:26:44 +00:00
|
|
|
return local::invoke(t, method, &list);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2007-09-24 01:39:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual object
|
2009-08-10 13:56:16 +00:00
|
|
|
invokeList(Thread* t, object loader, const char* className,
|
|
|
|
const char* methodName, const char* methodSpec,
|
|
|
|
object this_, va_list arguments)
|
2007-09-24 01:39:03 +00:00
|
|
|
{
|
2008-04-01 17:37:59 +00:00
|
|
|
if (UNLIKELY(t->exception)) return 0;
|
|
|
|
|
2007-09-24 01:39:03 +00:00
|
|
|
assert(t, t->state == Thread::ActiveState
|
|
|
|
or t->state == Thread::ExclusiveState);
|
|
|
|
|
2010-09-10 21:05:29 +00:00
|
|
|
unsigned size = parameterFootprint(t, methodSpec, this_ == 0);
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY(uintptr_t, array, size);
|
|
|
|
RUNTIME_ARRAY(bool, objectMask, size);
|
2007-09-25 23:53:11 +00:00
|
|
|
ArgumentList list
|
2009-08-27 00:26:44 +00:00
|
|
|
(t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
|
|
|
|
this_, methodSpec, false, arguments);
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2009-08-10 13:56:16 +00:00
|
|
|
object method = resolveMethod
|
|
|
|
(t, loader, className, methodName, methodSpec);
|
2007-09-24 01:39:03 +00:00
|
|
|
if (LIKELY(t->exception == 0)) {
|
|
|
|
assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
PROTECT(t, method);
|
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
compile(static_cast<MyThread*>(t),
|
2009-08-27 00:26:44 +00:00
|
|
|
local::codeAllocator(static_cast<MyThread*>(t)), 0, method);
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
if (LIKELY(t->exception == 0)) {
|
2009-08-27 00:26:44 +00:00
|
|
|
return local::invoke(t, method, &list);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-03 00:22:48 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2007-10-03 00:22:48 +00:00
|
|
|
|
2007-12-30 22:24:48 +00:00
|
|
|
virtual void dispose(Thread* vmt) {
|
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
|
|
|
|
2007-12-11 21:26:59 +00:00
|
|
|
while (t->reference) {
|
|
|
|
vm::dispose(t, t->reference);
|
|
|
|
}
|
2008-01-10 01:20:36 +00:00
|
|
|
|
2008-08-18 15:23:01 +00:00
|
|
|
t->arch->release();
|
|
|
|
|
2008-04-13 18:15:04 +00:00
|
|
|
t->m->heap->free(t, sizeof(*t));
|
2007-12-11 21:26:59 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
virtual void dispose() {
|
2009-04-05 21:42:10 +00:00
|
|
|
if (codeAllocator.base) {
|
|
|
|
s->freeExecutable(codeAllocator.base, codeAllocator.capacity);
|
|
|
|
}
|
|
|
|
|
2008-01-10 01:20:36 +00:00
|
|
|
s->handleSegFault(0);
|
|
|
|
|
2008-04-13 18:15:04 +00:00
|
|
|
allocator->free(this, sizeof(*this));
|
2007-09-24 01:39:03 +00:00
|
|
|
}
|
2008-04-09 19:08:13 +00:00
|
|
|
|
|
|
|
virtual object getStackTrace(Thread* vmt, Thread* vmTarget) {
|
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
|
|
|
MyThread* target = static_cast<MyThread*>(vmTarget);
|
2008-12-02 02:38:00 +00:00
|
|
|
MyProcessor* p = this;
|
2008-04-22 15:31:40 +00:00
|
|
|
|
2008-04-09 19:08:13 +00:00
|
|
|
class Visitor: public System::ThreadVisitor {
|
|
|
|
public:
|
2008-05-31 22:14:27 +00:00
|
|
|
Visitor(MyThread* t, MyProcessor* p, MyThread* target):
|
2009-11-25 02:15:27 +00:00
|
|
|
t(t), p(p), target(target), trace(0)
|
2008-05-31 22:14:27 +00:00
|
|
|
{ }
|
2008-04-09 19:08:13 +00:00
|
|
|
|
|
|
|
virtual void visit(void* ip, void* base, void* stack) {
|
2010-06-16 01:10:48 +00:00
|
|
|
MyThread::TraceContext c(target);
|
2008-04-09 19:08:13 +00:00
|
|
|
|
2008-04-21 17:29:36 +00:00
|
|
|
if (methodForIp(t, ip)) {
|
2010-06-16 01:10:48 +00:00
|
|
|
// we caught the thread in Java code - use the register values
|
|
|
|
c.ip = ip;
|
|
|
|
c.base = base;
|
|
|
|
c.stack = stack;
|
|
|
|
} else if (target->transition) {
|
|
|
|
// we caught the thread in native code while in the middle
|
|
|
|
// of updating the context fields (MyThread::stack,
|
|
|
|
// MyThread::base, etc.)
|
|
|
|
static_cast<MyThread::Context&>(c) = *(target->transition);
|
|
|
|
} else if (isVmInvokeUnsafeStack(ip)) {
|
|
|
|
// we caught the thread in native code just after returning
|
|
|
|
// from Java code, but before clearing MyThread::stack
|
|
|
|
// (which now contains a garbage value), and the most recent
|
|
|
|
// Java frame, if any, can be found in
|
|
|
|
// MyThread::continuation or MyThread::trace
|
|
|
|
c.ip = 0;
|
|
|
|
c.base = 0;
|
|
|
|
c.stack = 0;
|
2010-07-06 22:13:11 +00:00
|
|
|
} else if (target->stack
|
|
|
|
and (not isThunkUnsafeStack(t, ip))
|
|
|
|
and (not isVirtualThunk(t, ip)))
|
|
|
|
{
|
2010-06-16 01:10:48 +00:00
|
|
|
// we caught the thread in a thunk or native code, and the
|
|
|
|
// saved stack and base pointers indicate the most recent
|
|
|
|
// Java frame on the stack
|
|
|
|
c.ip = t->arch->frameIp(target->stack);
|
|
|
|
c.base = target->base;
|
|
|
|
c.stack = target->stack;
|
2010-07-06 22:13:11 +00:00
|
|
|
} else if (isThunk(t, ip) or isVirtualThunk(t, ip)) {
|
2010-06-16 01:10:48 +00:00
|
|
|
// we caught the thread in a thunk where the stack and base
|
|
|
|
// registers indicate the most recent Java frame on the
|
|
|
|
// stack
|
|
|
|
c.ip = t->arch->frameIp(stack);
|
|
|
|
c.base = base;
|
|
|
|
c.stack = stack;
|
2008-04-22 15:31:40 +00:00
|
|
|
} else {
|
2010-06-16 01:10:48 +00:00
|
|
|
// we caught the thread in native code, and the most recent
|
|
|
|
// Java frame, if any, can be found in
|
|
|
|
// MyThread::continuation or MyThread::trace
|
|
|
|
c.ip = 0;
|
|
|
|
c.base = 0;
|
|
|
|
c.stack = 0;
|
2008-04-21 17:29:36 +00:00
|
|
|
}
|
2008-04-09 19:08:13 +00:00
|
|
|
|
2010-06-19 22:40:21 +00:00
|
|
|
if (ensure(t, traceSize(target))) {
|
2010-09-14 16:49:41 +00:00
|
|
|
atomicOr(&(t->flags), Thread::TracingFlag);
|
2010-06-19 22:40:21 +00:00
|
|
|
trace = makeTrace(t, target);
|
2010-09-14 16:49:41 +00:00
|
|
|
atomicAnd(&(t->flags), ~Thread::TracingFlag);
|
2010-06-19 22:40:21 +00:00
|
|
|
}
|
2008-04-09 19:08:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
MyThread* t;
|
2008-05-31 22:14:27 +00:00
|
|
|
MyProcessor* p;
|
2008-04-09 19:08:13 +00:00
|
|
|
MyThread* target;
|
|
|
|
object trace;
|
2008-05-31 22:14:27 +00:00
|
|
|
} visitor(t, p, target);
|
2008-04-09 19:08:13 +00:00
|
|
|
|
2008-04-21 22:36:13 +00:00
|
|
|
t->m->system->visit(t->systemThread, target->systemThread, &visitor);
|
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
|
2008-04-09 19:08:13 +00:00
|
|
|
PROTECT(t, visitor.trace);
|
|
|
|
|
|
|
|
collect(t, Heap::MinorCollection);
|
|
|
|
}
|
|
|
|
|
2009-07-25 01:03:33 +00:00
|
|
|
return visitor.trace ? visitor.trace : makeArray(t, 0);
|
2008-04-09 19:08:13 +00:00
|
|
|
}
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2009-06-01 03:16:58 +00:00
|
|
|
virtual void initialize(BootImage* image, uint8_t* code, unsigned capacity) {
|
|
|
|
bootImage = image;
|
2009-04-05 21:42:10 +00:00
|
|
|
codeAllocator.base = code;
|
|
|
|
codeAllocator.capacity = capacity;
|
2008-11-23 23:58:01 +00:00
|
|
|
}
|
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
virtual void compileMethod(Thread* vmt, Zone* zone, object* constants,
|
|
|
|
object* calls, DelayedPromise** addresses,
|
|
|
|
object method)
|
2008-11-23 23:58:01 +00:00
|
|
|
{
|
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
2008-12-02 16:45:20 +00:00
|
|
|
BootContext bootContext(t, *constants, *calls, *addresses, zone);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
compile(t, &codeAllocator, &bootContext, method);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2008-11-27 20:59:40 +00:00
|
|
|
*constants = bootContext.constants;
|
|
|
|
*calls = bootContext.calls;
|
2008-12-02 16:45:20 +00:00
|
|
|
*addresses = bootContext.addresses;
|
2008-11-23 23:58:01 +00:00
|
|
|
}
|
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
virtual void visitRoots(Thread* t, HeapWalker* w) {
|
|
|
|
bootImage->methodTree = w->visitRoot(root(t, MethodTree));
|
|
|
|
bootImage->methodTreeSentinal = w->visitRoot(root(t, MethodTreeSentinal));
|
|
|
|
bootImage->virtualThunks = w->visitRoot(root(t, VirtualThunks));
|
2008-11-28 22:02:45 +00:00
|
|
|
}
|
|
|
|
|
2009-06-01 03:16:58 +00:00
|
|
|
  virtual unsigned* makeCallTable(Thread* t, HeapWalker* w) {
    bootImage->codeSize = codeAllocator.offset;
    bootImage->callCount = callTableSize;

    unsigned* table = static_cast<unsigned*>
      (t->m->heap->allocate(callTableSize * sizeof(unsigned) * 2));

    unsigned index = 0;
    for (unsigned i = 0; i < arrayLength(t, root(t, CallTable)); ++i) {
      for (object p = arrayBody(t, root(t, CallTable), i);
           p; p = callNodeNext(t, p))
      {
        table[index++] = callNodeAddress(t, p)
          - reinterpret_cast<uintptr_t>(codeAllocator.base);
        table[index++] = w->map()->find(callNodeTarget(t, p))
          | (static_cast<unsigned>(callNodeFlags(t, p)) << BootShift);
      }
    }

    return table;
  }

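  // Note on the table flattened by makeCallTable above: each entry occupies
  // two unsigned slots, the call site's offset from codeAllocator.base and
  // the heap-walker index of its target combined with the node's flags
  // shifted into the high bits. Illustrative example (values made up): a
  // call at code offset 0x40 whose target received heap index 7 and whose
  // flags are 1 would be emitted as the pair { 0x40, 7 | (1 << BootShift) }.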
  virtual void boot(Thread* t, BootImage* image) {
    if (codeAllocator.base == 0) {
      codeAllocator.base = static_cast<uint8_t*>
        (s->tryAllocateExecutable(ExecutableAreaSizeInBytes));
      codeAllocator.capacity = ExecutableAreaSizeInBytes;
    }

    roots = makeArray(t, RootCount);

    if (image) {
      local::boot(static_cast<MyThread*>(t), image);
    } else {
      setRoot(t, CallTable, makeArray(t, 128));

      setRoot(t, MethodTreeSentinal, makeTreeNode(t, 0, 0, 0));
      setRoot(t, MethodTree, root(t, MethodTreeSentinal));
      set(t, root(t, MethodTree), TreeNodeLeft,
          root(t, MethodTreeSentinal));
      set(t, root(t, MethodTree), TreeNodeRight,
          root(t, MethodTreeSentinal));
    }

    local::compileThunks(static_cast<MyThread*>(t), &codeAllocator, this);

    segFaultHandler.m = t->m;
    expect(t, t->m->system->success
           (t->m->system->handleSegFault(&segFaultHandler)));
  }

  virtual void callWithCurrentContinuation(Thread* t, object receiver) {
    if (Continuations) {
      local::callWithCurrentContinuation(static_cast<MyThread*>(t), receiver);
    } else {
      abort(t);
    }
  }

  virtual void dynamicWind(Thread* t, object before, object thunk,
                           object after)
  {
    if (Continuations) {
      local::dynamicWind(static_cast<MyThread*>(t), before, thunk, after);
    } else {
      abort(t);
    }
  }

  virtual void feedResultToContinuation(Thread* t, object continuation,
                                        object result)
  {
    if (Continuations) {
      callContinuation(static_cast<MyThread*>(t), continuation, result, 0);
    } else {
      abort(t);
    }
  }

  virtual void feedExceptionToContinuation(Thread* t, object continuation,
                                           object exception)
  {
    if (Continuations) {
      callContinuation(static_cast<MyThread*>(t), continuation, 0, exception);
    } else {
      abort(t);
    }
  }

  virtual void walkContinuationBody(Thread* t, Heap::Walker* w, object o,
                                    unsigned start)
  {
    if (Continuations) {
      local::walkContinuationBody(static_cast<MyThread*>(t), w, o, start);
    } else {
      abort(t);
    }
  }

  System* s;
  Allocator* allocator;
  object roots;
  BootImage* bootImage;
  SegFaultHandler segFaultHandler;
  FixedAllocator codeAllocator;
  ThunkCollection thunks;
  ThunkCollection bootThunks;
  unsigned callTableSize;
  bool useNativeFeatures;
};

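// The predicates below classify an instruction pointer sampled from another
// thread: isThunk() asks whether it lies anywhere in the compiled thunk
// area, isVirtualThunk() whether it lies in one of the generated
// virtual-call thunks, and isThunkUnsafeStack() whether it lies in a thunk
// prologue before the frame has been saved (ip < start + frameSavedOffset),
// i.e. where the stack cannot yet be walked reliably.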
bool
isThunk(MyProcessor::ThunkCollection* thunks, void* ip)
{
  uint8_t* thunkStart = thunks->default_.start;
  uint8_t* thunkEnd = thunks->table.start
    + (thunks->table.length * ThunkCount);

  return (reinterpret_cast<uintptr_t>(ip)
          >= reinterpret_cast<uintptr_t>(thunkStart)
          and reinterpret_cast<uintptr_t>(ip)
          < reinterpret_cast<uintptr_t>(thunkEnd));
}

bool
isThunk(MyThread* t, void* ip)
{
  MyProcessor* p = processor(t);

  return isThunk(&(p->thunks), ip) or isThunk(&(p->bootThunks), ip);
}

bool
isThunkUnsafeStack(MyProcessor::Thunk* thunk, void* ip)
{
  return reinterpret_cast<uintptr_t>(ip)
    >= reinterpret_cast<uintptr_t>(thunk->start)
    and reinterpret_cast<uintptr_t>(ip)
    < reinterpret_cast<uintptr_t>(thunk->start + thunk->frameSavedOffset);
}

bool
isThunkUnsafeStack(MyProcessor::ThunkCollection* thunks, void* ip)
{
  const unsigned NamedThunkCount = 4;

  MyProcessor::Thunk table[NamedThunkCount + ThunkCount];

  table[0] = thunks->default_;
  table[1] = thunks->defaultVirtual;
  table[2] = thunks->native;
  table[3] = thunks->aioob;

  for (unsigned i = 0; i < ThunkCount; ++i) {
    new (table + NamedThunkCount + i) MyProcessor::Thunk
      (thunks->table.start + (i * thunks->table.length),
       thunks->table.frameSavedOffset,
       thunks->table.length);
  }

  for (unsigned i = 0; i < NamedThunkCount + ThunkCount; ++i) {
    if (isThunkUnsafeStack(table + i, ip)) {
      return true;
    }
  }

  return false;
}

bool
isVirtualThunk(MyThread* t, void* ip)
{
  for (unsigned i = 0; i < wordArrayLength(t, root(t, VirtualThunks)); i += 2)
  {
    uintptr_t start = wordArrayBody(t, root(t, VirtualThunks), i);
    uintptr_t end = start + wordArrayBody(t, root(t, VirtualThunks), i + 1);

    if (reinterpret_cast<uintptr_t>(ip) >= start
        and reinterpret_cast<uintptr_t>(ip) < end)
    {
      return true;
    }
  }

  return false;
}

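// root(t, VirtualThunks), scanned by isVirtualThunk above, is a word array
// of (startAddress, length) pairs, one pair per generated virtual-call
// thunk; that layout is why the loop strides by two.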
bool
isThunkUnsafeStack(MyThread* t, void* ip)
{
  MyProcessor* p = processor(t);

  return isThunk(t, ip)
    and (isThunkUnsafeStack(&(p->thunks), ip)
         or isThunkUnsafeStack(&(p->bootThunks), ip));
}

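// findCallNode and the helpers that follow maintain the call table: a hash
// table mapping machine-code call-site addresses to call nodes. The bucket
// array has power-of-two length, each bucket holds a chain linked through
// callNodeNext, and a key selects its bucket by masking the address with
// (arrayLength - 1).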
object
findCallNode(MyThread* t, void* address)
{
  if (DebugCallTable) {
    fprintf(stderr, "find call node %p\n", address);
  }

  // we must use a version of the call table at least as recent as the
  // compiled form of the method containing the specified address (see
  // compile(MyThread*, Allocator*, BootContext*, object)):
  loadMemoryBarrier();

  object table = root(t, CallTable);

  intptr_t key = reinterpret_cast<intptr_t>(address);
  unsigned index = static_cast<uintptr_t>(key) & (arrayLength(t, table) - 1);

  for (object n = arrayBody(t, table, index);
       n; n = callNodeNext(t, n))
  {
    intptr_t k = callNodeAddress(t, n);

    if (k == key) {
      return n;
    }
  }

  return 0;
}

object
resizeTable(MyThread* t, object oldTable, unsigned newLength)
{
  PROTECT(t, oldTable);

  object oldNode = 0;
  PROTECT(t, oldNode);

  object newTable = makeArray(t, newLength);
  PROTECT(t, newTable);

  for (unsigned i = 0; i < arrayLength(t, oldTable); ++i) {
    for (oldNode = arrayBody(t, oldTable, i);
         oldNode;
         oldNode = callNodeNext(t, oldNode))
    {
      intptr_t k = callNodeAddress(t, oldNode);

      unsigned index = k & (newLength - 1);

      object newNode = makeCallNode
        (t, callNodeAddress(t, oldNode),
         callNodeTarget(t, oldNode),
         callNodeFlags(t, oldNode),
         arrayBody(t, newTable, index));

      set(t, newTable, ArrayBody + (index * BytesPerWord), newNode);
    }
  }

  return newTable;
}

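// insertCallNode (below) grows the table once the element count reaches
// twice the bucket count, keeping chains short on average; resizeTable
// (above) rehashes every node because the bucket index depends on the
// table length.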
object
insertCallNode(MyThread* t, object table, unsigned* size, object node)
{
  if (DebugCallTable) {
    fprintf(stderr, "insert call node %p\n",
            reinterpret_cast<void*>(callNodeAddress(t, node)));
  }

  PROTECT(t, table);
  PROTECT(t, node);

  ++ (*size);

  if (*size >= arrayLength(t, table) * 2) {
    table = resizeTable(t, table, arrayLength(t, table) * 2);
  }

  intptr_t key = callNodeAddress(t, node);
  unsigned index = static_cast<uintptr_t>(key) & (arrayLength(t, table) - 1);

  set(t, node, CallNodeNext, arrayBody(t, table, index));
  set(t, table, ArrayBody + (index * BytesPerWord), node);

  return table;
}

void
insertCallNode(MyThread* t, object node)
{
  setRoot(t, CallTable, insertCallNode
          (t, root(t, CallTable), &(processor(t)->callTableSize), node));
}

object
makeClassMap(Thread* t, unsigned* table, unsigned count, uintptr_t* heap)
{
  object array = makeArray(t, nextPowerOfTwo(count));
  object map = makeHashMap(t, 0, array);
  PROTECT(t, map);

  for (unsigned i = 0; i < count; ++i) {
    object c = bootObject(heap, table[i]);
    hashMapInsert(t, map, className(t, c), c, byteArrayHash);
  }

  return map;
}

object
makeStaticTableArray(Thread* t, unsigned* bootTable, unsigned bootCount,
                     unsigned* appTable, unsigned appCount, uintptr_t* heap)
{
  object array = makeArray(t, bootCount + appCount);

  for (unsigned i = 0; i < bootCount; ++i) {
    set(t, array, ArrayBody + (i * BytesPerWord),
        classStaticTable(t, bootObject(heap, bootTable[i])));
  }

  for (unsigned i = 0; i < appCount; ++i) {
    set(t, array, ArrayBody + ((bootCount + i) * BytesPerWord),
        classStaticTable(t, bootObject(heap, appTable[i])));
  }

  return array;
}

object
makeStringMap(Thread* t, unsigned* table, unsigned count, uintptr_t* heap)
{
  object array = makeArray(t, nextPowerOfTwo(count));
  object map = makeWeakHashMap(t, 0, array);
  PROTECT(t, map);

  for (unsigned i = 0; i < count; ++i) {
    object s = bootObject(heap, table[i]);
    hashMapInsert(t, map, s, 0, stringHash);
  }

  return map;
}

object
makeCallTable(MyThread* t, uintptr_t* heap, unsigned* calls, unsigned count,
              uintptr_t base)
{
  object table = makeArray(t, nextPowerOfTwo(count));
  PROTECT(t, table);

  unsigned size = 0;
  for (unsigned i = 0; i < count; ++i) {
    unsigned address = calls[i * 2];
    unsigned target = calls[(i * 2) + 1];

    object node = makeCallNode
      (t, base + address, bootObject(heap, target & BootMask),
       target >> BootShift, 0);

    table = insertCallNode(t, table, &size, node);
  }

  return table;
}

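// fixupHeap (below) walks the boot image's heap map: each set bit marks a
// heap word holding an encoded reference, where (word & BootMask) is the
// target's heap index plus one and (word >> BootShift) carries the tag bits
// that are OR'd back into the rebuilt pointer. A zero index means the word
// held only tag bits.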
void
fixupHeap(MyThread* t UNUSED, uintptr_t* map, unsigned size, uintptr_t* heap)
{
  for (unsigned word = 0; word < size; ++word) {
    uintptr_t w = map[word];
    if (w) {
      for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
        if (w & (static_cast<uintptr_t>(1) << bit)) {
          unsigned index = indexOf(word, bit);
          uintptr_t* p = heap + index;
          assert(t, *p);

          uintptr_t number = *p & BootMask;
          uintptr_t mark = *p >> BootShift;

          if (number) {
            *p = reinterpret_cast<uintptr_t>(heap + (number - 1)) | mark;
          } else {
            *p = mark;
          }
        }
      }
    }
  }
}

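// fixupCode (below) does the analogous walk over the code map: each marked
// word in the generated code is rewritten either as a pointer into the
// freshly loaded heap (BootHeapOffset set) or back into the code area
// itself, and is stored either as a flat word (BootFlatConstant) or through
// the architecture's constant-patching routine.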
void
fixupCode(Thread* t, uintptr_t* map, unsigned size, uint8_t* code,
          uintptr_t* heap)
{
  Assembler::Architecture* arch = makeArchitecture(t->m->system, false);
  arch->acquire();

  for (unsigned word = 0; word < size; ++word) {
    uintptr_t w = map[word];
    if (w) {
      for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
        if (w & (static_cast<uintptr_t>(1) << bit)) {
          unsigned index = indexOf(word, bit);

          uintptr_t oldValue; memcpy(&oldValue, code + index, BytesPerWord);
          uintptr_t newValue;
          if (oldValue & BootHeapOffset) {
            newValue = reinterpret_cast<uintptr_t>
              (heap + (oldValue & BootMask) - 1);
          } else {
            newValue = reinterpret_cast<uintptr_t>
              (code + (oldValue & BootMask));
          }

          if (oldValue & BootFlatConstant) {
            memcpy(code + index, &newValue, BytesPerWord);
          } else {
            arch->setConstant(code + index, newValue);
          }
        }
      }
    }
  }

  arch->release();
}

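// fixupMethods (below) rebases every compiled or native method's
// methodCompiled address from the image's original codeBase to the address
// at which the code was just loaded, then asks the processor to
// re-initialize each class's vtable against the relocated code.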
void
fixupMethods(Thread* t, object map, BootImage* image, uint8_t* code)
{
  for (HashMapIterator it(t, map); it.hasMore();) {
    object c = tripleSecond(t, it.next());

    if (classMethodTable(t, c)) {
      for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
        object method = arrayBody(t, classMethodTable(t, c), i);
        if (methodCode(t, method) or (methodFlags(t, method) & ACC_NATIVE)) {
          assert(t, (methodCompiled(t, method) - image->codeBase)
                 <= image->codeSize);

          methodCompiled(t, method)
            = (methodCompiled(t, method) - image->codeBase)
            + reinterpret_cast<uintptr_t>(code);

          if (DebugCompile and (methodFlags(t, method) & ACC_NATIVE) == 0) {
            logCompile
              (static_cast<MyThread*>(t),
               reinterpret_cast<uint8_t*>(methodCompiled(t, method)),
               reinterpret_cast<uintptr_t*>
               (methodCompiled(t, method))[-1],
               reinterpret_cast<char*>
               (&byteArrayBody(t, className(t, methodClass(t, method)), 0)),
               reinterpret_cast<char*>
               (&byteArrayBody(t, methodName(t, method), 0)),
               reinterpret_cast<char*>
               (&byteArrayBody(t, methodSpec(t, method), 0)));
          }
        }
      }
    }

    t->m->processor->initVtable(t, c);
  }
}

MyProcessor::Thunk
thunkToThunk(const BootImage::Thunk& thunk, uint8_t* base)
{
  return MyProcessor::Thunk
    (base + thunk.start, thunk.frameSavedOffset, thunk.length);
}

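// fixupThunks (below) rebases the thunk descriptors recorded in the boot
// image onto the loaded code area (via thunkToThunk above) and patches the
// calls embedded in those thunks so they target the running VM's own entry
// points (compileMethod, invokeNative, and so on).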
void
fixupThunks(MyThread* t, BootImage* image, uint8_t* code)
{
  MyProcessor* p = processor(t);

  p->bootThunks.default_ = thunkToThunk(image->thunks.default_, code);
  p->bootThunks.defaultVirtual
    = thunkToThunk(image->thunks.defaultVirtual, code);
  p->bootThunks.native = thunkToThunk(image->thunks.native, code);
  p->bootThunks.aioob = thunkToThunk(image->thunks.aioob, code);
  p->bootThunks.table = thunkToThunk(image->thunks.table, code);

  updateCall(t, LongCall, code + image->compileMethodCall,
             voidPointer(local::compileMethod));

  updateCall(t, LongCall, code + image->compileVirtualMethodCall,
             voidPointer(local::compileVirtualMethod));

  updateCall(t, LongCall, code + image->invokeNativeCall,
             voidPointer(invokeNative));

  updateCall(t, LongCall, code + image->throwArrayIndexOutOfBoundsCall,
             voidPointer(throwArrayIndexOutOfBounds));

#define THUNK(s) \
  updateCall(t, LongJump, code + image->s##Call, voidPointer(s));

#include "thunks.cpp"

#undef THUNK
}

void
fixupVirtualThunks(MyThread* t, BootImage* image, uint8_t* code)
{
  for (unsigned i = 0; i < wordArrayLength(t, root(t, VirtualThunks)); i += 2) {
    if (wordArrayBody(t, root(t, VirtualThunks), i)) {
      wordArrayBody(t, root(t, VirtualThunks), i)
        = (wordArrayBody(t, root(t, VirtualThunks), i) - image->codeBase)
        + reinterpret_cast<uintptr_t>(code);
    }
  }
}

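// boot (below) unpacks a boot image laid out, in order, as: the BootImage
// header, the boot and app class tables, the string table, the call table,
// the heap map, the heap itself, the code map, and finally the generated
// code; the heap map starts at the next word boundary (padWord) after the
// call table.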
void
boot(MyThread* t, BootImage* image)
{
  assert(t, image->magic == BootImage::Magic);

  unsigned* bootClassTable = reinterpret_cast<unsigned*>(image + 1);
  unsigned* appClassTable = bootClassTable + image->bootClassCount;
  unsigned* stringTable = appClassTable + image->appClassCount;
  unsigned* callTable = stringTable + image->stringCount;

  uintptr_t* heapMap = reinterpret_cast<uintptr_t*>
    (padWord(reinterpret_cast<uintptr_t>(callTable + (image->callCount * 2))));
  unsigned heapMapSizeInWords = ceiling
    (heapMapSize(image->heapSize), BytesPerWord);
  uintptr_t* heap = heapMap + heapMapSizeInWords;

  // fprintf(stderr, "heap from %p to %p\n",
  //         heap, heap + ceiling(image->heapSize, BytesPerWord));

  uintptr_t* codeMap = heap + ceiling(image->heapSize, BytesPerWord);
  unsigned codeMapSizeInWords = ceiling
    (codeMapSize(image->codeSize), BytesPerWord);
  uint8_t* code = reinterpret_cast<uint8_t*>(codeMap + codeMapSizeInWords);

  // fprintf(stderr, "code from %p to %p\n",
  //         code, code + image->codeSize);

  fixupHeap(t, heapMap, heapMapSizeInWords, heap);

  t->m->heap->setImmortalHeap(heap, image->heapSize / BytesPerWord);

  setRoot(t, Machine::BootLoader, bootObject(heap, image->bootLoader));
  setRoot(t, Machine::AppLoader, bootObject(heap, image->appLoader));
  t->m->types = bootObject(heap, image->types);

  MyProcessor* p = static_cast<MyProcessor*>(t->m->processor);

  setRoot(t, MethodTree, bootObject(heap, image->methodTree));
  setRoot(t, MethodTreeSentinal, bootObject(heap, image->methodTreeSentinal));

  setRoot(t, VirtualThunks, bootObject(heap, image->virtualThunks));

  fixupCode(t, codeMap, codeMapSizeInWords, code, heap);

  syncInstructionCache(code, image->codeSize);

  setRoot(t, Machine::ClassMap, makeClassMap
          (t, bootClassTable, image->bootClassCount, heap));

  set(t, root(t, Machine::AppLoader), ClassLoaderMap, makeClassMap
      (t, appClassTable, image->appClassCount, heap));

  setRoot(t, Machine::StringMap, makeStringMap
          (t, stringTable, image->stringCount, heap));

  p->callTableSize = image->callCount;

  setRoot(t, CallTable, makeCallTable
          (t, heap, callTable, image->callCount,
           reinterpret_cast<uintptr_t>(code)));

  setRoot(t, StaticTableArray, makeStaticTableArray
          (t, bootClassTable, image->bootClassCount,
           appClassTable, image->appClassCount, heap));

  fixupThunks(t, image, code);

  fixupVirtualThunks(t, image, code);

  fixupMethods(t, root(t, Machine::ClassMap), image, code);
  fixupMethods(t, classLoaderMap(t, root(t, Machine::AppLoader)), image, code);

  setRoot(t, Machine::BootstrapClassMap, makeHashMap(t, 0, 0));
}

intptr_t
getThunk(MyThread* t, Thunk thunk)
{
  MyProcessor* p = processor(t);

  return reinterpret_cast<intptr_t>
    (p->thunks.table.start + (thunk * p->thunks.table.length));
}

BootImage::Thunk
thunkToThunk(const MyProcessor::Thunk& thunk, uint8_t* base)
{
  return BootImage::Thunk
    (thunk.start - base, thunk.frameSavedOffset, thunk.length);
}

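// compileThunks (below) assembles the processor's entry thunks; the default
// and default-virtual thunks are visible here, and the same collection also
// carries the native, aioob, and per-function table thunks referenced by
// isThunkUnsafeStack above. For each thunk it records frameSavedOffset, the
// code offset just past the saveFrame call, which is what lets
// isThunkUnsafeStack recognize a sampled instruction pointer still in the
// unsafe prologue.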
void
|
2009-06-01 03:16:58 +00:00
|
|
|
compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p)
|
2008-05-31 22:14:27 +00:00
|
|
|
{
|
|
|
|
class ThunkContext {
|
|
|
|
public:
|
2008-11-27 20:59:40 +00:00
|
|
|
ThunkContext(MyThread* t, Zone* zone):
|
|
|
|
context(t), promise(t->m->system, zone)
|
|
|
|
{ }
|
2008-05-31 22:14:27 +00:00
|
|
|
|
|
|
|
Context context;
|
2008-11-27 20:59:40 +00:00
|
|
|
ListenPromise promise;
|
2008-05-31 22:14:27 +00:00
|
|
|
};
|
|
|
|
|
2008-11-27 20:59:40 +00:00
|
|
|
Zone zone(t->m->system, t->m->heap, 1024);
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2008-11-27 20:59:40 +00:00
|
|
|
ThunkContext defaultContext(t, &zone);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
|
|
|
  { Assembler* a = defaultContext.context.assembler;

    a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));

    p->thunks.default_.frameSavedOffset = a->length();

    Assembler::Register thread(t->arch->thread());
    a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);

    Assembler::Constant proc(&(defaultContext.promise));
    a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);

    a->popFrame();

    Assembler::Register result(t->arch->returnLow());
    a->apply(Jump, BytesPerWord, RegisterOperand, &result);

    a->endBlock(false)->resolve(0, 0);

    p->thunks.default_.length = a->length();
  }

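  // Default virtual thunk: stash the virtual call target (loaded from the
  // stack) and the call index into the thread, then proceed like the default
  // thunk, calling compileVirtualMethod via the context's promise.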
  ThunkContext defaultVirtualContext(t, &zone);

  { Assembler* a = defaultVirtualContext.context.assembler;

    Assembler::Register class_(t->arch->virtualCallTarget());
    Assembler::Memory virtualCallTargetSrc
      (t->arch->stack(),
       (t->arch->frameFooterSize() + t->arch->frameReturnAddressSize())
       * BytesPerWord);

    a->apply(Move, BytesPerWord, MemoryOperand, &virtualCallTargetSrc,
             BytesPerWord, RegisterOperand, &class_);

    Assembler::Memory virtualCallTargetDst
      (t->arch->thread(), difference(&(t->virtualCallTarget), t));

    a->apply(Move, BytesPerWord, RegisterOperand, &class_,
             BytesPerWord, MemoryOperand, &virtualCallTargetDst);

    Assembler::Register index(t->arch->virtualCallIndex());
    Assembler::Memory virtualCallIndex
      (t->arch->thread(), difference(&(t->virtualCallIndex), t));

    a->apply(Move, BytesPerWord, RegisterOperand, &index,
             BytesPerWord, MemoryOperand, &virtualCallIndex);

    a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));

    p->thunks.defaultVirtual.frameSavedOffset = a->length();

    Assembler::Register thread(t->arch->thread());
    a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);

    Assembler::Constant proc(&(defaultVirtualContext.promise));
    a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);

    a->popFrame();

    Assembler::Register result(t->arch->returnLow());
    a->apply(Jump, BytesPerWord, RegisterOperand, &result);

    a->endBlock(false)->resolve(0, 0);

    p->thunks.defaultVirtual.length = a->length();
  }

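  // Native thunk: save the frame and call invokeNative, then pop the frame
  // and return directly to the caller, updating the saved stack pointer.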
  ThunkContext nativeContext(t, &zone);

  { Assembler* a = nativeContext.context.assembler;

    a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));

    p->thunks.native.frameSavedOffset = a->length();

    Assembler::Register thread(t->arch->thread());
    a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);

    Assembler::Constant proc(&(nativeContext.promise));
    a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);

    a->popFrameAndUpdateStackAndReturn(difference(&(t->stack), t));

    a->endBlock(false)->resolve(0, 0);

    p->thunks.native.length = a->length();
  }

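  // Array-bounds thunk: save the frame and call throwArrayIndexOutOfBounds;
  // no frame teardown follows since the helper raises the exception rather
  // than returning.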
  ThunkContext aioobContext(t, &zone);

  { Assembler* a = aioobContext.context.assembler;

    a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));

    p->thunks.aioob.frameSavedOffset = a->length();

    Assembler::Register thread(t->arch->thread());
    a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);

    Assembler::Constant proc(&(aioobContext.promise));
    a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);

    a->endBlock(false)->resolve(0, 0);

    p->thunks.aioob.length = a->length();
  }

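  // Thunk table template: a single LongJump whose target is resolved per
  // copy; it is stamped out once per THUNK entry from thunks.cpp below.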
  ThunkContext tableContext(t, &zone);

  { Assembler* a = tableContext.context.assembler;

    a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));

    p->thunks.table.frameSavedOffset = a->length();

    Assembler::Constant proc(&(tableContext.promise));
    a->apply(LongJump, BytesPerWord, ConstantOperand, &proc);

    a->endBlock(false)->resolve(0, 0);

    p->thunks.table.length = a->length();
  }

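  // Finish each thunk into executable memory and, when building a boot
  // image, record the offsets of the call sites that must be fixed up at
  // runtime.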
  p->thunks.default_.start = finish
    (t, allocator, defaultContext.context.assembler, "default");

  BootImage* image = p->bootImage;
  uint8_t* imageBase = p->codeAllocator.base;

  { void* call;
    defaultContext.promise.listener->resolve
      (reinterpret_cast<intptr_t>(voidPointer(compileMethod)), &call);

    if (image) {
      image->compileMethodCall = static_cast<uint8_t*>(call) - imageBase;
    }
  }

  p->thunks.defaultVirtual.start = finish
    (t, allocator, defaultVirtualContext.context.assembler, "defaultVirtual");

  { void* call;
    defaultVirtualContext.promise.listener->resolve
      (reinterpret_cast<intptr_t>(voidPointer(compileVirtualMethod)), &call);

    if (image) {
      image->compileVirtualMethodCall
        = static_cast<uint8_t*>(call) - imageBase;
    }
  }

  p->thunks.native.start = finish
    (t, allocator, nativeContext.context.assembler, "native");

  { void* call;
    nativeContext.promise.listener->resolve
      (reinterpret_cast<intptr_t>(voidPointer(invokeNative)), &call);

    if (image) {
      image->invokeNativeCall = static_cast<uint8_t*>(call) - imageBase;
    }
  }

  p->thunks.aioob.start = finish
    (t, allocator, aioobContext.context.assembler, "aioob");

  { void* call;
    aioobContext.promise.listener->resolve
      (reinterpret_cast<intptr_t>(voidPointer(throwArrayIndexOutOfBounds)),
       &call);

    if (image) {
      image->throwArrayIndexOutOfBoundsCall
        = static_cast<uint8_t*>(call) - imageBase;
    }
  }

  p->thunks.table.start = static_cast<uint8_t*>
    (allocator->allocate(p->thunks.table.length * ThunkCount));

  if (image) {
    image->thunks.default_ = thunkToThunk(p->thunks.default_, imageBase);
    image->thunks.defaultVirtual
      = thunkToThunk(p->thunks.defaultVirtual, imageBase);
    image->thunks.native = thunkToThunk(p->thunks.native, imageBase);
    image->thunks.aioob = thunkToThunk(p->thunks.aioob, imageBase);
    image->thunks.table = thunkToThunk(p->thunks.table, imageBase);
  }

  logCompile(t, p->thunks.table.start, p->thunks.table.length * ThunkCount, 0,
             "thunkTable", 0);

  uint8_t* start = p->thunks.table.start;

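  // Each THUNK(s) expansion below writes one copy of the table stub and
  // resolves that copy's jump target to the helper function named s.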
#define THUNK(s) \
  tableContext.context.assembler->writeTo(start); \
  start += p->thunks.table.length; \
  { void* call; \
    tableContext.promise.listener->resolve \
      (reinterpret_cast<intptr_t>(voidPointer(s)), &call); \
    if (image) { \
      image->s##Call = static_cast<uint8_t*>(call) - imageBase; \
    } \
  }

#include "thunks.cpp"

#undef THUNK
}

MyProcessor*
processor(MyThread* t)
{
  return static_cast<MyProcessor*>(t->m->processor);
}

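// Accessors for the addresses of the shared thunks; the boot* variants
// return the copies recorded in the processor's bootThunks set.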
uintptr_t
defaultThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.default_.start);
}

uintptr_t
bootDefaultThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.default_.start);
}

uintptr_t
defaultVirtualThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>
    (processor(t)->thunks.defaultVirtual.start);
}

uintptr_t
nativeThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.native.start);
}

uintptr_t
bootNativeThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.native.start);
}

uintptr_t
aioobThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.aioob.start);
}
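
// A method address that still points at one of the default thunks means the
// method has not been compiled (or bound to native code) yet.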
bool
unresolved(MyThread* t, uintptr_t methodAddress)
{
  return methodAddress == defaultThunk(t)
    or methodAddress == bootDefaultThunk(t);
}
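
// Assembles a small stub for vtable slot 'index': it moves the index into the
// architecture's virtual-call register and jumps to the shared default
// virtual thunk, which presumably resolves and compiles the actual target on
// first use.  Returns the stub's address and stores its length in *size.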
uintptr_t
compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
{
  Context context(t);
  Assembler* a = context.assembler;

  ResolvedPromise indexPromise(index);
  Assembler::Constant indexConstant(&indexPromise);
  Assembler::Register indexRegister(t->arch->virtualCallIndex());
  a->apply(Move, BytesPerWord, ConstantOperand, &indexConstant,
           BytesPerWord, RegisterOperand, &indexRegister);

  ResolvedPromise defaultVirtualThunkPromise(defaultVirtualThunk(t));
  Assembler::Constant thunk(&defaultVirtualThunkPromise);
  a->apply(Jump, BytesPerWord, ConstantOperand, &thunk);

  a->endBlock(false)->resolve(0, 0);

  *size = a->length();

  uint8_t* start = static_cast<uint8_t*>(codeAllocator(t)->allocate(*size));

  a->writeTo(start);

  logCompile(t, start, *size, 0, "virtualThunk", 0);

  return reinterpret_cast<uintptr_t>(start);
}
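
// Returns the stub for vtable slot 'index', compiling and caching it on first
// use.  Stub addresses and sizes are stored pairwise in the VirtualThunks
// word array root; the check is repeated under classLock so each stub is
// compiled only once.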
uintptr_t
virtualThunk(MyThread* t, unsigned index)
{
  if (root(t, VirtualThunks) == 0
      or wordArrayLength(t, root(t, VirtualThunks)) <= index * 2)
  {
    object newArray = makeWordArray(t, nextPowerOfTwo((index + 1) * 2));
    if (root(t, VirtualThunks)) {
      memcpy(&wordArrayBody(t, newArray, 0),
             &wordArrayBody(t, root(t, VirtualThunks), 0),
             wordArrayLength(t, root(t, VirtualThunks)) * BytesPerWord);
    }
    setRoot(t, VirtualThunks, newArray);
  }

  if (wordArrayBody(t, root(t, VirtualThunks), index * 2) == 0) {
    ACQUIRE(t, t->m->classLock);

    if (wordArrayBody(t, root(t, VirtualThunks), index * 2) == 0) {
      unsigned size;
      uintptr_t thunk = compileVirtualThunk(t, index, &size);
      wordArrayBody(t, root(t, VirtualThunks), index * 2) = thunk;
      wordArrayBody(t, root(t, VirtualThunks), (index * 2) + 1) = size;
    }
  }

  return wordArrayBody(t, root(t, VirtualThunks), index * 2);
}
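
// Compiles 'method' lazily: the defaultThunk check is repeated under
// classLock (double-checked locking) so concurrent callers compile a method
// only once.  See the comment below on why a clone, rather than the method
// itself, is inserted into the method tree first.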
void
compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
        object method)
{
  PROTECT(t, method);

  if (bootContext == 0) {
    initClass(t, methodClass(t, method));
    if (UNLIKELY(t->exception)) return;
  }

  if (methodAddress(t, method) == defaultThunk(t)) {
    ACQUIRE(t, t->m->classLock);

    if (methodAddress(t, method) == defaultThunk(t)) {
      assert(t, (methodFlags(t, method) & ACC_NATIVE) == 0);

      Context context(t, bootContext, method);
      uint8_t* compiled = compile(t, allocator, &context);
      if (UNLIKELY(t->exception)) return;

      if (DebugMethodTree) {
        fprintf(stderr, "insert method at %p\n", compiled);
      }

      // We can't set the MethodCompiled field on the original method
      // before it is placed into the method tree, since another
      // thread might call the method, from which stack unwinding
      // would fail (since there is not yet an entry in the method
      // tree). However, we can't insert the original method into the
      // tree before setting the MethodCompiled field on it since we
      // rely on that field to determine its position in the tree.
      // Therefore, we insert a clone in its place. Later, we'll
      // replace the clone with the original to save memory.

      object clone = makeMethod
        (t, methodVmFlags(t, method),
         methodReturnCode(t, method),
         methodParameterCount(t, method),
         methodParameterFootprint(t, method),
         methodFlags(t, method),
         methodOffset(t, method),
         methodNativeID(t, method),
         methodName(t, method),
         methodSpec(t, method),
         methodAddendum(t, method),
         methodClass(t, method),
         methodCode(t, method),
         reinterpret_cast<intptr_t>(compiled));

      setRoot
        (t, MethodTree, treeInsert
         (t, &(context.zone), root(t, MethodTree),
          reinterpret_cast<intptr_t>(compiled), clone,
          root(t, MethodTreeSentinal),
          compareIpToMethodBounds));
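
      // Publish the tree entry before exposing the compiled address via
      // MethodCompiled: the store-store barrier presumably ensures another
      // thread never sees the new address without the corresponding method
      // tree entry it needs for stack unwinding.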
      storeStoreMemoryBarrier();

      methodCompiled(t, method) = reinterpret_cast<intptr_t>(compiled);

      if (methodVirtual(t, method)) {
        classVtable(t, methodClass(t, method), methodOffset(t, method))
          = compiled;
      }

      treeUpdate(t, root(t, MethodTree), reinterpret_cast<intptr_t>(compiled),
                 method, root(t, MethodTreeSentinal), compareIpToMethodBounds);
    }
  }
}
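
// root and setRoot read and update entries of the processor-wide roots array,
// keyed by the Root enum; keeping them in a single array presumably lets the
// collector visit them in one place.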
object&
root(Thread* t, Root root)
{
  return arrayBody(t, processor(static_cast<MyThread*>(t))->roots, root);
}

void
setRoot(Thread* t, Root root, object value)
{
  set(t, processor(static_cast<MyThread*>(t))->roots,
      ArrayBody + (root * BytesPerWord), value);
}
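
// Accessor for the processor's FixedAllocator from which generated code
// (thunks and compiled methods) is allocated.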
FixedAllocator*
codeAllocator(MyThread* t)
{
  return &(processor(t)->codeAllocator);
}

} // namespace local

} // namespace

namespace vm {
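
// Exported factory: creates the JIT-based Processor implementation defined in
// this file.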
Processor*
makeProcessor(System* system, Allocator* allocator, bool useNativeFeatures)
{
  return new (allocator->allocate(sizeof(local::MyProcessor)))
    local::MyProcessor(system, allocator, useNativeFeatures);
}

} // namespace vm