/* Copyright (c) 2008-2011, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */

#include "machine.h"
#include "util.h"
#include "vector.h"
#include "process.h"
#include "assembler.h"
#include "target.h"
#include "compiler.h"
#include "arch.h"

using namespace vm;
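
// These entry points are not defined in this file; they are presumably
// provided by architecture-specific assembly and bridge between native
// code and compiled Java code.  vmInvoke_returnAddress and
// vmInvoke_safeStack appear to label positions within vmInvoke which the
// stack-walking code below compares against.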
extern "C" uint64_t
|
2009-02-17 02:49:28 +00:00
|
|
|
vmInvoke(void* thread, void* function, void* arguments,
|
|
|
|
unsigned argumentFootprint, unsigned frameSize, unsigned returnType);
|
2007-09-26 23:23:03 +00:00
|
|
|
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
extern "C" void
|
|
|
|
vmInvoke_returnAddress();
|
|
|
|
|
|
|
|
extern "C" void
|
|
|
|
vmInvoke_safeStack();
|
|
|
|
|
2009-05-23 22:15:06 +00:00
|
|
|
extern "C" void
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
vmJumpAndInvoke(void* thread, void* function, void* stack,
|
2009-05-27 01:02:39 +00:00
|
|
|
unsigned argumentFootprint, uintptr_t* arguments,
|
|
|
|
unsigned frameSize);

namespace {

namespace local {

const bool DebugCompile = false;
const bool DebugNatives = false;
const bool DebugCallTable = false;
const bool DebugMethodTree = false;
const bool DebugFrameMaps = false;
const bool DebugIntrinsics = false;

const bool CheckArrayBounds = true;

#ifdef AVIAN_CONTINUATIONS
const bool Continuations = true;
#else
const bool Continuations = false;
#endif

const unsigned MaxNativeCallFootprint = TargetBytesPerWord == 8 ? 4 : 5;

const unsigned InitialZoneCapacityInBytes = 64 * 1024;

const unsigned ExecutableAreaSizeInBytes = 30 * 1024 * 1024;
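
// Indices into the processor's table of GC roots (objects the VM must
// keep live and up to date across collections), presumably accessed via
// helpers defined later in this file.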
enum Root {
  CallTable,
  MethodTree,
  MethodTreeSentinal,
  ObjectPools,
  StaticTableArray,
  VirtualThunks,
  ReceiveMethod,
  WindMethod,
  RewindMethod
};
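
// Indices of the helper thunks.  In addition to the entries listed
// explicitly, one entry is generated for each THUNK(s) declared in
// thunks.cpp via the macro expansion below.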
enum ThunkIndex {
  compileMethodIndex,
  compileVirtualMethodIndex,
  invokeNativeIndex,
  throwArrayIndexOutOfBoundsIndex,
  throwStackOverflowIndex,

#define THUNK(s) s##Index,
#include "thunks.cpp"
#undef THUNK

  dummyIndex
};

const unsigned RootCount = RewindMethod + 1;
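
// Returns true if the given instruction pointer falls between the
// vmInvoke_returnAddress and vmInvoke_safeStack labels, i.e. (judging by
// the name) within the portion of vmInvoke where the thread's saved
// stack value is not yet safe to use for building a stack trace.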
inline bool
isVmInvokeUnsafeStack(void* ip)
{
  return reinterpret_cast<uintptr_t>(ip)
    >= reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_returnAddress))
    and reinterpret_cast<uintptr_t>(ip)
    < reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_safeStack));
}

class MyThread;

void*
getIp(MyThread*);

class MyThread: public Thread {
 public:
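  // CallTrace is a stack-allocated guard created when execution leaves
  // compiled Java code for native or VM code: the constructor saves the
  // current ip, stack, scratch, and continuation and publishes the
  // transition via doTransition, and the destructor restores the saved
  // context.  Rough usage sketch (hypothetical):
  // { MyThread::CallTrace trace(t, method); ... call out of Java ... }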
  class CallTrace {
   public:
    CallTrace(MyThread* t, object method):
      t(t),
      ip(getIp(t)),
      stack(t->stack),
      scratch(t->scratch),
      continuation(t->continuation),
      nativeMethod((methodFlags(t, method) & ACC_NATIVE) ? method : 0),
      targetMethod(0),
      originalMethod(method),
      next(t->trace)
    {
      doTransition(t, 0, 0, 0, this);
    }

    ~CallTrace() {
      assert(t, t->stack == 0);

      t->scratch = scratch;

      doTransition(t, ip, stack, continuation, next);
    }

    MyThread* t;
    void* ip;
    void* stack;
    void* scratch;
    object continuation;
    object nativeMethod;
    object targetMethod;
    object originalMethod;
    CallTrace* next;
  };
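
  // Context captures a snapshot of a thread's ip, stack, continuation,
  // and call trace; the nested MyProtector keeps the continuation
  // visible to the garbage collector while the snapshot is live.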
  class Context {
   public:
    class MyProtector: public Thread::Protector {
     public:
      MyProtector(MyThread* t, Context* context):
        Protector(t), context(context)
      { }

      virtual void visit(Heap::Visitor* v) {
        v->visit(&(context->continuation));
      }

      Context* context;
    };

    Context(MyThread* t, void* ip, void* stack, object continuation,
            CallTrace* trace):
      ip(ip),
      stack(stack),
      continuation(continuation),
      trace(trace),
      protector(t, this)
    { }

    void* ip;
    void* stack;
    object continuation;
    CallTrace* trace;
    MyProtector protector;
  };
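
  // TraceContext additionally links the snapshot onto the thread's
  // traceContext list for the duration of a stack walk; the second
  // constructor captures the thread's current context directly.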
  class TraceContext: public Context {
   public:
    TraceContext(MyThread* t, void* ip, void* stack, object continuation,
                 CallTrace* trace):
      Context(t, ip, stack, continuation, trace),
      t(t),
      link(0),
      javaStackLimit(0),
      next(t->traceContext)
    {
      t->traceContext = this;
    }

    TraceContext(MyThread* t, void* link):
      Context(t, t->ip, t->stack, t->continuation, t->trace),
      t(t),
      link(link),
      javaStackLimit(0),
      next(t->traceContext)
    {
      t->traceContext = this;
    }

    ~TraceContext() {
      t->traceContext = next;
    }

    MyThread* t;
    void* link;
    void* javaStackLimit;
    TraceContext* next;
  };
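
  // To update the context fields atomically with respect to threads
  // grabbing stack traces, doTransition (1) stages the new values in a
  // temporary Context, (2) publishes it via t->transition so that
  // interrupting threads read the staged values, (3) updates the fields
  // in the thread object, and (4) clears t->transition, with a memory
  // barrier between each step so the updates become visible in program
  // order.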
  static void doTransition(MyThread* t, void* ip, void* stack,
                           object continuation, MyThread::CallTrace* trace)
  {
    // in this function, we "atomically" update the thread context
    // fields in such a way as to ensure that another thread may
    // interrupt us at any time and still get a consistent, accurate
    // stack trace. See MyProcessor::getStackTrace for details.

    assert(t, t->transition == 0);

    Context c(t, ip, stack, continuation, trace);

    compileTimeMemoryBarrier();

    t->transition = &c;

    compileTimeMemoryBarrier();

    t->ip = ip;
    t->stack = stack;
    t->continuation = continuation;
    t->trace = trace;

    compileTimeMemoryBarrier();

    t->transition = 0;
  }

  MyThread(Machine* m, object javaThread, MyThread* parent,
           bool useNativeFeatures):
    Thread(m, javaThread, parent),
    ip(0),
    stack(0),
    scratch(0),
    continuation(0),
    exceptionStackAdjustment(0),
    exceptionOffset(0),
    exceptionHandler(0),
    tailAddress(0),
    virtualCallTarget(0),
    virtualCallIndex(0),
    heapImage(0),
    codeImage(0),
    thunkTable(0),
    trace(0),
    reference(0),
    arch(parent
         ? parent->arch
         : makeArchitecture(m->system, useNativeFeatures)),
    transition(0),
    traceContext(0),
    stackLimit(0),
    methodLockIsClean(true)
  {
    arch->acquire();
  }
  void* ip;
  void* stack;
  void* scratch;
  object continuation;
  uintptr_t exceptionStackAdjustment;
  uintptr_t exceptionOffset;
  void* exceptionHandler;
  void* tailAddress;
  void* virtualCallTarget;
  uintptr_t virtualCallIndex;
  uintptr_t* heapImage;
  uint8_t* codeImage;
  void** thunkTable;
  CallTrace* trace;
  Reference* reference;
  Assembler::Architecture* arch;
  Context* transition;
  TraceContext* traceContext;
  uintptr_t stackLimit;
  bool methodLockIsClean;
};

void
transition(MyThread* t, void* ip, void* stack, object continuation,
           MyThread::CallTrace* trace)
{
  MyThread::doTransition(t, ip, stack, continuation, trace);
}

unsigned
parameterOffset(MyThread* t, object method)
{
  return methodParameterFootprint(t, method)
    + t->arch->frameFooterSize()
    + t->arch->frameReturnAddressSize() - 1;
}

object
resolveThisPointer(MyThread* t, void* stack)
{
  return reinterpret_cast<object*>(stack)
    [t->arch->frameFooterSize() + t->arch->frameReturnAddressSize()];
}

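// Resolve the method which will actually be invoked for the given
// method and receiver instance: interface methods are looked up in the
// receiver's interface table, virtual methods in its vtable, and
// anything else is returned unchanged.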
object
findMethod(Thread* t, object method, object instance)
{
  if ((methodFlags(t, method) & ACC_STATIC) == 0) {
    if (classFlags(t, methodClass(t, method)) & ACC_INTERFACE) {
      return findInterfaceMethod(t, method, objectClass(t, instance));
    } else if (methodVirtual(t, method)) {
      return findVirtualMethod(t, method, objectClass(t, instance));
    }
  }
  return method;
}

object
resolveTarget(MyThread* t, void* stack, object method)
{
  object class_ = objectClass(t, resolveThisPointer(t, stack));

  if (classVmFlags(t, class_) & BootstrapFlag) {
    PROTECT(t, method);
    PROTECT(t, class_);

    resolveSystemClass(t, root(t, Machine::BootLoader), className(t, class_));
  }

  if (classFlags(t, methodClass(t, method)) & ACC_INTERFACE) {
    return findInterfaceMethod(t, method, class_);
  } else {
    return findVirtualMethod(t, method, class_);
  }
}

object
resolveTarget(MyThread* t, object class_, unsigned index)
{
  if (classVmFlags(t, class_) & BootstrapFlag) {
    PROTECT(t, class_);

    resolveSystemClass(t, root(t, Machine::BootLoader), className(t, class_));
  }

  return arrayBody(t, classVirtualTable(t, class_), index);
}

object&
root(Thread* t, Root root);

void
setRoot(Thread* t, Root root, object value);

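// The size of a method's compiled code is stored in the machine word
// immediately preceding the code itself, hence the [-1] index below.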
unsigned
compiledSize(intptr_t address)
{
  return reinterpret_cast<target_uintptr_t*>(address)[-1];
}

intptr_t
methodCompiled(Thread* t, object method)
{
  return codeCompiled(t, methodCode(t, method));
}

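// Comparator used by methodForIp when searching the method tree:
// returns -1, 0, or 1 according to whether ip lies below, within, or
// above the given method's compiled code.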
intptr_t
compareIpToMethodBounds(Thread* t, intptr_t ip, object method)
{
  intptr_t start = methodCompiled(t, method);

  if (DebugMethodTree) {
    fprintf(stderr, "find %p in (%p,%p)\n",
            reinterpret_cast<void*>(ip),
            reinterpret_cast<void*>(start),
            reinterpret_cast<void*>(start + compiledSize(start)));
  }

  if (ip < start) {
    return -1;
  } else if (ip < start + static_cast<intptr_t>
             (compiledSize(start) + TargetBytesPerWord))
  {
    return 0;
  } else {
    return 1;
  }
}

object
methodForIp(MyThread* t, void* ip)
{
  if (DebugMethodTree) {
    fprintf(stderr, "query for method containing %p\n", ip);
  }

  // we must use a version of the method tree at least as recent as the
  // compiled form of the method containing the specified address (see
  // compile(MyThread*, FixedAllocator*, BootContext*, object)):
  loadMemoryBarrier();

  return treeQuery(t, root(t, MethodTree), reinterpret_cast<intptr_t>(ip),
                   root(t, MethodTreeSentinal), compareIpToMethodBounds);
}

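// Number of local variable slots in a method's frame.  Synchronized
// instance methods get one extra slot (presumably used to remember the
// object whose monitor is held while the method runs).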
unsigned
localSize(MyThread* t, object method)
{
  unsigned size = codeMaxLocals(t, methodCode(t, method));
  if ((methodFlags(t, method) & (ACC_SYNCHRONIZED | ACC_STATIC))
      == ACC_SYNCHRONIZED)
  {
    ++ size;
  }
  return size;
}

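// Total frame footprint for a method: locals which are not parameters,
// plus the maximum operand stack depth, plus room for outgoing native
// calls, rounded up to the target's frame alignment.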
unsigned
alignedFrameSize(MyThread* t, object method)
{
  return t->arch->alignFrameSize
    (localSize(t, method)
     - methodParameterFootprint(t, method)
     + codeMaxStack(t, methodCode(t, method))
     + t->arch->frameFootprint(MaxNativeCallFootprint));
}

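// Given a method and the current ip and sp, compute the caller's ip
// and sp via the architecture-specific unwinder.  When walking another
// thread, the saved link value and Java stack limit come from that
// thread's trace context.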
void
nextFrame(MyThread* t, void** ip, void** sp, object method, object target)
{
  object code = methodCode(t, method);
  intptr_t start = codeCompiled(t, code);
  void* link;
  void* javaStackLimit;

  if (t->traceContext) {
    link = t->traceContext->link;
    javaStackLimit = t->traceContext->javaStackLimit;
  } else {
    link = 0;
    javaStackLimit = 0;
  }

  // fprintf(stderr, "nextFrame %s.%s%s target %s.%s%s ip %p sp %p\n",
  //         &byteArrayBody(t, className(t, methodClass(t, method)), 0),
  //         &byteArrayBody(t, methodName(t, method), 0),
  //         &byteArrayBody(t, methodSpec(t, method), 0),
  //         target
  //         ? &byteArrayBody(t, className(t, methodClass(t, target)), 0)
  //         : 0,
  //         target
  //         ? &byteArrayBody(t, methodName(t, target), 0)
  //         : 0,
  //         target
  //         ? &byteArrayBody(t, methodSpec(t, target), 0)
  //         : 0,
  //         *ip, *sp);

  t->arch->nextFrame
    (reinterpret_cast<void*>(start), compiledSize(start),
     alignedFrameSize(t, method), link, javaStackLimit,
     target ? methodParameterFootprint(t, target) : -1, ip, sp);

  // fprintf(stderr, "next frame ip %p sp %p\n", *ip, *sp);
}

void*
getIp(MyThread* t, void* ip, void* stack)
{
  // Here we use the convention that, if the return address is neither
  // pushed on to the stack automatically as part of the call nor
  // stored in the caller's frame, it will be saved in MyThread::ip
  // instead of on the stack.  See the various implementations of
  // Assembler::saveFrame for details on how this is done.
  return t->arch->returnAddressOffset() < 0 ? ip : t->arch->frameIp(stack);
}

void*
getIp(MyThread* t)
{
  return getIp(t, t->ip, t->stack);
}

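// Walks the stack of a thread, visiting Java frames, native (JNI)
// frames recorded in the thread's CallTrace list, and continuation
// frames.  The walk is driven by the state machine in valid() and
// next() below.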
class MyStackWalker: public Processor::StackWalker {
 public:
  enum State {
    Start,
    Next,
    Trace,
    Continuation,
    Method,
    NativeMethod,
    Finish
  };

  class MyProtector: public Thread::Protector {
   public:
    MyProtector(MyStackWalker* walker):
      Protector(walker->t), walker(walker)
    { }

    virtual void visit(Heap::Visitor* v) {
      v->visit(&(walker->method_));
      v->visit(&(walker->target));
      v->visit(&(walker->continuation));
    }

    MyStackWalker* walker;
  };

  MyStackWalker(MyThread* t):
    t(t),
    state(Start),
    method_(0),
    target(0),
    protector(this)
  {
    if (t->traceContext) {
      ip_ = t->traceContext->ip;
      stack = t->traceContext->stack;
      trace = t->traceContext->trace;
      continuation = t->traceContext->continuation;
    } else {
      ip_ = getIp(t);
      stack = t->stack;
      trace = t->trace;
      continuation = t->continuation;
    }
  }

  MyStackWalker(MyStackWalker* w):
    t(w->t),
    state(w->state),
    ip_(w->ip_),
    stack(w->stack),
    trace(w->trace),
    method_(w->method_),
    target(w->target),
    continuation(w->continuation),
    protector(this)
  { }

  virtual void walk(Processor::StackVisitor* v) {
    for (MyStackWalker it(this); it.valid();) {
      MyStackWalker walker(&it);
      if (not v->visit(&walker)) {
        break;
      }
      it.next();
    }
  }

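  // Advance the state machine until we either reach a frame worth
  // reporting (Method, NativeMethod, or Continuation) and return true,
  // or run out of frames and return false.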
  bool valid() {
    while (true) {
      // fprintf(stderr, "state: %d\n", state);
      switch (state) {
      case Start:
        if (trace and trace->nativeMethod) {
          method_ = trace->nativeMethod;
          state = NativeMethod;
        } else {
          state = Next;
        }
        break;

      case Next:
        if (stack) {
          target = method_;
          method_ = methodForIp(t, ip_);
          if (method_) {
            state = Method;
          } else if (continuation) {
            method_ = continuationMethod(t, continuation);
            state = Continuation;
          } else {
            state = Trace;
          }
        } else {
          state = Trace;
        }
        break;

      case Trace: {
        if (trace) {
          continuation = trace->continuation;
          stack = trace->stack;
          ip_ = trace->ip;
          trace = trace->next;

          state = Start;
        } else {
          state = Finish;
        }
      } break;

      case Continuation:
      case Method:
      case NativeMethod:
        return true;

      case Finish:
        return false;

      default:
        abort(t);
      }
    }
  }

  void next() {
    switch (state) {
    case Continuation:
      continuation = continuationNext(t, continuation);
      break;

    case Method:
      nextFrame(t, &ip_, &stack, method_, target);
      break;

    case NativeMethod:
      break;

    default:
      abort(t);
    }

    state = Next;
  }

  virtual object method() {
    // fprintf(stderr, "method %s.%s\n", &byteArrayBody
    //         (t, className(t, methodClass(t, method_)), 0),
    //         &byteArrayBody(t, methodName(t, method_), 0));
    return method_;
  }

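  // Offset of the current instruction pointer within the method's
  // compiled code; native frames report 0.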
  virtual int ip() {
    switch (state) {
    case Continuation:
      return reinterpret_cast<intptr_t>(continuationAddress(t, continuation))
        - methodCompiled(t, continuationMethod(t, continuation));

    case Method:
      return reinterpret_cast<intptr_t>(ip_) - methodCompiled(t, method_);

    case NativeMethod:
      return 0;

    default:
      abort(t);
    }
  }

  virtual unsigned count() {
    unsigned count = 0;

    for (MyStackWalker walker(this); walker.valid();) {
      walker.next();
      ++ count;
    }

    return count;
  }

  MyThread* t;
  State state;
  void* ip_;
  void* stack;
  MyThread::CallTrace* trace;
  object method_;
  object target;
  object continuation;
  MyProtector protector;
};

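// Offset, in words, of local variable slot v relative to the stack
// frame.  Parameter slots resolve to the caller's frame, beyond the
// frame header and footer, while the remaining locals live within the
// callee's frame.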
int
localOffset(MyThread* t, int v, object method)
{
  int parameterFootprint = methodParameterFootprint(t, method);
  int frameSize = alignedFrameSize(t, method);

  int offset = ((v < parameterFootprint) ?
                (frameSize
                 + parameterFootprint
                 + t->arch->frameFooterSize()
                 + t->arch->frameHeaderSize()
                 - v - 1) :
                (frameSize
                 + parameterFootprint
                 - v - 1));

  assert(t, offset >= 0);
  return offset;
}

int
localOffsetFromStack(MyThread* t, int index, object method)
{
  return localOffset(t, index, method)
    + t->arch->frameReturnAddressSize();
}

object*
localObject(MyThread* t, void* stack, object method, unsigned index)
{
  return static_cast<object*>(stack) + localOffsetFromStack(t, index, method);
}

int
stackOffsetFromFrame(MyThread* t, object method)
{
  return alignedFrameSize(t, method) + t->arch->frameHeaderSize();
}

void*
stackForFrame(MyThread* t, void* frame, object method)
{
  return static_cast<void**>(frame) - stackOffsetFromFrame(t, method);
}

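// A constant pool entry for a compiled method: a Promise whose value
// becomes available once the referenced object has been assigned an
// address.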
class PoolElement: public Promise {
|
2007-12-09 22:45:43 +00:00
|
|
|
public:
|
2008-11-23 23:58:01 +00:00
|
|
|
PoolElement(Thread* t, object target, PoolElement* next):
|
|
|
|
t(t), target(target), address(0), next(next)
|
2007-12-16 00:24:15 +00:00
|
|
|
{ }
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
virtual int64_t value() {
|
|
|
|
assert(t, resolved());
|
|
|
|
return address;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual bool resolved() {
|
|
|
|
return address != 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
Thread* t;
|
|
|
|
object target;
|
|
|
|
intptr_t address;
|
2007-12-31 22:40:56 +00:00
|
|
|
PoolElement* next;
|
2007-12-09 22:45:43 +00:00
|
|
|
};

class Context;
class SubroutineCall;

class Subroutine {
 public:
  Subroutine(unsigned ip, unsigned logIndex, Subroutine* listNext,
             Subroutine* stackNext):
    listNext(listNext),
    stackNext(stackNext),
    calls(0),
    handle(0),
    ip(ip),
    logIndex(logIndex),
    stackIndex(0),
    callCount(0),
    tableIndex(0),
    visited(false)
  { }

  Subroutine* listNext;
  Subroutine* stackNext;
  SubroutineCall* calls;
  Compiler::Subroutine* handle;
  unsigned ip;
  unsigned logIndex;
  unsigned stackIndex;
  unsigned callCount;
  unsigned tableIndex;
  bool visited;
};
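
// Subroutine, SubroutineCall, and SubroutinePath (below) track bytecode
// subroutines created by the jsr/ret instructions.  Each jsr site becomes a
// SubroutineCall, and each distinct chain of nested calls becomes a
// SubroutinePath with its own root table, so GC maps can be kept per path.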

class SubroutinePath;

class SubroutineCall {
 public:
  SubroutineCall(Subroutine* subroutine, Promise* returnAddress):
    subroutine(subroutine),
    returnAddress(returnAddress),
    paths(0),
    next(subroutine->calls)
  {
    subroutine->calls = this;
    ++ subroutine->callCount;
  }

  Subroutine* subroutine;
  Promise* returnAddress;
  SubroutinePath* paths;
  SubroutineCall* next;
};

class SubroutinePath {
 public:
  SubroutinePath(SubroutineCall* call, SubroutinePath* stackNext,
                 uintptr_t* rootTable):
    call(call),
    stackNext(stackNext),
    listNext(call->paths),
    rootTable(rootTable)
  {
    call->paths = this;
  }

  SubroutineCall* call;
  SubroutinePath* stackNext;
  SubroutinePath* listNext;
  uintptr_t* rootTable;
};

void
print(SubroutinePath* path)
{
  if (path) {
    fprintf(stderr, " (");
    while (true) {
      fprintf(stderr, "%p", path->call->returnAddress->resolved() ?
              reinterpret_cast<void*>(path->call->returnAddress->value()) : 0);

      path = path->stackNext;
      if (path) {
        fprintf(stderr, ", ");
      } else {
        break;
      }
    }
    fprintf(stderr, ")");
  }
}

class SubroutineTrace {
 public:
  SubroutineTrace(SubroutinePath* path, SubroutineTrace* next,
                  unsigned mapSize):
    path(path),
    next(next),
    watch(false)
  {
    memset(map, 0, mapSize * BytesPerWord);
  }

  SubroutinePath* path;
  SubroutineTrace* next;
  bool watch;
  uintptr_t map[0];
};

class TraceElement: public TraceHandler {
 public:
  static const unsigned VirtualCall = 1 << 0;
  static const unsigned TailCall = 1 << 1;
  static const unsigned LongCall = 1 << 2;

  TraceElement(Context* context, unsigned ip, object target, unsigned flags,
               TraceElement* next, unsigned mapSize):
    context(context),
    address(0),
    next(next),
    subroutineTrace(0),
    target(target),
    ip(ip),
    subroutineTraceCount(0),
    argumentIndex(0),
    flags(flags),
    watch(false)
  {
    memset(map, 0, mapSize * BytesPerWord);
  }

  virtual void handleTrace(Promise* address, unsigned argumentIndex) {
    if (this->address == 0) {
      this->address = address;
      this->argumentIndex = argumentIndex;
    }
  }

  Context* context;
  Promise* address;
  TraceElement* next;
  SubroutineTrace* subroutineTrace;
  object target;
  unsigned ip;
  unsigned subroutineTraceCount;
  unsigned argumentIndex;
  unsigned flags;
  bool watch;
  uintptr_t map[0];
};
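
// A TraceElement describes one GC point, typically a call site: which target
// (if any) is invoked, where the call's return address ends up (recorded via
// handleTrace), and which frame slots hold object references at that point.
// Like SubroutineTrace, it ends with an inline map[] payload.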

class TraceElementPromise: public Promise {
 public:
  TraceElementPromise(System* s, TraceElement* trace): s(s), trace(trace) { }

  virtual int64_t value() {
    assert(s, resolved());
    return trace->address->value();
  }

  virtual bool resolved() {
    return trace->address != 0 and trace->address->resolved();
  }

  System* s;
  TraceElement* trace;
};

enum Event {
  PushContextEvent,
  PopContextEvent,
  IpEvent,
  MarkEvent,
  ClearEvent,
  PushExceptionHandlerEvent,
  TraceEvent,
  PushSubroutineEvent,
  PopSubroutineEvent
};

unsigned
frameMapSizeInBits(MyThread* t, object method)
{
  return localSize(t, method) + codeMaxStack(t, methodCode(t, method));
}

unsigned
frameMapSizeInWords(MyThread* t, object method)
{
  return ceiling(frameMapSizeInBits(t, method), BitsPerWord);
}

uint16_t*
makeVisitTable(MyThread* t, Zone* zone, object method)
{
  unsigned size = codeLength(t, methodCode(t, method)) * 2;
  uint16_t* table = static_cast<uint16_t*>(zone->allocate(size));
  memset(table, 0, size);
  return table;
}

uintptr_t*
makeRootTable(MyThread* t, Zone* zone, object method)
{
  unsigned size = frameMapSizeInWords(t, method)
    * codeLength(t, methodCode(t, method))
    * BytesPerWord;
  uintptr_t* table = static_cast<uintptr_t*>(zone->allocate(size));
  memset(table, 0xFF, size);
  return table;
}
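
// The visit table keeps a per-bytecode-index visit count used while
// replaying the compilation event log, while the root table holds one frame
// map per bytecode index.  The root table starts as all ones so that, as
// control-flow paths are processed, maps can be ANDed together, keeping only
// the slots that hold references on every path reaching that instruction.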

enum Thunk {
#define THUNK(s) s##Thunk,
#include "thunks.cpp"
#undef THUNK
};

const unsigned ThunkCount = gcIfNecessaryThunk + 1;

intptr_t
getThunk(MyThread* t, Thunk thunk);
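
// Thunks are small out-of-line helper routines (division, floating point
// conversion, GC polling, and so on) that compiled code calls instead of
// emitting the operation inline.  The THUNK macro above expands the list in
// thunks.cpp into one enum constant per thunk.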

class BootContext {
 public:
  class MyProtector: public Thread::Protector {
   public:
    MyProtector(Thread* t, BootContext* c): Protector(t), c(c) { }

    virtual void visit(Heap::Visitor* v) {
      v->visit(&(c->constants));
      v->visit(&(c->calls));
    }

    BootContext* c;
  };

  BootContext(Thread* t, object constants, object calls,
              DelayedPromise* addresses, Zone* zone, OffsetResolver* resolver):
    protector(t, this), constants(constants), calls(calls),
    addresses(addresses), addressSentinal(addresses), zone(zone),
    resolver(resolver)
  { }

  MyProtector protector;
  object constants;
  object calls;
  DelayedPromise* addresses;
  DelayedPromise* addressSentinal;
  Zone* zone;
  OffsetResolver* resolver;
};
|
|
|
|
|
2007-12-31 22:40:56 +00:00
|
|
|
class Context {
|
2007-10-10 22:39:40 +00:00
|
|
|
public:
|
2010-12-27 22:55:23 +00:00
|
|
|
class MyResource: public Thread::Resource {
|
|
|
|
public:
|
|
|
|
MyResource(Context* c): Resource(c->thread), c(c) { }
|
|
|
|
|
|
|
|
virtual void release() {
|
|
|
|
c->dispose();
|
|
|
|
}
|
|
|
|
|
|
|
|
Context* c;
|
|
|
|
};
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
class MyProtector: public Thread::Protector {
|
|
|
|
public:
|
2008-02-11 17:21:41 +00:00
|
|
|
MyProtector(Context* c): Protector(c->thread), c(c) { }
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
virtual void visit(Heap::Visitor* v) {
|
2007-12-31 22:40:56 +00:00
|
|
|
v->visit(&(c->method));
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2007-12-31 22:40:56 +00:00
|
|
|
for (PoolElement* p = c->objectPool; p; p = p->next) {
|
2008-11-23 23:58:01 +00:00
|
|
|
v->visit(&(p->target));
|
2007-12-31 22:40:56 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2007-12-31 22:40:56 +00:00
|
|
|
for (TraceElement* p = c->traceLog; p; p = p->next) {
|
|
|
|
v->visit(&(p->target));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-12-31 22:40:56 +00:00
|
|
|
Context* c;
|
2007-10-10 22:39:40 +00:00
|
|
|
};
|
|
|
|
|
2008-05-31 22:14:27 +00:00
|
|
|
class MyClient: public Compiler::Client {
|
|
|
|
public:
|
|
|
|
MyClient(MyThread* t): t(t) { }
|
|
|
|
|
|
|
|
virtual intptr_t getThunk(UnaryOperation, unsigned) {
|
|
|
|
abort(t);
|
|
|
|
}
|
2009-08-06 16:01:57 +00:00
|
|
|
|
2009-09-20 21:43:32 +00:00
|
|
|
virtual intptr_t getThunk(BinaryOperation op, unsigned size,
|
|
|
|
unsigned resultSize)
|
|
|
|
{
|
2009-10-18 01:26:14 +00:00
|
|
|
if (size == 8) {
|
|
|
|
switch(op) {
|
2009-12-01 16:21:33 +00:00
|
|
|
case Absolute:
|
|
|
|
assert(t, resultSize == 8);
|
|
|
|
return local::getThunk(t, absoluteLongThunk);
|
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
case FloatNegate:
|
|
|
|
assert(t, resultSize == 8);
|
2009-09-20 21:43:32 +00:00
|
|
|
return local::getThunk(t, negateDoubleThunk);
|
2009-10-10 23:46:43 +00:00
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
case FloatSquareRoot:
|
|
|
|
assert(t, resultSize == 8);
|
|
|
|
return local::getThunk(t, squareRootDoubleThunk);
|
|
|
|
|
|
|
|
case Float2Float:
|
|
|
|
assert(t, resultSize == 4);
|
2009-09-20 21:43:32 +00:00
|
|
|
return local::getThunk(t, doubleToFloatThunk);
|
2009-10-10 23:46:43 +00:00
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
case Float2Int:
|
|
|
|
if (resultSize == 8) {
|
|
|
|
return local::getThunk(t, doubleToLongThunk);
|
|
|
|
} else {
|
|
|
|
assert(t, resultSize == 4);
|
|
|
|
return local::getThunk(t, doubleToIntThunk);
|
|
|
|
}
|
2009-10-10 23:46:43 +00:00
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
case Int2Float:
|
|
|
|
if (resultSize == 8) {
|
|
|
|
return local::getThunk(t, longToDoubleThunk);
|
|
|
|
} else {
|
|
|
|
assert(t, resultSize == 4);
|
|
|
|
return local::getThunk(t, longToFloatThunk);
|
|
|
|
}
|
|
|
|
|
|
|
|
default: abort(t);
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
2009-10-18 01:26:14 +00:00
|
|
|
} else {
|
|
|
|
assert(t, size == 4);
|
|
|
|
|
|
|
|
switch(op) {
|
2009-12-01 16:21:33 +00:00
|
|
|
case Absolute:
|
|
|
|
assert(t, resultSize == 4);
|
|
|
|
return local::getThunk(t, absoluteIntThunk);
|
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
case FloatNegate:
|
2009-12-01 16:21:33 +00:00
|
|
|
assert(t, resultSize == 4);
|
2009-10-18 01:26:14 +00:00
|
|
|
return local::getThunk(t, negateFloatThunk);
|
|
|
|
|
|
|
|
case FloatAbsolute:
|
2009-12-01 16:21:33 +00:00
|
|
|
assert(t, resultSize == 4);
|
2009-10-18 01:26:14 +00:00
|
|
|
return local::getThunk(t, absoluteFloatThunk);
|
|
|
|
|
|
|
|
case Float2Float:
|
|
|
|
assert(t, resultSize == 8);
|
|
|
|
return local::getThunk(t, floatToDoubleThunk);
|
|
|
|
|
|
|
|
case Float2Int:
|
|
|
|
if (resultSize == 4) {
|
|
|
|
return local::getThunk(t, floatToIntThunk);
|
|
|
|
} else {
|
|
|
|
assert(t, resultSize == 8);
|
|
|
|
return local::getThunk(t, floatToLongThunk);
|
|
|
|
}
|
|
|
|
|
|
|
|
case Int2Float:
|
|
|
|
if (resultSize == 4) {
|
|
|
|
return local::getThunk(t, intToFloatThunk);
|
|
|
|
} else {
|
|
|
|
assert(t, resultSize == 8);
|
|
|
|
return local::getThunk(t, intToDoubleThunk);
|
|
|
|
}
|
2009-08-06 16:01:57 +00:00
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
default: abort(t);
|
|
|
|
}
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
|
|
|
}
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2010-12-20 00:47:21 +00:00
|
|
|
virtual intptr_t getThunk(TernaryOperation op, unsigned size, unsigned,
|
|
|
|
bool* threadParameter)
|
|
|
|
{
|
|
|
|
*threadParameter = false;
|
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
if (size == 8) {
|
|
|
|
switch (op) {
|
|
|
|
case Divide:
|
2010-12-20 00:47:21 +00:00
|
|
|
*threadParameter = true;
|
2009-08-27 00:26:44 +00:00
|
|
|
return local::getThunk(t, divideLongThunk);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
case Remainder:
|
2010-12-20 00:47:21 +00:00
|
|
|
*threadParameter = true;
|
|
|
|
return local::getThunk(t, moduloLongThunk);
|
2009-10-10 23:46:43 +00:00
|
|
|
|
|
|
|
case FloatAdd:
|
|
|
|
return local::getThunk(t, addDoubleThunk);
|
|
|
|
|
|
|
|
case FloatSubtract:
|
|
|
|
return local::getThunk(t, subtractDoubleThunk);
|
|
|
|
|
|
|
|
case FloatMultiply:
|
|
|
|
return local::getThunk(t, multiplyDoubleThunk);
|
|
|
|
|
|
|
|
case FloatDivide:
|
|
|
|
return local::getThunk(t, divideDoubleThunk);
|
|
|
|
|
2009-10-25 01:29:20 +00:00
|
|
|
case FloatRemainder:
|
|
|
|
return local::getThunk(t, moduloDoubleThunk);
|
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
case JumpIfFloatEqual:
|
|
|
|
case JumpIfFloatNotEqual:
|
|
|
|
case JumpIfFloatLess:
|
|
|
|
case JumpIfFloatGreater:
|
|
|
|
case JumpIfFloatLessOrEqual:
|
|
|
|
case JumpIfFloatGreaterOrUnordered:
|
|
|
|
case JumpIfFloatGreaterOrEqualOrUnordered:
|
|
|
|
return local::getThunk(t, compareDoublesGThunk);
|
|
|
|
|
|
|
|
case JumpIfFloatGreaterOrEqual:
|
|
|
|
case JumpIfFloatLessOrUnordered:
|
|
|
|
case JumpIfFloatLessOrEqualOrUnordered:
|
|
|
|
return local::getThunk(t, compareDoublesLThunk);
|
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
default: abort(t);
|
2008-05-31 22:14:27 +00:00
|
|
|
}
|
2009-10-18 01:26:14 +00:00
|
|
|
} else {
|
|
|
|
assert(t, size == 4);
|
2009-10-10 23:46:43 +00:00
|
|
|
switch (op) {
|
2009-10-29 20:23:20 +00:00
|
|
|
case Divide:
|
2010-12-20 00:47:21 +00:00
|
|
|
*threadParameter = true;
|
2009-10-29 20:23:20 +00:00
|
|
|
return local::getThunk(t, divideIntThunk);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2009-10-29 20:23:20 +00:00
|
|
|
case Remainder:
|
2010-12-20 00:47:21 +00:00
|
|
|
*threadParameter = true;
|
2009-10-29 20:14:44 +00:00
|
|
|
return local::getThunk(t, moduloIntThunk);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
case FloatAdd:
|
2009-09-20 21:43:32 +00:00
|
|
|
return local::getThunk(t, addFloatThunk);
|
2009-10-25 01:29:20 +00:00
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
case FloatSubtract:
|
2009-09-20 21:43:32 +00:00
|
|
|
return local::getThunk(t, subtractFloatThunk);
|
2009-10-25 01:29:20 +00:00
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
case FloatMultiply:
|
2009-09-20 21:43:32 +00:00
|
|
|
return local::getThunk(t, multiplyFloatThunk);
|
2009-10-25 01:29:20 +00:00
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
case FloatDivide:
|
2009-09-20 21:43:32 +00:00
|
|
|
return local::getThunk(t, divideFloatThunk);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2009-10-25 01:29:20 +00:00
|
|
|
case FloatRemainder:
|
|
|
|
return local::getThunk(t, moduloFloatThunk);
|
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
case JumpIfFloatEqual:
|
|
|
|
case JumpIfFloatNotEqual:
|
|
|
|
case JumpIfFloatLess:
|
|
|
|
case JumpIfFloatGreater:
|
|
|
|
case JumpIfFloatLessOrEqual:
|
|
|
|
case JumpIfFloatGreaterOrUnordered:
|
|
|
|
case JumpIfFloatGreaterOrEqualOrUnordered:
|
|
|
|
return local::getThunk(t, compareFloatsGThunk);
|
|
|
|
|
|
|
|
case JumpIfFloatGreaterOrEqual:
|
|
|
|
case JumpIfFloatLessOrUnordered:
|
|
|
|
case JumpIfFloatLessOrEqualOrUnordered:
|
|
|
|
return local::getThunk(t, compareFloatsLThunk);
|
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
default: abort(t);
|
2009-10-10 23:46:43 +00:00
|
|
|
}
|
2008-05-31 22:14:27 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
MyThread* t;
|
|
|
|
};
|
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
Context(MyThread* t, BootContext* bootContext, object method):
|
2008-02-11 17:21:41 +00:00
|
|
|
thread(t),
|
2008-11-25 23:01:30 +00:00
|
|
|
zone(t->m->system, t->m->heap, InitialZoneCapacityInBytes),
|
2008-08-19 23:38:37 +00:00
|
|
|
assembler(makeAssembler(t->m->system, t->m->heap, &zone, t->arch)),
|
2008-05-31 22:14:27 +00:00
|
|
|
client(t),
|
|
|
|
compiler(makeCompiler(t->m->system, assembler, &zone, &client)),
|
2007-10-10 22:39:40 +00:00
|
|
|
method(method),
|
2008-11-23 23:58:01 +00:00
|
|
|
bootContext(bootContext),
|
2007-12-31 22:40:56 +00:00
|
|
|
objectPool(0),
|
2009-06-26 21:36:04 +00:00
|
|
|
subroutines(0),
|
2007-12-31 22:40:56 +00:00
|
|
|
traceLog(0),
|
2008-01-07 14:51:07 +00:00
|
|
|
visitTable(makeVisitTable(t, &zone, method)),
|
2008-01-08 15:24:57 +00:00
|
|
|
rootTable(makeRootTable(t, &zone, method)),
|
2009-07-13 23:49:15 +00:00
|
|
|
subroutineTable(0),
|
2010-12-27 22:55:23 +00:00
|
|
|
executableAllocator(0),
|
|
|
|
executableStart(0),
|
|
|
|
executableSize(0),
|
2009-06-26 21:36:04 +00:00
|
|
|
objectPoolCount(0),
|
|
|
|
traceLogCount(0),
|
|
|
|
dirtyRoots(false),
|
2010-12-19 22:23:19 +00:00
|
|
|
leaf(true),
|
2008-01-14 23:37:24 +00:00
|
|
|
eventLog(t->m->system, t->m->heap, 1024),
|
2011-03-15 23:52:02 +00:00
|
|
|
protector(this),
|
|
|
|
resource(this)
|
2007-12-31 22:40:56 +00:00
|
|
|
{ }
|
|
|
|
|
|
|
|
Context(MyThread* t):
|
2008-02-11 17:21:41 +00:00
|
|
|
thread(t),
|
2008-11-25 23:01:30 +00:00
|
|
|
zone(t->m->system, t->m->heap, InitialZoneCapacityInBytes),
|
2008-08-19 23:38:37 +00:00
|
|
|
assembler(makeAssembler(t->m->system, t->m->heap, &zone, t->arch)),
|
2008-05-31 22:14:27 +00:00
|
|
|
client(t),
|
2008-02-11 17:21:41 +00:00
|
|
|
compiler(0),
|
2007-12-31 22:40:56 +00:00
|
|
|
method(0),
|
2008-11-23 23:58:01 +00:00
|
|
|
bootContext(0),
|
2007-12-31 22:40:56 +00:00
|
|
|
objectPool(0),
|
2009-06-26 21:36:04 +00:00
|
|
|
subroutines(0),
|
2007-12-31 22:40:56 +00:00
|
|
|
traceLog(0),
|
2008-01-07 14:51:07 +00:00
|
|
|
visitTable(0),
|
|
|
|
rootTable(0),
|
2009-07-13 23:49:15 +00:00
|
|
|
subroutineTable(0),
|
2010-12-27 22:55:23 +00:00
|
|
|
executableAllocator(0),
|
|
|
|
executableStart(0),
|
|
|
|
executableSize(0),
|
2009-06-26 21:36:04 +00:00
|
|
|
objectPoolCount(0),
|
|
|
|
traceLogCount(0),
|
|
|
|
dirtyRoots(false),
|
2010-12-19 22:23:19 +00:00
|
|
|
leaf(true),
|
2008-01-14 23:37:24 +00:00
|
|
|
eventLog(t->m->system, t->m->heap, 0),
|
2011-03-15 23:52:02 +00:00
|
|
|
protector(this),
|
|
|
|
resource(this)
|
2007-12-31 22:40:56 +00:00
|
|
|
{ }
|
|
|
|
|
|
|
|
~Context() {
|
2010-12-27 22:55:23 +00:00
|
|
|
dispose();
|
|
|
|
}
|
|
|
|
|
|
|
|
void dispose() {
|
|
|
|
if (compiler) {
|
|
|
|
compiler->dispose();
|
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
assembler->dispose();
|
2010-12-27 22:55:23 +00:00
|
|
|
|
|
|
|
if (executableAllocator) {
|
|
|
|
executableAllocator->free(executableStart, executableSize);
|
|
|
|
}
|
2011-03-15 23:52:02 +00:00
|
|
|
|
|
|
|
eventLog.dispose();
|
|
|
|
|
|
|
|
zone.dispose();
|
2007-12-31 22:40:56 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
MyThread* thread;
|
2007-12-31 22:40:56 +00:00
|
|
|
Zone zone;
|
2008-02-11 17:21:41 +00:00
|
|
|
Assembler* assembler;
|
2008-05-31 22:14:27 +00:00
|
|
|
MyClient client;
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler* compiler;
|
2007-12-31 22:40:56 +00:00
|
|
|
object method;
|
2008-11-23 23:58:01 +00:00
|
|
|
BootContext* bootContext;
|
2007-12-31 22:40:56 +00:00
|
|
|
PoolElement* objectPool;
|
2009-06-26 21:36:04 +00:00
|
|
|
Subroutine* subroutines;
|
2007-12-31 22:40:56 +00:00
|
|
|
TraceElement* traceLog;
|
2008-01-07 14:51:07 +00:00
|
|
|
uint16_t* visitTable;
|
|
|
|
uintptr_t* rootTable;
|
2009-07-13 23:49:15 +00:00
|
|
|
Subroutine** subroutineTable;
|
2010-12-27 22:55:23 +00:00
|
|
|
Allocator* executableAllocator;
|
|
|
|
void* executableStart;
|
|
|
|
unsigned executableSize;
|
2009-06-26 21:36:04 +00:00
|
|
|
unsigned objectPoolCount;
|
|
|
|
unsigned traceLogCount;
|
2008-03-05 21:44:17 +00:00
|
|
|
bool dirtyRoots;
|
2010-12-19 22:23:19 +00:00
|
|
|
bool leaf;
|
2008-01-07 14:51:07 +00:00
|
|
|
Vector eventLog;
|
2007-12-31 22:40:56 +00:00
|
|
|
MyProtector protector;
|
2011-03-15 23:52:02 +00:00
|
|
|
MyResource resource;
|
2007-12-31 22:40:56 +00:00
|
|
|
};
|
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
unsigned
|
|
|
|
translateLocalIndex(Context* context, unsigned footprint, unsigned index)
|
|
|
|
{
|
|
|
|
unsigned parameterFootprint = methodParameterFootprint
|
|
|
|
(context->thread, context->method);
|
|
|
|
|
|
|
|
if (index < parameterFootprint) {
|
|
|
|
return parameterFootprint - index - footprint;
|
|
|
|
} else {
|
|
|
|
return index;
|
|
|
|
}
|
|
|
|
}
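
// translateLocalIndex flips parameter indices, reversing their order
// relative to the bytecode numbering; non-parameter locals pass through
// unchanged.  A small worked example with hypothetical values: if
// parameterFootprint is 3, a one-word parameter at bytecode index 0 maps to
// 2, index 1 maps to 1, and a two-word parameter at index 0 maps to 1
// (3 - 0 - 2).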
|
|
|
|
|
|
|
|
Compiler::Operand*
|
|
|
|
loadLocal(Context* context, unsigned footprint, unsigned index)
|
|
|
|
{
|
|
|
|
return context->compiler->loadLocal
|
|
|
|
(footprint, translateLocalIndex(context, footprint, index));
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
storeLocal(Context* context, unsigned footprint, Compiler::Operand* value,
|
|
|
|
unsigned index)
|
|
|
|
{
|
|
|
|
context->compiler->storeLocal
|
|
|
|
(footprint, value, translateLocalIndex(context, footprint, index));
|
|
|
|
}
|
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
FixedAllocator*
|
|
|
|
codeAllocator(MyThread* t);
|
|
|
|
|
2007-12-31 22:40:56 +00:00
|
|
|
class Frame {
|
|
|
|
public:
|
2008-02-11 17:21:41 +00:00
|
|
|
enum StackType {
|
|
|
|
Integer,
|
|
|
|
Long,
|
|
|
|
Object
|
|
|
|
};
|
|
|
|
|
|
|
|
Frame(Context* context, uint8_t* stackMap):
|
2007-12-31 22:40:56 +00:00
|
|
|
context(context),
|
2008-02-11 17:21:41 +00:00
|
|
|
t(context->thread),
|
|
|
|
c(context->compiler),
|
2009-02-14 20:26:39 +00:00
|
|
|
subroutine(0),
|
2008-01-07 14:51:07 +00:00
|
|
|
stackMap(stackMap),
|
2007-12-09 22:45:43 +00:00
|
|
|
ip(0),
|
2008-01-07 16:01:35 +00:00
|
|
|
sp(localSize()),
|
2008-01-07 14:51:07 +00:00
|
|
|
level(0)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2008-02-11 17:21:41 +00:00
|
|
|
memset(stackMap, 0, codeMaxStack(t, methodCode(t, context->method)));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Frame(Frame* f, uint8_t* stackMap):
|
2007-12-31 22:40:56 +00:00
|
|
|
context(f->context),
|
2008-02-11 17:21:41 +00:00
|
|
|
t(context->thread),
|
|
|
|
c(context->compiler),
|
2009-02-14 20:26:39 +00:00
|
|
|
subroutine(f->subroutine),
|
2008-01-07 14:51:07 +00:00
|
|
|
stackMap(stackMap),
|
2007-12-09 22:45:43 +00:00
|
|
|
ip(f->ip),
|
2008-01-07 14:51:07 +00:00
|
|
|
sp(f->sp),
|
|
|
|
level(f->level + 1)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2008-02-11 17:21:41 +00:00
|
|
|
memcpy(stackMap, f->stackMap, codeMaxStack
|
|
|
|
(t, methodCode(t, context->method)));
|
2008-01-07 14:51:07 +00:00
|
|
|
|
|
|
|
if (level > 1) {
|
2008-07-05 20:21:13 +00:00
|
|
|
context->eventLog.append(PushContextEvent);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
~Frame() {
|
2010-12-27 22:55:23 +00:00
|
|
|
if (level > 1) {
|
|
|
|
context->eventLog.append(PopContextEvent);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
2007-10-10 22:39:40 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* append(object o) {
|
2011-09-20 22:30:30 +00:00
|
|
|
BootContext* bc = context->bootContext;
|
|
|
|
if (bc) {
|
2008-11-27 20:59:40 +00:00
|
|
|
Promise* p = new (bc->zone->allocate(sizeof(ListenPromise)))
|
|
|
|
ListenPromise(t->m->system, bc->zone);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2008-11-27 20:59:40 +00:00
|
|
|
PROTECT(t, o);
|
2008-11-23 23:58:01 +00:00
|
|
|
object pointer = makePointer(t, p);
|
2008-11-27 20:59:40 +00:00
|
|
|
bc->constants = makeTriple(t, o, pointer, bc->constants);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
return c->add
|
|
|
|
(TargetBytesPerWord, c->memory
|
|
|
|
(c->register_(t->arch->thread()), Compiler::AddressType,
|
|
|
|
TargetThreadHeapImage), c->promiseConstant
|
|
|
|
(p, Compiler::AddressType));
|
2008-11-23 23:58:01 +00:00
|
|
|
} else {
|
2010-01-28 00:46:04 +00:00
|
|
|
for (PoolElement* e = context->objectPool; e; e = e->next) {
|
|
|
|
if (o == e->target) {
|
|
|
|
return c->address(e);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
context->objectPool = new
|
|
|
|
(context->zone.allocate(sizeof(PoolElement)))
|
|
|
|
PoolElement(t, o, context->objectPool);
|
|
|
|
|
2008-11-29 01:23:01 +00:00
|
|
|
++ context->objectPoolCount;
|
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
return c->address(context->objectPool);
|
|
|
|
}
|
2007-10-10 22:39:40 +00:00
|
|
|
}
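
  // append interns a constant object for the method being compiled.  When a
  // boot image is being built (bootContext != 0), the object is recorded in
  // bc->constants together with a ListenPromise and loaded indirectly via
  // the thread's heap image; otherwise it is added to the per-method object
  // pool and referenced through the PoolElement's address promise.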
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
unsigned localSize() {
|
2009-08-27 00:26:44 +00:00
|
|
|
return local::localSize(t, context->method);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
unsigned stackSize() {
|
|
|
|
return codeMaxStack(t, methodCode(t, context->method));
|
2007-10-11 22:43:03 +00:00
|
|
|
}
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
unsigned frameSize() {
|
|
|
|
return localSize() + stackSize();
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-11 22:43:03 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
void set(unsigned index, uint8_t type) {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, index < frameSize());
|
2007-10-10 22:39:40 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
if (type == Object) {
|
|
|
|
context->eventLog.append(MarkEvent);
|
|
|
|
context->eventLog.append2(index);
|
|
|
|
} else {
|
|
|
|
context->eventLog.append(ClearEvent);
|
|
|
|
context->eventLog.append2(index);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int si = index - localSize();
|
|
|
|
if (si >= 0) {
|
2008-02-11 17:21:41 +00:00
|
|
|
stackMap[si] = type;
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
2007-10-10 22:39:40 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
uint8_t get(unsigned index) {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, index < frameSize());
|
|
|
|
int si = index - localSize();
|
|
|
|
assert(t, si >= 0);
|
2008-02-11 17:21:41 +00:00
|
|
|
return stackMap[si];
|
2007-10-10 22:39:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void pushedInt() {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp + 1 <= frameSize());
|
2008-02-11 17:21:41 +00:00
|
|
|
set(sp++, Integer);
|
|
|
|
}
|
|
|
|
|
|
|
|
void pushedLong() {
|
|
|
|
assert(t, sp + 2 <= frameSize());
|
|
|
|
set(sp++, Long);
|
|
|
|
set(sp++, Long);
|
2007-10-10 22:39:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void pushedObject() {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp + 1 <= frameSize());
|
2008-02-11 17:21:41 +00:00
|
|
|
set(sp++, Object);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-12-12 22:19:13 +00:00
|
|
|
|
|
|
|
void popped(unsigned count) {
|
|
|
|
assert(t, sp >= count);
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp - count >= localSize());
|
2007-12-12 22:19:13 +00:00
|
|
|
while (count) {
|
2008-02-11 17:21:41 +00:00
|
|
|
set(--sp, Integer);
|
2007-12-12 22:19:13 +00:00
|
|
|
-- count;
|
|
|
|
}
|
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
void poppedInt() {
|
|
|
|
assert(t, sp >= 1);
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp - 1 >= localSize());
|
2008-02-11 17:21:41 +00:00
|
|
|
assert(t, get(sp - 1) == Integer);
|
2007-12-09 22:45:43 +00:00
|
|
|
-- sp;
|
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
void poppedLong() {
|
|
|
|
assert(t, sp >= 2);
|
|
|
|
assert(t, sp - 2 >= localSize());
|
|
|
|
assert(t, get(sp - 1) == Long);
|
|
|
|
assert(t, get(sp - 2) == Long);
|
|
|
|
sp -= 2;
|
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void poppedObject() {
|
|
|
|
assert(t, sp >= 1);
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp - 1 >= localSize());
|
2008-02-11 17:21:41 +00:00
|
|
|
assert(t, get(sp - 1) == Object);
|
|
|
|
set(--sp, Integer);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void storedInt(unsigned index) {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, index < localSize());
|
2008-02-11 17:21:41 +00:00
|
|
|
set(index, Integer);
|
|
|
|
}
|
|
|
|
|
|
|
|
void storedLong(unsigned index) {
|
|
|
|
assert(t, index + 1 < localSize());
|
|
|
|
set(index, Long);
|
|
|
|
set(index + 1, Long);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void storedObject(unsigned index) {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, index < localSize());
|
2008-02-11 17:21:41 +00:00
|
|
|
set(index, Object);
|
2007-10-10 22:39:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void dupped() {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp + 1 <= frameSize());
|
|
|
|
assert(t, sp - 1 >= localSize());
|
2009-02-28 21:41:05 +00:00
|
|
|
set(sp, get(sp - 1));
|
|
|
|
++ sp;
|
2007-10-10 22:39:40 +00:00
|
|
|
}
|
|
|
|
|
2007-10-16 17:21:26 +00:00
|
|
|
void duppedX1() {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp + 1 <= frameSize());
|
|
|
|
assert(t, sp - 2 >= localSize());
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
uint8_t b2 = get(sp - 2);
|
|
|
|
uint8_t b1 = get(sp - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
set(sp - 1, b2);
|
|
|
|
set(sp - 2, b1);
|
|
|
|
set(sp , b1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
++ sp;
|
2007-10-16 17:21:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void duppedX2() {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp + 1 <= frameSize());
|
|
|
|
assert(t, sp - 3 >= localSize());
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
uint8_t b3 = get(sp - 3);
|
|
|
|
uint8_t b2 = get(sp - 2);
|
|
|
|
uint8_t b1 = get(sp - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
set(sp - 2, b3);
|
|
|
|
set(sp - 1, b2);
|
|
|
|
set(sp - 3, b1);
|
|
|
|
set(sp , b1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
++ sp;
|
2007-10-16 17:21:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void dupped2() {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp + 2 <= frameSize());
|
|
|
|
assert(t, sp - 2 >= localSize());
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
uint8_t b2 = get(sp - 2);
|
|
|
|
uint8_t b1 = get(sp - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
set(sp, b2);
|
|
|
|
set(sp + 1, b1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
sp += 2;
|
2007-10-16 17:21:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void dupped2X1() {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp + 2 <= frameSize());
|
|
|
|
assert(t, sp - 3 >= localSize());
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
uint8_t b3 = get(sp - 3);
|
|
|
|
uint8_t b2 = get(sp - 2);
|
|
|
|
uint8_t b1 = get(sp - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
set(sp - 1, b3);
|
|
|
|
set(sp - 3, b2);
|
|
|
|
set(sp , b2);
|
|
|
|
set(sp - 2, b1);
|
|
|
|
set(sp + 1, b1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
sp += 2;
|
2007-10-16 17:21:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void dupped2X2() {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp + 2 <= frameSize());
|
|
|
|
assert(t, sp - 4 >= localSize());
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
uint8_t b4 = get(sp - 4);
|
|
|
|
uint8_t b3 = get(sp - 3);
|
|
|
|
uint8_t b2 = get(sp - 2);
|
|
|
|
uint8_t b1 = get(sp - 1);
|
2007-10-10 22:39:40 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
set(sp - 2, b4);
|
|
|
|
set(sp - 1, b3);
|
|
|
|
set(sp - 4, b2);
|
|
|
|
set(sp , b2);
|
|
|
|
set(sp - 3, b1);
|
|
|
|
set(sp + 1, b1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
sp += 2;
|
2007-10-13 21:48:40 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void swapped() {
|
2008-01-07 16:01:35 +00:00
|
|
|
assert(t, sp - 2 >= localSize());
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
uint8_t saved = get(sp - 1);
|
2007-10-10 22:39:40 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
set(sp - 1, get(sp - 2));
|
|
|
|
set(sp - 2, saved);
|
2007-10-10 22:39:40 +00:00
|
|
|
}
|
|
|
|
|
2008-12-02 16:45:20 +00:00
|
|
|
Promise* addressPromise(Promise* p) {
|
|
|
|
BootContext* bc = context->bootContext;
|
|
|
|
if (bc) {
|
|
|
|
bc->addresses = new (bc->zone->allocate(sizeof(DelayedPromise)))
|
|
|
|
DelayedPromise(t->m->system, bc->zone, p, bc->addresses);
|
|
|
|
return bc->addresses;
|
|
|
|
} else {
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Compiler::Operand* addressOperand(Promise* p) {
|
2011-09-20 22:30:30 +00:00
|
|
|
return c->promiseConstant(p, Compiler::AddressType);
|
|
|
|
}
|
|
|
|
|
|
|
|
Compiler::Operand* absoluteAddressOperand(Promise* p) {
|
|
|
|
return context->bootContext
|
|
|
|
? c->add
|
|
|
|
(TargetBytesPerWord, c->memory
|
|
|
|
(c->register_(t->arch->thread()), Compiler::AddressType,
|
|
|
|
TargetThreadCodeImage), c->promiseConstant
|
|
|
|
(new (context->zone.allocate(sizeof(OffsetPromise)))
|
|
|
|
OffsetPromise
|
|
|
|
(p, - reinterpret_cast<intptr_t>(codeAllocator(t)->base)),
|
|
|
|
Compiler::AddressType))
|
|
|
|
: addressOperand(p);
|
2008-12-02 16:45:20 +00:00
|
|
|
}
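
  // For boot-image builds, absoluteAddressOperand subtracts the code
  // allocator's base from the promised address and adds the thread's code
  // image pointer, so the stored value remains correct when the boot image
  // is loaded at a different address; outside boot-image builds it is just
  // addressOperand.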
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* machineIp(unsigned logicalIp) {
|
2009-09-20 21:43:32 +00:00
|
|
|
return c->promiseConstant(c->machineIp(logicalIp), Compiler::AddressType);
|
2007-12-16 00:24:15 +00:00
|
|
|
}
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
void visitLogicalIp(unsigned ip) {
|
2008-04-20 00:43:12 +00:00
|
|
|
c->visitLogicalIp(ip);
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
context->eventLog.append(IpEvent);
|
2008-01-07 16:01:35 +00:00
|
|
|
context->eventLog.append2(ip);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void startLogicalIp(unsigned ip) {
|
2009-07-13 23:49:15 +00:00
|
|
|
if (subroutine) {
|
|
|
|
context->subroutineTable[ip] = subroutine;
|
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
c->startLogicalIp(ip);
|
2007-12-26 16:56:14 +00:00
|
|
|
|
2008-04-20 05:23:08 +00:00
|
|
|
context->eventLog.append(IpEvent);
|
|
|
|
context->eventLog.append2(ip);
|
2007-12-26 16:56:14 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
this->ip = ip;
|
2007-12-26 16:56:14 +00:00
|
|
|
}
|
|
|
|
|
2008-11-02 22:25:51 +00:00
|
|
|
void pushQuiet(unsigned footprint, Compiler::Operand* o) {
|
|
|
|
c->push(footprint, o);
|
2008-09-23 21:18:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void pushLongQuiet(Compiler::Operand* o) {
|
2008-11-02 22:25:51 +00:00
|
|
|
pushQuiet(2, o);
|
2008-07-05 20:21:13 +00:00
|
|
|
}
|
|
|
|
|
2008-11-02 22:25:51 +00:00
|
|
|
Compiler::Operand* popQuiet(unsigned footprint) {
|
|
|
|
return c->pop(footprint);
|
2008-09-23 21:18:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Compiler::Operand* popLongQuiet() {
|
2008-11-02 22:25:51 +00:00
|
|
|
Compiler::Operand* r = popQuiet(2);
|
2008-07-05 20:21:13 +00:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
void pushInt(Compiler::Operand* o) {
|
2008-11-02 22:25:51 +00:00
|
|
|
pushQuiet(1, o);
|
2007-12-09 22:45:43 +00:00
|
|
|
pushedInt();
|
2007-10-11 22:43:03 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
void pushAddress(Compiler::Operand* o) {
|
2008-11-02 22:25:51 +00:00
|
|
|
pushQuiet(1, o);
|
2007-12-26 23:59:55 +00:00
|
|
|
pushedInt();
|
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
void pushObject(Compiler::Operand* o) {
|
2008-11-02 22:25:51 +00:00
|
|
|
pushQuiet(1, o);
|
2007-12-09 22:45:43 +00:00
|
|
|
pushedObject();
|
2007-10-11 22:43:03 +00:00
|
|
|
}
|
|
|
|
|
2007-12-12 22:19:13 +00:00
|
|
|
void pushObject() {
|
2008-07-05 20:21:13 +00:00
|
|
|
c->pushed();
|
2007-12-12 22:19:13 +00:00
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
pushedObject();
|
2008-02-12 02:06:12 +00:00
|
|
|
}
|
2007-12-16 21:30:19 +00:00
|
|
|
|
2008-02-12 02:06:12 +00:00
|
|
|
void pushLong(Compiler::Operand* o) {
|
2008-09-23 21:18:41 +00:00
|
|
|
pushLongQuiet(o);
|
2008-02-11 17:21:41 +00:00
|
|
|
pushedLong();
|
2007-10-11 22:43:03 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void pop(unsigned count) {
|
2007-12-12 22:19:13 +00:00
|
|
|
popped(count);
|
2009-05-15 02:08:01 +00:00
|
|
|
c->popped(count);
|
2007-10-11 22:43:03 +00:00
|
|
|
}
|
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* popInt() {
|
2007-12-09 22:45:43 +00:00
|
|
|
poppedInt();
|
2008-11-02 22:25:51 +00:00
|
|
|
return popQuiet(1);
|
2008-01-03 18:37:00 +00:00
|
|
|
}
|
2007-12-16 21:30:19 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* popLong() {
|
|
|
|
poppedLong();
|
2008-09-23 21:18:41 +00:00
|
|
|
return popLongQuiet();
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2009-08-06 16:01:57 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* popObject() {
|
2007-12-09 22:45:43 +00:00
|
|
|
poppedObject();
|
2008-11-02 22:25:51 +00:00
|
|
|
return popQuiet(1);
|
2007-10-11 22:43:03 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void loadInt(unsigned index) {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, index < localSize());
|
2009-05-03 20:57:11 +00:00
|
|
|
pushInt(loadLocal(context, 1, index));
|
2007-10-10 22:39:40 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void loadLong(unsigned index) {
|
2008-02-11 17:21:41 +00:00
|
|
|
assert(t, index < static_cast<unsigned>(localSize() - 1));
|
2009-05-03 20:57:11 +00:00
|
|
|
pushLong(loadLocal(context, 2, index));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-11 22:43:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void loadObject(unsigned index) {
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, index < localSize());
|
2009-05-03 20:57:11 +00:00
|
|
|
pushObject(loadLocal(context, 1, index));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-11 22:43:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void storeInt(unsigned index) {
|
2009-05-03 20:57:11 +00:00
|
|
|
storeLocal(context, 1, popInt(), index);
|
2009-05-18 15:16:17 +00:00
|
|
|
storedInt(translateLocalIndex(context, 1, index));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-11 22:43:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void storeLong(unsigned index) {
|
2009-05-03 20:57:11 +00:00
|
|
|
storeLocal(context, 2, popLong(), index);
|
2009-05-18 15:16:17 +00:00
|
|
|
storedLong(translateLocalIndex(context, 2, index));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-11 22:43:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
void storeObject(unsigned index) {
|
2009-05-03 20:57:11 +00:00
|
|
|
storeLocal(context, 1, popObject(), index);
|
2009-05-18 15:16:17 +00:00
|
|
|
storedObject(translateLocalIndex(context, 1, index));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-11 22:43:03 +00:00
|
|
|
|
2007-12-26 23:59:55 +00:00
|
|
|
void storeObjectOrAddress(unsigned index) {
|
2009-05-03 20:57:11 +00:00
|
|
|
storeLocal(context, 1, popQuiet(1), index);
|
2007-12-26 23:59:55 +00:00
|
|
|
|
|
|
|
assert(t, sp >= 1);
|
2008-01-07 14:51:07 +00:00
|
|
|
assert(t, sp - 1 >= localSize());
|
2008-02-11 17:21:41 +00:00
|
|
|
if (get(sp - 1) == Object) {
|
2009-05-18 15:16:17 +00:00
|
|
|
storedObject(translateLocalIndex(context, 1, index));
|
2007-12-26 23:59:55 +00:00
|
|
|
} else {
|
2009-05-18 15:16:17 +00:00
|
|
|
storedInt(translateLocalIndex(context, 1, index));
|
2007-12-26 23:59:55 +00:00
|
|
|
}
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
popped(1);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
  void dup() {
    pushQuiet(1, c->peek(1, 0));

    dupped();
  }

  void dupX1() {
    Compiler::Operand* s0 = popQuiet(1);
    Compiler::Operand* s1 = popQuiet(1);

    pushQuiet(1, s0);
    pushQuiet(1, s1);
    pushQuiet(1, s0);

    duppedX1();
  }

  void dupX2() {
    Compiler::Operand* s0 = popQuiet(1);

    if (get(sp - 2) == Long) {
      Compiler::Operand* s1 = popLongQuiet();

      pushQuiet(1, s0);
      pushLongQuiet(s1);
      pushQuiet(1, s0);
    } else {
      Compiler::Operand* s1 = popQuiet(1);
      Compiler::Operand* s2 = popQuiet(1);

      pushQuiet(1, s0);
      pushQuiet(1, s2);
      pushQuiet(1, s1);
      pushQuiet(1, s0);
    }

    duppedX2();
  }

  void dup2() {
    if (get(sp - 1) == Long) {
      pushLongQuiet(c->peek(2, 0));
    } else {
      Compiler::Operand* s0 = popQuiet(1);
      Compiler::Operand* s1 = popQuiet(1);

      pushQuiet(1, s1);
      pushQuiet(1, s0);
      pushQuiet(1, s1);
      pushQuiet(1, s0);
    }

    dupped2();
  }

  void dup2X1() {
    if (get(sp - 1) == Long) {
      Compiler::Operand* s0 = popLongQuiet();
      Compiler::Operand* s1 = popQuiet(1);

      pushLongQuiet(s0);
      pushQuiet(1, s1);
      pushLongQuiet(s0);
    } else {
      Compiler::Operand* s0 = popQuiet(1);
      Compiler::Operand* s1 = popQuiet(1);
      Compiler::Operand* s2 = popQuiet(1);

      pushQuiet(1, s1);
      pushQuiet(1, s0);
      pushQuiet(1, s2);
      pushQuiet(1, s1);
      pushQuiet(1, s0);
    }

    dupped2X1();
  }

  void dup2X2() {
    if (get(sp - 1) == Long) {
      Compiler::Operand* s0 = popLongQuiet();

      if (get(sp - 3) == Long) {
        Compiler::Operand* s1 = popLongQuiet();

        pushLongQuiet(s0);
        pushLongQuiet(s1);
        pushLongQuiet(s0);
      } else {
        Compiler::Operand* s1 = popQuiet(1);
        Compiler::Operand* s2 = popQuiet(1);

        pushLongQuiet(s0);
        pushQuiet(1, s2);
        pushQuiet(1, s1);
        pushLongQuiet(s0);
      }
    } else {
      Compiler::Operand* s0 = popQuiet(1);
      Compiler::Operand* s1 = popQuiet(1);
      Compiler::Operand* s2 = popQuiet(1);
      Compiler::Operand* s3 = popQuiet(1);

      pushQuiet(1, s1);
      pushQuiet(1, s0);
      pushQuiet(1, s3);
      pushQuiet(1, s2);
      pushQuiet(1, s1);
      pushQuiet(1, s0);
    }

    dupped2X2();
  }

  void swap() {
    Compiler::Operand* s0 = popQuiet(1);
    Compiler::Operand* s1 = popQuiet(1);

    pushQuiet(1, s0);
    pushQuiet(1, s1);

    swapped();
  }
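
  // trace() records a GC point: it allocates a TraceElement (with room
  // for the frame's reference map) in the compilation zone, links it
  // into the context's trace log, and appends a TraceEvent to the event
  // log so later passes can associate a frame map with this point.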
  TraceElement* trace(object target, unsigned flags) {
    unsigned mapSize = frameMapSizeInWords(t, context->method);

    TraceElement* e = context->traceLog = new
      (context->zone.allocate(sizeof(TraceElement) + (mapSize * BytesPerWord)))
      TraceElement(context, ip, target, flags, context->traceLog, mapSize);

    ++ context->traceLogCount;

    context->eventLog.append(TraceEvent);
    context->eventLog.appendAddress(e);

    return e;
  }
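
  // The next three methods handle JSR/RET subroutines.  startSubroutine
  // pushes the return address, finds or creates a Subroutine record for
  // the target ip (allocating the per-method subroutine table on first
  // use), and logs a PushSubroutineEvent.  returnFromSubroutine emits
  // the actual return and remembers where the return address lives on
  // the stack.  endSubroutine links the compiled subroutine, records a
  // PopSubroutineEvent, and patches the event log slot reserved by
  // startSubroutine with the index of the following event.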
  unsigned startSubroutine(unsigned ip, Promise* returnAddress) {
    pushAddress(absoluteAddressOperand(returnAddress));

    Subroutine* subroutine = 0;
    for (Subroutine* s = context->subroutines; s; s = s->listNext) {
      if (s->ip == ip) {
        subroutine = s;
        break;
      }
    }

    if (subroutine == 0) {
      context->subroutines = subroutine = new
        (context->zone.allocate(sizeof(Subroutine)))
        Subroutine(ip, context->eventLog.length() + 1 + BytesPerWord + 2,
                   context->subroutines, this->subroutine);

      if (context->subroutineTable == 0) {
        unsigned size = codeLength(t, methodCode(t, context->method))
          * sizeof(Subroutine*);

        context->subroutineTable = static_cast<Subroutine**>
          (context->zone.allocate(size));

        memset(context->subroutineTable, 0, size);
      }
    }

    subroutine->handle = c->startSubroutine();
    this->subroutine = subroutine;

    SubroutineCall* call = new
      (context->zone.allocate(sizeof(SubroutineCall)))
      SubroutineCall(subroutine, returnAddress);

    context->eventLog.append(PushSubroutineEvent);
    context->eventLog.appendAddress(call);

    unsigned nextIndexIndex = context->eventLog.length();
    context->eventLog.append2(0);

    c->saveLocals();

    return nextIndexIndex;
  }

  void returnFromSubroutine(unsigned returnAddressLocal) {
    c->returnFromSubroutine
      (subroutine->handle, loadLocal(context, 1, returnAddressLocal));

    subroutine->stackIndex = localOffsetFromStack
      (t, translateLocalIndex(context, 1, returnAddressLocal),
       context->method);
  }

  void endSubroutine(unsigned nextIndexIndex) {
    c->linkSubroutine(subroutine->handle);

    poppedInt();

    context->eventLog.append(PopSubroutineEvent);

    context->eventLog.set2(nextIndexIndex, context->eventLog.length());

    subroutine = subroutine->stackNext;
  }

  Context* context;
  MyThread* t;
  Compiler* c;
  Subroutine* subroutine;
  uint8_t* stackMap;
  unsigned ip;
  unsigned sp;
  unsigned level;
};

unsigned
savedTargetIndex(MyThread* t, object method)
{
  return codeMaxLocals(t, methodCode(t, method));
}

object
findCallNode(MyThread* t, void* address);

void
insertCallNode(MyThread* t, object node);
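
// findExceptionHandler scans the method's exception handler table for an
// entry covering the faulting ip.  The table's first element is an int
// array of (start, end, handler) triples expressed as offsets into the
// compiled code; the remaining elements are the corresponding catch
// types.  It returns the machine address of the matching handler, or 0
// if none applies.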
void*
findExceptionHandler(Thread* t, object method, void* ip)
{
  if (t->exception) {
    object table = codeExceptionHandlerTable(t, methodCode(t, method));
    if (table) {
      object index = arrayBody(t, table, 0);

      uint8_t* compiled = reinterpret_cast<uint8_t*>
        (methodCompiled(t, method));

      for (unsigned i = 0; i < arrayLength(t, table) - 1; ++i) {
        unsigned start = intArrayBody(t, index, i * 3);
        unsigned end = intArrayBody(t, index, (i * 3) + 1);
        unsigned key = difference(ip, compiled) - 1;

        if (key >= start and key < end) {
          object catchType = arrayBody(t, table, i + 1);

          if (exceptionMatch(t, catchType, t->exception)) {
            return compiled + intArrayBody(t, index, (i * 3) + 2);
          }
        }
      }
    }
  }

  return 0;
}

void
releaseLock(MyThread* t, object method, void* stack)
{
  if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
    if (t->methodLockIsClean) {
      object lock;
      if (methodFlags(t, method) & ACC_STATIC) {
        lock = methodClass(t, method);
      } else {
        lock = *localObject
          (t, stackForFrame(t, stack, method), method,
           savedTargetIndex(t, method));
      }

      release(t, lock);
    } else {
      // got an exception while trying to acquire the lock for a
      // synchronized method -- don't try to release it, since we
      // never succeeded in acquiring it.
      t->methodLockIsClean = true;
    }
  }
}
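
// findUnwindTarget walks the stack from the current frame towards its
// callers, looking for a frame whose method has a handler for the
// pending exception.  If it finds one, it reports the handler's address,
// stack and frame pointers and stores the exception in the slot the
// handler expects; otherwise it unwinds out of the compiled code
// entirely, consulting the continuation chain (when continuations are
// enabled) for a handler along the way.  Synchronized methods are
// unlocked as their frames are discarded.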
void
findUnwindTarget(MyThread* t, void** targetIp, void** targetFrame,
                 void** targetStack, object* targetContinuation)
{
  void* ip;
  void* stack;
  object continuation;

  if (t->traceContext) {
    ip = t->traceContext->ip;
    stack = t->traceContext->stack;
    continuation = t->traceContext->continuation;
  } else {
    ip = getIp(t);
    stack = t->stack;
    continuation = t->continuation;
  }

  object target = t->trace->targetMethod;

  *targetIp = 0;
  while (*targetIp == 0) {
    object method = methodForIp(t, ip);
    if (method) {
      void* handler = findExceptionHandler(t, method, ip);

      if (handler) {
        *targetIp = handler;

        nextFrame(t, &ip, &stack, method, target);

        void** sp = static_cast<void**>(stackForFrame(t, stack, method))
          + t->arch->frameReturnAddressSize();

        *targetFrame = static_cast<void**>
          (stack) + t->arch->framePointerOffset();
        *targetStack = sp;
        *targetContinuation = continuation;

        sp[localOffset(t, localSize(t, method), method)] = t->exception;

        t->exception = 0;
      } else {
        nextFrame(t, &ip, &stack, method, target);

        if (t->exception) {
          releaseLock(t, method, stack);
        }

        target = method;
      }
    } else {
      expect(t, ip);
      *targetIp = ip;
      *targetFrame = 0;
      *targetStack = static_cast<void**>(stack)
        + t->arch->frameReturnAddressSize();
      *targetContinuation = continuation;

      while (Continuations and *targetContinuation) {
        object c = *targetContinuation;

        object method = continuationMethod(t, c);

        void* handler = findExceptionHandler
          (t, method, continuationAddress(t, c));

        if (handler) {
          t->exceptionHandler = handler;

          t->exceptionStackAdjustment
            = (stackOffsetFromFrame(t, method)
               - ((continuationFramePointerOffset(t, c) / BytesPerWord)
                  - t->arch->framePointerOffset()
                  + t->arch->frameReturnAddressSize())) * BytesPerWord;

          t->exceptionOffset
            = localOffset(t, localSize(t, method), method) * BytesPerWord;

          break;
        } else if (t->exception) {
          releaseLock(t, method,
                      reinterpret_cast<uint8_t*>(c)
                      + ContinuationBody
                      + continuationReturnAddressOffset(t, c)
                      - t->arch->returnAddressOffset());
        }

        *targetContinuation = continuationNext(t, c);
      }
    }
  }
}
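
// makeCurrentContinuation captures the Java portion of the current stack
// as a chain of continuation objects.  For each compiled frame it
// computes the frame's extent, allocates a continuation large enough to
// hold a copy, memcpys the frame in, and links the continuations from
// callee to caller, finally attaching the chain to any continuation the
// thread already had.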
object
makeCurrentContinuation(MyThread* t, void** targetIp, void** targetStack)
{
  void* ip = getIp(t);
  void* stack = t->stack;

  object context = t->continuation
    ? continuationContext(t, t->continuation)
    : makeContinuationContext(t, 0, 0, 0, 0, t->trace->originalMethod);
  PROTECT(t, context);

  object target = t->trace->targetMethod;
  PROTECT(t, target);

  object first = 0;
  PROTECT(t, first);

  object last = 0;
  PROTECT(t, last);

  *targetIp = 0;
  while (*targetIp == 0) {
    object method = methodForIp(t, ip);
    if (method) {
      PROTECT(t, method);

      void** top = static_cast<void**>(stack)
        + t->arch->frameReturnAddressSize()
        + t->arch->frameFooterSize();
      unsigned argumentFootprint
        = t->arch->argumentFootprint(methodParameterFootprint(t, target));
      unsigned alignment = t->arch->stackAlignmentInWords();
      if (TailCalls and argumentFootprint > alignment) {
        top += argumentFootprint - alignment;
      }

      void* nextIp = ip;
      nextFrame(t, &nextIp, &stack, method, target);

      void** bottom = static_cast<void**>(stack)
        + t->arch->frameReturnAddressSize();
      unsigned frameSize = bottom - top;
      unsigned totalSize = frameSize
        + t->arch->frameFooterSize()
        + t->arch->argumentFootprint(methodParameterFootprint(t, method));

      object c = makeContinuation
        (t, 0, context, method, ip,
         (frameSize
          + t->arch->frameFooterSize()
          + t->arch->returnAddressOffset()
          - t->arch->frameReturnAddressSize()) * BytesPerWord,
         (frameSize
          + t->arch->frameFooterSize()
          + t->arch->framePointerOffset()
          - t->arch->frameReturnAddressSize()) * BytesPerWord,
         totalSize);

      memcpy(&continuationBody(t, c, 0), top, totalSize * BytesPerWord);

      if (last) {
        set(t, last, ContinuationNext, c);
      } else {
        first = c;
      }
      last = c;

      ip = nextIp;

      target = method;
    } else {
      *targetIp = ip;
      *targetStack = static_cast<void**>(stack)
        + t->arch->frameReturnAddressSize();
    }
  }

  expect(t, last);
  set(t, last, ContinuationNext, t->continuation);

  return first;
}
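
// unwind transfers control to the handler (or exit point) chosen by
// findUnwindTarget: it clears the trace's target and native method,
// publishes the new ip, stack and continuation via transition(), and
// then jumps there with vmJump, never returning.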
void NO_RETURN
unwind(MyThread* t)
{
  void* ip;
  void* frame;
  void* stack;
  object continuation;
  findUnwindTarget(t, &ip, &frame, &stack, &continuation);

  t->trace->targetMethod = 0;
  t->trace->nativeMethod = 0;

  transition(t, ip, stack, continuation, t->trace);

  vmJump(ip, frame, stack, t, 0, 0);
}

class MyCheckpoint: public Thread::Checkpoint {
 public:
  MyCheckpoint(MyThread* t): Checkpoint(t) { }

  virtual void unwind() {
    local::unwind(static_cast<MyThread*>(t));
  }
};

uintptr_t
defaultThunk(MyThread* t);

uintptr_t
nativeThunk(MyThread* t);

uintptr_t
bootNativeThunk(MyThread* t);

uintptr_t
aioobThunk(MyThread* t);

uintptr_t
stackOverflowThunk(MyThread* t);

uintptr_t
virtualThunk(MyThread* t, unsigned index);

bool
unresolved(MyThread* t, uintptr_t methodAddress);

uintptr_t
methodAddress(Thread* t, object method)
{
  if (methodFlags(t, method) & ACC_NATIVE) {
    return bootNativeThunk(static_cast<MyThread*>(t));
  } else {
    return methodCompiled(t, method);
  }
}

void
tryInitClass(MyThread* t, object class_)
{
  initClass(t, class_);
}

void
compile(MyThread* t, FixedAllocator* allocator, BootContext* bootContext,
        object method);

object
resolveMethod(Thread* t, object pair)
{
  object reference = pairSecond(t, pair);
  PROTECT(t, reference);

  object class_ = resolveClassInObject
    (t, classLoader(t, methodClass(t, pairFirst(t, pair))), reference,
     ReferenceClass);

  return findInHierarchy
    (t, class_, referenceName(t, reference), referenceSpec(t, reference),
     findMethodInClass, Machine::NoSuchMethodErrorType);
}

bool
methodAbstract(Thread* t, object method)
{
  return methodCode(t, method) == 0
    and (methodFlags(t, method) & ACC_NATIVE) == 0;
}
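
// prepareMethodForCall is the common path for the invocation thunks
// below: it rejects abstract targets with an AbstractMethodError,
// compiles the target lazily if it has not been compiled yet, records
// native targets in the thread's trace, and returns the address to
// call.  The find*FromReference helpers first resolve a symbolic
// (class, name, spec) reference, then funnel through it.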
int64_t
prepareMethodForCall(MyThread* t, object target)
{
  if (methodAbstract(t, target)) {
    throwNew(t, Machine::AbstractMethodErrorType, "%s.%s%s",
             &byteArrayBody(t, className(t, methodClass(t, target)), 0),
             &byteArrayBody(t, methodName(t, target), 0),
             &byteArrayBody(t, methodSpec(t, target), 0));
  } else {
    if (unresolved(t, methodAddress(t, target))) {
      PROTECT(t, target);

      compile(t, codeAllocator(t), 0, target);
    }

    if (methodFlags(t, target) & ACC_NATIVE) {
      t->trace->nativeMethod = target;
    }

    return methodAddress(t, target);
  }
}

int64_t
findInterfaceMethodFromInstance(MyThread* t, object method, object instance)
{
  if (instance) {
    return prepareMethodForCall
      (t, findInterfaceMethod(t, method, objectClass(t, instance)));
  } else {
    throwNew(t, Machine::NullPointerExceptionType);
  }
}

int64_t
findInterfaceMethodFromInstanceAndReference
(MyThread* t, object pair, object instance)
{
  PROTECT(t, instance);

  object method = resolveMethod(t, pair);

  return findInterfaceMethodFromInstance(t, method, instance);
}

int64_t
findSpecialMethodFromReference(MyThread* t, object pair)
{
  PROTECT(t, pair);

  object target = resolveMethod(t, pair);

  object class_ = methodClass(t, pairFirst(t, pair));
  if (isSpecialMethod(t, target, class_)) {
    target = findVirtualMethod(t, target, classSuper(t, class_));
  }

  assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);

  return prepareMethodForCall(t, target);
}

int64_t
findStaticMethodFromReference(MyThread* t, object pair)
{
  object target = resolveMethod(t, pair);

  assert(t, methodFlags(t, target) & ACC_STATIC);

  return prepareMethodForCall(t, target);
}

int64_t
findVirtualMethodFromReference(MyThread* t, object pair, object instance)
{
  PROTECT(t, instance);

  object target = resolveMethod(t, pair);

  target = findVirtualMethod(t, target, objectClass(t, instance));

  assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);

  return prepareMethodForCall(t, target);
}

int64_t
getMethodAddress(MyThread* t, object target)
{
  return prepareMethodForCall(t, target);
}

int64_t
getJClassFromReference(MyThread* t, object pair)
{
  return reinterpret_cast<intptr_t>
    (getJClass
     (t, resolveClass
      (t, classLoader(t, methodClass(t, pairFirst(t, pair))),
       referenceName(t, pairSecond(t, pair)))));
}
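
// The comparison thunks below implement the dcmpg/dcmpl and fcmpg/fcmpl
// bytecodes.  The two variants differ only in how they treat NaN: the
// "G" versions fall through to return 1 when either operand is NaN,
// while the "L" versions return -1.  Note the reversed (b, a) parameter
// order.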
int64_t
compareDoublesG(uint64_t bi, uint64_t ai)
{
  double a = bitsToDouble(ai);
  double b = bitsToDouble(bi);

  if (a < b) {
    return -1;
  } else if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return 1;
  }
}

int64_t
compareDoublesL(uint64_t bi, uint64_t ai)
{
  double a = bitsToDouble(ai);
  double b = bitsToDouble(bi);

  if (a < b) {
    return -1;
  } else if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return -1;
  }
}

int64_t
compareFloatsG(uint32_t bi, uint32_t ai)
{
  float a = bitsToFloat(ai);
  float b = bitsToFloat(bi);

  if (a < b) {
    return -1;
  } else if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return 1;
  }
}

int64_t
compareFloatsL(uint32_t bi, uint32_t ai)
{
  float a = bitsToFloat(ai);
  float b = bitsToFloat(bi);

  if (a < b) {
    return -1;
  } else if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return -1;
  }
}

int64_t
compareLongs(uint64_t b, uint64_t a)
{
  if (a < b) {
    return -1;
  } else if (a > b) {
    return 1;
  } else {
    return 0;
  }
}

uint64_t
addDouble(uint64_t b, uint64_t a)
{
  return doubleToBits(bitsToDouble(a) + bitsToDouble(b));
}

uint64_t
subtractDouble(uint64_t b, uint64_t a)
{
  return doubleToBits(bitsToDouble(a) - bitsToDouble(b));
}

uint64_t
multiplyDouble(uint64_t b, uint64_t a)
{
  return doubleToBits(bitsToDouble(a) * bitsToDouble(b));
}

uint64_t
divideDouble(uint64_t b, uint64_t a)
{
  return doubleToBits(bitsToDouble(a) / bitsToDouble(b));
}

uint64_t
moduloDouble(uint64_t b, uint64_t a)
{
  return doubleToBits(fmod(bitsToDouble(a), bitsToDouble(b)));
}

uint64_t
negateDouble(uint64_t a)
{
  return doubleToBits(- bitsToDouble(a));
}

uint64_t
squareRootDouble(uint64_t a)
{
  return doubleToBits(sqrt(bitsToDouble(a)));
}

uint64_t
doubleToFloat(int64_t a)
{
  return floatToBits(static_cast<float>(bitsToDouble(a)));
}

int64_t
doubleToInt(int64_t a)
{
  return static_cast<int32_t>(bitsToDouble(a));
}

int64_t
doubleToLong(int64_t a)
{
  return static_cast<int64_t>(bitsToDouble(a));
}

uint64_t
addFloat(uint32_t b, uint32_t a)
{
  return floatToBits(bitsToFloat(a) + bitsToFloat(b));
}

uint64_t
subtractFloat(uint32_t b, uint32_t a)
{
  return floatToBits(bitsToFloat(a) - bitsToFloat(b));
}

uint64_t
multiplyFloat(uint32_t b, uint32_t a)
{
  return floatToBits(bitsToFloat(a) * bitsToFloat(b));
}

uint64_t
divideFloat(uint32_t b, uint32_t a)
{
  return floatToBits(bitsToFloat(a) / bitsToFloat(b));
}

uint64_t
moduloFloat(uint32_t b, uint32_t a)
{
  return floatToBits(fmod(bitsToFloat(a), bitsToFloat(b)));
}

uint64_t
negateFloat(uint32_t a)
{
  return floatToBits(- bitsToFloat(a));
}

uint64_t
absoluteFloat(uint32_t a)
{
  return floatToBits(fabsf(bitsToFloat(a)));
}

int64_t
absoluteLong(int64_t a)
{
  return a > 0 ? a : -a;
}

int64_t
absoluteInt(int32_t a)
{
  return a > 0 ? a : -a;
}
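
// traceSize estimates how much heap a stack trace for the current thread
// would need by walking the stack and counting frames; throwArithmetic
// uses that estimate to decide whether a fresh ArithmeticException (with
// a trace) can be allocated, or whether the preallocated instance must
// be thrown instead.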
unsigned
traceSize(Thread* t)
{
  class Counter: public Processor::StackVisitor {
   public:
    Counter(): count(0) { }

    virtual bool visit(Processor::StackWalker*) {
      ++ count;
      return true;
    }

    unsigned count;
  } counter;

  t->m->processor->walkStack(t, &counter);

  return FixedSizeOfArray + (counter.count * ArrayElementSizeOfArray)
    + (counter.count * FixedSizeOfTraceElement);
}

void NO_RETURN
throwArithmetic(MyThread* t)
{
  if (ensure(t, FixedSizeOfArithmeticException + traceSize(t))) {
    atomicOr(&(t->flags), Thread::TracingFlag);
    THREAD_RESOURCE0(t, atomicAnd(&(t->flags), ~Thread::TracingFlag));

    throwNew(t, Machine::ArithmeticExceptionType);
  } else {
    // not enough memory available for a new exception and stack trace
    // -- use a preallocated instance instead
    throw_(t, root(t, Machine::ArithmeticException));
  }
}
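
// The integer divide and modulo thunks take the thread pointer as well
// as their operands so they can throw: a zero divisor is routed through
// throwArithmetic rather than letting the hardware fault.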
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
int64_t
divideLong(MyThread* t, int64_t b, int64_t a)
{
  if (LIKELY(b)) {
    return a / b;
  } else {
    throwArithmetic(t);
  }
}

int64_t
divideInt(MyThread* t, int32_t b, int32_t a)
{
  if (LIKELY(b)) {
    return a / b;
  } else {
    throwArithmetic(t);
  }
}

int64_t
moduloLong(MyThread* t, int64_t b, int64_t a)
{
  if (LIKELY(b)) {
    return a % b;
  } else {
    throwArithmetic(t);
  }
}

int64_t
moduloInt(MyThread* t, int32_t b, int32_t a)
{
  if (LIKELY(b)) {
    return a % b;
  } else {
    throwArithmetic(t);
  }
}

uint64_t
floatToDouble(int32_t a)
{
  return doubleToBits(static_cast<double>(bitsToFloat(a)));
}

int64_t
floatToInt(int32_t a)
{
  return static_cast<int32_t>(bitsToFloat(a));
}

int64_t
floatToLong(int32_t a)
{
  return static_cast<int64_t>(bitsToFloat(a));
}

uint64_t
intToDouble(int32_t a)
{
  return doubleToBits(static_cast<double>(a));
}

uint64_t
intToFloat(int32_t a)
{
  return floatToBits(static_cast<float>(a));
}

uint64_t
longToDouble(int64_t a)
{
  return doubleToBits(static_cast<double>(a));
}

uint64_t
longToFloat(int64_t a)
{
  return floatToBits(static_cast<float>(a));
}

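// allocation helpers for anewarray/newarray: validate the requested
// length and dispatch to the constructor for the element type, throwing
// NegativeArraySizeException for negative lengths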
uint64_t
makeBlankObjectArray(MyThread* t, object class_, int32_t length)
{
  if (length >= 0) {
    return reinterpret_cast<uint64_t>(makeObjectArray(t, class_, length));
  } else {
    throwNew(t, Machine::NegativeArraySizeExceptionType, "%d", length);
  }
}

uint64_t
makeBlankObjectArrayFromReference(MyThread* t, object pair,
                                  int32_t length)
{
  return makeBlankObjectArray
    (t, resolveClass
     (t, classLoader(t, methodClass(t, pairFirst(t, pair))),
      referenceName(t, pairSecond(t, pair))), length);
}

uint64_t
makeBlankArray(MyThread* t, unsigned type, int32_t length)
{
  if (length >= 0) {
    object (*constructor)(Thread*, uintptr_t);
    switch (type) {
    case T_BOOLEAN:
      constructor = makeBooleanArray;
      break;

    case T_CHAR:
      constructor = makeCharArray;
      break;

    case T_FLOAT:
      constructor = makeFloatArray;
      break;

    case T_DOUBLE:
      constructor = makeDoubleArray;
      break;

    case T_BYTE:
      constructor = makeByteArray;
      break;

    case T_SHORT:
      constructor = makeShortArray;
      break;

    case T_INT:
      constructor = makeIntArray;
      break;

    case T_LONG:
      constructor = makeLongArray;
      break;

    default: abort(t);
    }

    return reinterpret_cast<uintptr_t>(constructor(t, length));
  } else {
    throwNew(t, Machine::NegativeArraySizeExceptionType, "%d", length);
  }
}

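// binary search a sorted table of (key, address) pairs, returning the
// matching address or default_ if the key is absent (e.g. for
// lookupswitch dispatch)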
uint64_t
lookUpAddress(int32_t key, uintptr_t* start, int32_t count,
              uintptr_t default_)
{
  int32_t bottom = 0;
  int32_t top = count;
  for (int32_t span = top - bottom; span; span = top - bottom) {
    int32_t middle = bottom + (span / 2);
    uintptr_t* p = start + (middle * 2);
    int32_t k = *p;

    if (key < k) {
      top = middle;
    } else if (key > k) {
      bottom = middle + 1;
    } else {
      return p[1];
    }
  }

  return default_;
}

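// the following helpers perform the implicit null check before
// delegating to set/acquire/release, throwing NullPointerException when
// the target object is null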
void
setMaybeNull(MyThread* t, object o, unsigned offset, object value)
{
  if (LIKELY(o)) {
    set(t, o, offset, value);
  } else {
    throwNew(t, Machine::NullPointerExceptionType);
  }
}

void
acquireMonitorForObject(MyThread* t, object o)
{
  if (LIKELY(o)) {
    acquire(t, o);
  } else {
    throwNew(t, Machine::NullPointerExceptionType);
  }
}

void
acquireMonitorForObjectOnEntrance(MyThread* t, object o)
{
  if (LIKELY(o)) {
    t->methodLockIsClean = false;
    acquire(t, o);
    t->methodLockIsClean = true;
  } else {
    throwNew(t, Machine::NullPointerExceptionType);
  }
}

void
releaseMonitorForObject(MyThread* t, object o)
{
  if (LIKELY(o)) {
    release(t, o);
  } else {
    throwNew(t, Machine::NullPointerExceptionType);
  }
}

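// build a multidimensional array from dimension counts passed on the
// Java stack (outermost dimension last on the stack), rejecting negative
// sizes before populating the nested arrays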
object
makeMultidimensionalArray2(MyThread* t, object class_, uintptr_t* countStack,
                           int32_t dimensions)
{
  PROTECT(t, class_);

  THREAD_RUNTIME_ARRAY(t, int32_t, counts, dimensions);
  for (int i = dimensions - 1; i >= 0; --i) {
    RUNTIME_ARRAY_BODY(counts)[i] = countStack[dimensions - i - 1];
    if (UNLIKELY(RUNTIME_ARRAY_BODY(counts)[i] < 0)) {
      throwNew(t, Machine::NegativeArraySizeExceptionType, "%d",
               RUNTIME_ARRAY_BODY(counts)[i]);
      return 0;
    }
  }

  object array = makeArray(t, RUNTIME_ARRAY_BODY(counts)[0]);
  setObjectClass(t, array, class_);
  PROTECT(t, array);

  populateMultiArray(t, array, RUNTIME_ARRAY_BODY(counts), 0, dimensions);

  return array;
}

uint64_t
makeMultidimensionalArray(MyThread* t, object class_, int32_t dimensions,
                          int32_t offset)
{
  return reinterpret_cast<uintptr_t>
    (makeMultidimensionalArray2
     (t, class_, static_cast<uintptr_t*>(t->stack) + offset, dimensions));
}

uint64_t
makeMultidimensionalArrayFromReference(MyThread* t, object pair,
                                       int32_t dimensions,
                                       int32_t offset)
{
  return makeMultidimensionalArray
    (t, resolveClass
     (t, classLoader(t, methodClass(t, pairFirst(t, pair))),
      referenceName(t, pairSecond(t, pair))), dimensions, offset);
}

void NO_RETURN
throwArrayIndexOutOfBounds(MyThread* t)
{
  if (ensure(t, FixedSizeOfArrayIndexOutOfBoundsException + traceSize(t))) {
    atomicOr(&(t->flags), Thread::TracingFlag);
    THREAD_RESOURCE0(t, atomicAnd(&(t->flags), ~Thread::TracingFlag));

    throwNew(t, Machine::ArrayIndexOutOfBoundsExceptionType);
  } else {
    // not enough memory available for a new exception and stack trace
    // -- use a preallocated instance instead
    throw_(t, root(t, Machine::ArrayIndexOutOfBoundsException));
  }
}

void NO_RETURN
throwStackOverflow(MyThread* t)
{
  throwNew(t, Machine::StackOverflowErrorType);
}

void NO_RETURN
throw_(MyThread* t, object o)
{
  if (LIKELY(o)) {
    vm::throw_(t, o);
  } else {
    throwNew(t, Machine::NullPointerExceptionType);
  }
}

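// checkcast support: throw ClassCastException unless o is null or an
// instance of class_; the *FromReference variant resolves the target
// class lazily from a (caller method, reference) pair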
void
checkCast(MyThread* t, object class_, object o)
{
  if (UNLIKELY(o and not isAssignableFrom(t, class_, objectClass(t, o)))) {
    throwNew
      (t, Machine::ClassCastExceptionType, "%s as %s",
       &byteArrayBody(t, className(t, objectClass(t, o)), 0),
       &byteArrayBody(t, className(t, class_), 0));
  }
}

void
checkCastFromReference(MyThread* t, object pair, object o)
{
  PROTECT(t, o);

  object c = resolveClass
    (t, classLoader(t, methodClass(t, pairFirst(t, pair))),
     referenceName(t, pairSecond(t, pair)));

  checkCast(t, c, o);
}

object
resolveField(Thread* t, object pair)
{
  object reference = pairSecond(t, pair);
  PROTECT(t, reference);

  object class_ = resolveClassInObject
    (t, classLoader(t, methodClass(t, pairFirst(t, pair))), reference,
     ReferenceClass);

  return findInHierarchy
    (t, class_, referenceName(t, reference), referenceSpec(t, reference),
     findFieldInClass, Machine::NoSuchFieldErrorType);
}

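// field access helpers: when a field reference could not be resolved at
// compile time, generated code falls back to these, which resolve the
// field, initialize its declaring class for static accesses, take the
// field lock, and read or write the value according to its type code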
uint64_t
getFieldValue(Thread* t, object target, object field)
{
  switch (fieldCode(t, field)) {
  case ByteField:
  case BooleanField:
    return cast<int8_t>(target, fieldOffset(t, field));

  case CharField:
  case ShortField:
    return cast<int16_t>(target, fieldOffset(t, field));

  case FloatField:
  case IntField:
    return cast<int32_t>(target, fieldOffset(t, field));

  case DoubleField:
  case LongField:
    return cast<int64_t>(target, fieldOffset(t, field));

  case ObjectField:
    return cast<intptr_t>(target, fieldOffset(t, field));

  default:
    abort(t);
  }
}

uint64_t
getStaticFieldValueFromReference(MyThread* t, object pair)
{
  object field = resolveField(t, pair);
  PROTECT(t, field);

  initClass(t, fieldClass(t, field));

  ACQUIRE_FIELD_FOR_READ(t, field);

  return getFieldValue(t, classStaticTable(t, fieldClass(t, field)), field);
}

uint64_t
getFieldValueFromReference(MyThread* t, object pair, object instance)
{
  PROTECT(t, instance);

  object field = resolveField(t, pair);
  PROTECT(t, field);

  ACQUIRE_FIELD_FOR_READ(t, field);

  return getFieldValue(t, instance, field);
}

void
setStaticLongFieldValueFromReference(MyThread* t, object pair, uint64_t value)
{
  object field = resolveField(t, pair);
  PROTECT(t, field);

  initClass(t, fieldClass(t, field));

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  cast<int64_t>
    (classStaticTable(t, fieldClass(t, field)), fieldOffset(t, field)) = value;
}

void
setLongFieldValueFromReference(MyThread* t, object pair, object instance,
                               uint64_t value)
{
  PROTECT(t, instance);

  object field = resolveField(t, pair);
  PROTECT(t, field);

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  cast<int64_t>(instance, fieldOffset(t, field)) = value;
}

void
setStaticObjectFieldValueFromReference(MyThread* t, object pair, object value)
{
  PROTECT(t, value);

  object field = resolveField(t, pair);
  PROTECT(t, field);

  initClass(t, fieldClass(t, field));

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  set(t, classStaticTable(t, fieldClass(t, field)), fieldOffset(t, field),
      value);
}

void
setObjectFieldValueFromReference(MyThread* t, object pair, object instance,
                                 object value)
{
  PROTECT(t, instance);
  PROTECT(t, value);

  object field = resolveField(t, pair);
  PROTECT(t, field);

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  set(t, instance, fieldOffset(t, field), value);
}

void
setFieldValue(MyThread* t, object target, object field, uint32_t value)
{
  switch (fieldCode(t, field)) {
  case ByteField:
  case BooleanField:
    cast<int8_t>(target, fieldOffset(t, field)) = value;
    break;

  case CharField:
  case ShortField:
    cast<int16_t>(target, fieldOffset(t, field)) = value;
    break;

  case FloatField:
  case IntField:
    cast<int32_t>(target, fieldOffset(t, field)) = value;
    break;

  default:
    abort(t);
  }
}

void
setStaticFieldValueFromReference(MyThread* t, object pair, uint32_t value)
{
  object field = resolveField(t, pair);
  PROTECT(t, field);

  initClass(t, fieldClass(t, field));

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  setFieldValue(t, classStaticTable(t, fieldClass(t, field)), field, value);
}

void
setFieldValueFromReference(MyThread* t, object pair, object instance,
                           uint32_t value)
{
  PROTECT(t, instance);

  object field = resolveField(t, pair);
  PROTECT(t, field);

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  setFieldValue(t, instance, field, value);
}

uint64_t
instanceOf64(Thread* t, object class_, object o)
{
  return instanceOf(t, class_, o);
}

uint64_t
instanceOfFromReference(Thread* t, object pair, object o)
{
  PROTECT(t, o);

  object c = resolveClass
    (t, classLoader(t, methodClass(t, pairFirst(t, pair))),
     referenceName(t, pairSecond(t, pair)));

  return instanceOf64(t, c, o);
}

uint64_t
makeNewGeneral64(Thread* t, object class_)
{
  return reinterpret_cast<uintptr_t>(makeNewGeneral(t, class_));
}

uint64_t
makeNew64(Thread* t, object class_)
{
  return reinterpret_cast<uintptr_t>(makeNew(t, class_));
}

uint64_t
makeNewFromReference(Thread* t, object pair)
{
  return makeNewGeneral64
    (t, resolveClass
     (t, classLoader(t, methodClass(t, pairFirst(t, pair))),
      referenceName(t, pairSecond(t, pair))));
}

uint64_t
getJClass64(Thread* t, object class_)
{
  return reinterpret_cast<uintptr_t>(getJClass(t, class_));
}

void
gcIfNecessary(MyThread* t)
{
  stress(t);

  if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
    collect(t, Heap::MinorCollection);
  }
}

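// map JVM field type codes to operand sizes and compiler operand types;
// used when generating calls, returns, and field accesses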
unsigned
resultSize(MyThread* t, unsigned code)
{
  switch (code) {
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case FloatField:
  case IntField:
    return 4;

  case ObjectField:
    return TargetBytesPerWord;

  case LongField:
  case DoubleField:
    return 8;

  case VoidField:
    return 0;

  default:
    abort(t);
  }
}

void
pushReturnValue(MyThread* t, Frame* frame, unsigned code,
                Compiler::Operand* result)
{
  switch (code) {
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case FloatField:
  case IntField:
    return frame->pushInt(result);

  case ObjectField:
    return frame->pushObject(result);

  case LongField:
  case DoubleField:
    return frame->pushLong(result);

  default:
    abort(t);
  }
}

Compiler::Operand*
popField(MyThread* t, Frame* frame, int code)
{
  switch (code) {
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case FloatField:
  case IntField:
    return frame->popInt();

  case DoubleField:
  case LongField:
    return frame->popLong();

  case ObjectField:
    return frame->popObject();

  default: abort(t);
  }
}

Compiler::OperandType
operandTypeForFieldCode(Thread* t, unsigned code)
{
  switch (code) {
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case IntField:
  case LongField:
    return Compiler::IntegerType;

  case ObjectField:
    return Compiler::ObjectType;

  case FloatField:
  case DoubleField:
    return Compiler::FloatType;

  case VoidField:
    return Compiler::VoidType;

  default:
    abort(t);
  }
}

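// decide whether a call target lies outside the code allocator's reach
// for an immediate branch, in which case a long-form jump or call must
// be emitted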
bool
useLongJump(MyThread* t, uintptr_t target)
{
  uintptr_t reach = t->arch->maximumImmediateJump();
  FixedAllocator* a = codeAllocator(t);
  uintptr_t start = reinterpret_cast<uintptr_t>(a->base);
  uintptr_t end = reinterpret_cast<uintptr_t>(a->base) + a->capacity;
  assert(t, end - start < reach);

  return (target > end && (target - start) > reach)
    or (target < start && (end - target) > reach);
}

2009-03-31 20:15:08 +00:00
|
|
|
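// emit a direct (non-virtual) call to target: depending on whether the
// callee is already compiled, needs class initialization, or is being
// compiled into a boot image, the call goes through the default or
// native thunk or jumps straight to the known address; tail calls
// through a thunk first save the return address in the thread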
Compiler::Operand*
|
|
|
|
compileDirectInvoke(MyThread* t, Frame* frame, object target, bool tailCall,
|
|
|
|
bool useThunk, unsigned rSize, Promise* addressPromise)
|
|
|
|
{
|
2009-04-07 00:34:12 +00:00
|
|
|
Compiler* c = frame->c;
|
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
unsigned flags = (TailCalls and tailCall ? Compiler::TailJump : 0);
|
|
|
|
unsigned traceFlags;
|
|
|
|
|
|
|
|
if (addressPromise == 0 and useLongJump(t, methodAddress(t, target))) {
|
|
|
|
flags |= Compiler::LongJumpOrCall;
|
|
|
|
traceFlags = TraceElement::LongCall;
|
|
|
|
} else {
|
|
|
|
traceFlags = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (useThunk
|
|
|
|
or (TailCalls and tailCall and (methodFlags(t, target) & ACC_NATIVE)))
|
|
|
|
{
|
2011-09-20 22:30:30 +00:00
|
|
|
if (frame->context->bootContext == 0) {
|
|
|
|
flags |= Compiler::Aligned;
|
|
|
|
}
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2009-05-26 05:27:10 +00:00
|
|
|
if (TailCalls and tailCall) {
|
2009-10-18 00:18:03 +00:00
|
|
|
traceFlags |= TraceElement::TailCall;
|
|
|
|
|
|
|
|
TraceElement* trace = frame->trace(target, traceFlags);
|
2010-06-26 03:13:59 +00:00
|
|
|
|
|
|
|
Promise* returnAddressPromise = new
|
|
|
|
(frame->context->zone.allocate(sizeof(TraceElementPromise)))
|
|
|
|
TraceElementPromise(t->m->system, trace);
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2009-04-19 22:36:11 +00:00
|
|
|
Compiler::Operand* result = c->stackCall
|
2010-06-26 03:13:59 +00:00
|
|
|
(c->promiseConstant(returnAddressPromise, Compiler::AddressType),
|
2009-10-18 00:18:03 +00:00
|
|
|
flags,
|
2009-04-22 01:39:25 +00:00
|
|
|
trace,
|
2009-04-19 22:36:11 +00:00
|
|
|
rSize,
|
2009-09-20 21:43:32 +00:00
|
|
|
operandTypeForFieldCode(t, methodReturnCode(t, target)),
|
2009-04-19 22:36:11 +00:00
|
|
|
methodParameterFootprint(t, target));
|
|
|
|
|
2009-09-20 21:43:32 +00:00
|
|
|
c->store
|
2011-09-20 22:30:30 +00:00
|
|
|
(TargetBytesPerWord,
|
|
|
|
frame->absoluteAddressOperand(returnAddressPromise),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord, c->memory
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->register_(t->arch->thread()), Compiler::AddressType,
|
2011-09-01 03:18:00 +00:00
|
|
|
TargetThreadTailAddress));
|
2009-04-19 22:36:11 +00:00
|
|
|
|
2009-10-18 00:18:03 +00:00
|
|
|
c->exit
|
|
|
|
(c->constant
|
|
|
|
((methodFlags(t, target) & ACC_NATIVE)
|
|
|
|
? nativeThunk(t) : defaultThunk(t),
|
|
|
|
Compiler::AddressType));
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2009-04-19 22:36:11 +00:00
|
|
|
return result;
|
|
|
|
} else {
|
2009-03-31 20:15:08 +00:00
|
|
|
return c->stackCall
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(defaultThunk(t), Compiler::AddressType),
|
2009-10-18 00:18:03 +00:00
|
|
|
flags,
|
|
|
|
frame->trace(target, traceFlags),
|
2009-03-31 20:15:08 +00:00
|
|
|
rSize,
|
2009-09-20 21:43:32 +00:00
|
|
|
operandTypeForFieldCode(t, methodReturnCode(t, target)),
|
2009-03-31 20:15:08 +00:00
|
|
|
methodParameterFootprint(t, target));
|
|
|
|
}
|
2009-04-19 22:36:11 +00:00
|
|
|
} else {
|
|
|
|
Compiler::Operand* address =
|
|
|
|
(addressPromise
|
2009-09-20 21:43:32 +00:00
|
|
|
? c->promiseConstant(addressPromise, Compiler::AddressType)
|
|
|
|
: c->constant(methodAddress(t, target), Compiler::AddressType));
|
2009-04-19 22:36:11 +00:00
|
|
|
|
|
|
|
return c->stackCall
|
|
|
|
(address,
|
|
|
|
flags,
|
|
|
|
tailCall ? 0 : frame->trace
|
|
|
|
((methodFlags(t, target) & ACC_NATIVE) ? target : 0, 0),
|
|
|
|
rSize,
|
2009-09-20 21:43:32 +00:00
|
|
|
operandTypeForFieldCode(t, methodReturnCode(t, target)),
|
2009-04-19 22:36:11 +00:00
|
|
|
methodParameterFootprint(t, target));
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-04-22 01:39:25 +00:00
|
|
|
bool
|
2009-03-31 20:15:08 +00:00
|
|
|
compileDirectInvoke(MyThread* t, Frame* frame, object target, bool tailCall)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2008-02-12 02:06:12 +00:00
|
|
|
unsigned rSize = resultSize(t, methodReturnCode(t, target));
|
|
|
|
|
2008-03-10 13:28:21 +00:00
|
|
|
Compiler::Operand* result = 0;
|
|
|
|
|
2009-04-22 01:39:25 +00:00
|
|
|
if (emptyMethod(t, target)) {
|
|
|
|
tailCall = false;
|
|
|
|
} else {
|
2008-11-30 04:58:09 +00:00
|
|
|
BootContext* bc = frame->context->bootContext;
|
|
|
|
if (bc) {
|
2010-06-26 03:13:59 +00:00
|
|
|
if ((methodClass(t, target) == methodClass(t, frame->context->method)
|
|
|
|
or (not classNeedsInit(t, methodClass(t, target))))
|
|
|
|
and (not (TailCalls and tailCall
|
|
|
|
and (methodFlags(t, target) & ACC_NATIVE))))
|
2008-11-30 04:58:09 +00:00
|
|
|
{
|
|
|
|
Promise* p = new (bc->zone->allocate(sizeof(ListenPromise)))
|
|
|
|
ListenPromise(t->m->system, bc->zone);
|
|
|
|
|
|
|
|
PROTECT(t, target);
|
|
|
|
object pointer = makePointer(t, p);
|
|
|
|
bc->calls = makeTriple(t, target, pointer, bc->calls);
|
|
|
|
|
2009-03-31 20:15:08 +00:00
|
|
|
result = compileDirectInvoke
|
|
|
|
(t, frame, target, tailCall, false, rSize, p);
|
2008-11-27 20:59:40 +00:00
|
|
|
} else {
|
2009-03-31 20:15:08 +00:00
|
|
|
result = compileDirectInvoke
|
|
|
|
(t, frame, target, tailCall, true, rSize, 0);
|
2008-11-27 20:59:40 +00:00
|
|
|
}
|
2009-10-18 00:18:03 +00:00
|
|
|
} else if (unresolved(t, methodAddress(t, target))
|
2008-11-30 04:58:09 +00:00
|
|
|
or classNeedsInit(t, methodClass(t, target)))
|
|
|
|
{
|
2009-03-31 20:15:08 +00:00
|
|
|
result = compileDirectInvoke
|
|
|
|
(t, frame, target, tailCall, true, rSize, 0);
|
2008-04-13 19:48:20 +00:00
|
|
|
} else {
|
2009-03-31 20:15:08 +00:00
|
|
|
result = compileDirectInvoke
|
|
|
|
(t, frame, target, tailCall, false, rSize, 0);
|
2008-04-13 19:48:20 +00:00
|
|
|
}
|
2008-04-09 19:08:13 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
frame->pop(methodParameterFootprint(t, target));
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-02-12 02:06:12 +00:00
|
|
|
if (rSize) {
|
2008-03-11 16:57:35 +00:00
|
|
|
pushReturnValue(t, frame, methodReturnCode(t, target), result);
|
2008-02-11 17:21:41 +00:00
|
|
|
}
|
2009-04-22 01:39:25 +00:00
|
|
|
|
|
|
|
return tailCall;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
unsigned
|
|
|
|
methodReferenceParameterFootprint(Thread* t, object reference, bool isStatic)
|
|
|
|
{
|
|
|
|
return parameterFootprint
|
|
|
|
(t, reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, referenceSpec(t, reference), 0)), isStatic);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
methodReferenceReturnCode(Thread* t, object reference)
|
|
|
|
{
|
|
|
|
unsigned parameterCount;
|
|
|
|
unsigned returnCode;
|
|
|
|
scanMethodSpec
|
|
|
|
(t, reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, referenceSpec(t, reference), 0)), &parameterCount,
|
|
|
|
&returnCode);
|
|
|
|
|
|
|
|
return returnCode;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
compileReferenceInvoke(MyThread* t, Frame* frame, Compiler::Operand* method,
|
|
|
|
object reference, bool isStatic, bool tailCall)
|
|
|
|
{
|
|
|
|
unsigned parameterFootprint
|
|
|
|
= methodReferenceParameterFootprint(t, reference, isStatic);
|
|
|
|
|
|
|
|
int returnCode = methodReferenceReturnCode(t, reference);
|
|
|
|
|
|
|
|
unsigned rSize = resultSize(t, returnCode);
|
|
|
|
|
|
|
|
Compiler::Operand* result = frame->c->stackCall
|
|
|
|
(method,
|
|
|
|
tailCall ? Compiler::TailJump : 0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
rSize,
|
|
|
|
operandTypeForFieldCode(t, returnCode),
|
|
|
|
parameterFootprint);
|
|
|
|
|
|
|
|
frame->pop(parameterFootprint);
|
|
|
|
|
|
|
|
if (rSize) {
|
|
|
|
pushReturnValue(t, frame, returnCode, result);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
compileDirectReferenceInvoke(MyThread* t, Frame* frame, Thunk thunk,
|
|
|
|
object reference, bool isStatic, bool tailCall)
|
|
|
|
{
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
|
|
|
|
PROTECT(t, reference);
|
|
|
|
|
|
|
|
object pair = makePair(t, frame->context->method, reference);
|
|
|
|
|
|
|
|
compileReferenceInvoke
|
|
|
|
(t, frame, c->call
|
|
|
|
(c->constant(getThunk(t, thunk), Compiler::AddressType),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord,
|
2011-03-15 23:52:02 +00:00
|
|
|
Compiler::AddressType,
|
|
|
|
2, c->register_(t->arch->thread()), frame->append(pair)),
|
|
|
|
reference, isStatic, tailCall);
|
|
|
|
}
|
|
|
|
|
2011-07-18 01:54:55 +00:00
|
|
|
void
|
|
|
|
compileAbstractInvoke(MyThread* t, Frame* frame, Compiler::Operand* method,
|
|
|
|
object target, bool tailCall)
|
|
|
|
{
|
|
|
|
unsigned parameterFootprint = methodParameterFootprint(t, target);
|
|
|
|
|
|
|
|
int returnCode = methodReturnCode(t, target);
|
|
|
|
|
|
|
|
unsigned rSize = resultSize(t, returnCode);
|
|
|
|
|
|
|
|
Compiler::Operand* result = frame->c->stackCall
|
|
|
|
(method,
|
|
|
|
tailCall ? Compiler::TailJump : 0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
rSize,
|
|
|
|
operandTypeForFieldCode(t, returnCode),
|
|
|
|
parameterFootprint);
|
|
|
|
|
|
|
|
frame->pop(parameterFootprint);
|
|
|
|
|
|
|
|
if (rSize) {
|
|
|
|
pushReturnValue(t, frame, returnCode, result);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
compileDirectAbstractInvoke(MyThread* t, Frame* frame, Thunk thunk,
|
|
|
|
object target, bool tailCall)
|
|
|
|
{
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
|
|
|
|
compileAbstractInvoke
|
|
|
|
(t, frame, c->call
|
|
|
|
(c->constant(getThunk(t, thunk), Compiler::AddressType),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord,
|
2011-07-18 01:54:55 +00:00
|
|
|
Compiler::AddressType,
|
|
|
|
2, c->register_(t->arch->thread()), frame->append(target)),
|
|
|
|
target, tailCall);
|
|
|
|
}
|
|
|
|
|
2007-12-28 00:02:05 +00:00
|
|
|
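// for synchronized methods, emit a call to acquire or release the
// method's monitor: the class object for static methods, otherwise the
// 'this' argument saved by handleEntrance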
void
|
|
|
|
handleMonitorEvent(MyThread* t, Frame* frame, intptr_t function)
|
|
|
|
{
|
|
|
|
Compiler* c = frame->c;
|
2007-12-31 22:40:56 +00:00
|
|
|
object method = frame->context->method;
|
2007-12-28 00:02:05 +00:00
|
|
|
|
|
|
|
if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* lock;
|
2007-12-28 00:02:05 +00:00
|
|
|
if (methodFlags(t, method) & ACC_STATIC) {
|
2010-11-26 19:41:31 +00:00
|
|
|
PROTECT(t, method);
|
|
|
|
|
2007-12-28 00:02:05 +00:00
|
|
|
lock = frame->append(methodClass(t, method));
|
|
|
|
} else {
|
2009-05-03 20:57:11 +00:00
|
|
|
lock = loadLocal(frame->context, 1, savedTargetIndex(t, method));
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
|
|
|
|
2009-09-20 21:43:32 +00:00
|
|
|
c->call(c->constant(function, Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-02-11 17:21:41 +00:00
|
|
|
0,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::VoidType,
|
2009-04-07 00:34:12 +00:00
|
|
|
2, c->register_(t->arch->thread()), lock);
|
2008-01-11 22:16:24 +00:00
|
|
|
}
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
handleEntrance(MyThread* t, Frame* frame)
|
|
|
|
{
|
2008-01-20 18:55:08 +00:00
|
|
|
object method = frame->context->method;
|
|
|
|
|
2008-01-20 22:05:59 +00:00
|
|
|
if ((methodFlags(t, method) & (ACC_SYNCHRONIZED | ACC_STATIC))
|
|
|
|
== ACC_SYNCHRONIZED)
|
2008-01-20 18:55:08 +00:00
|
|
|
{
|
|
|
|
// save 'this' pointer in case it is overwritten.
|
|
|
|
unsigned index = savedTargetIndex(t, method);
|
2009-05-03 20:57:11 +00:00
|
|
|
storeLocal(frame->context, 1, loadLocal(frame->context, 1, 0), index);
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->set(index, Frame::Object);
|
2008-01-20 18:55:08 +00:00
|
|
|
}
|
|
|
|
|
2007-12-28 00:02:05 +00:00
|
|
|
handleMonitorEvent
|
2011-03-26 00:37:02 +00:00
|
|
|
(t, frame, getThunk(t, acquireMonitorForObjectOnEntranceThunk));
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
handleExit(MyThread* t, Frame* frame)
|
|
|
|
{
|
|
|
|
handleMonitorEvent
|
2008-05-31 22:14:27 +00:00
|
|
|
(t, frame, getThunk(t, releaseMonitorForObjectThunk));
|
2007-12-28 00:02:05 +00:00
|
|
|
}
|
|
|
|
|
2008-11-25 17:34:48 +00:00
|
|
|
bool
|
|
|
|
inTryBlock(MyThread* t, object code, unsigned ip)
|
|
|
|
{
|
|
|
|
object table = codeExceptionHandlerTable(t, code);
|
|
|
|
if (table) {
|
|
|
|
unsigned length = exceptionHandlerTableLength(t, table);
|
|
|
|
for (unsigned i = 0; i < length; ++i) {
|
2011-08-30 01:00:17 +00:00
|
|
|
uint64_t eh = exceptionHandlerTableBody(t, table, i);
|
2008-11-25 17:34:48 +00:00
|
|
|
if (ip >= exceptionHandlerStart(eh)
|
|
|
|
and ip < exceptionHandlerEnd(eh))
|
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-03-31 20:15:08 +00:00
|
|
|
bool
|
|
|
|
needsReturnBarrier(MyThread* t, object method)
|
|
|
|
{
|
|
|
|
return (methodFlags(t, method) & ConstructorFlag)
|
2009-08-18 20:26:28 +00:00
|
|
|
and (classVmFlags(t, methodClass(t, method)) & HasFinalMemberFlag);
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2009-04-26 01:51:33 +00:00
|
|
|
returnsNext(MyThread* t, object code, unsigned ip)
|
2009-03-31 20:15:08 +00:00
|
|
|
{
|
2009-04-26 01:51:33 +00:00
|
|
|
switch (codeBody(t, code, ip)) {
|
|
|
|
case return_:
|
|
|
|
case areturn:
|
|
|
|
case ireturn:
|
|
|
|
case freturn:
|
|
|
|
case lreturn:
|
|
|
|
case dreturn:
|
|
|
|
return true;
|
|
|
|
|
|
|
|
case goto_: {
|
|
|
|
uint32_t offset = codeReadInt16(t, code, ++ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
|
|
|
return returnsNext(t, code, newIp);
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
2009-04-26 01:51:33 +00:00
|
|
|
|
|
|
|
case goto_w: {
|
|
|
|
uint32_t offset = codeReadInt32(t, code, ++ip);
|
|
|
|
uint32_t newIp = (ip - 5) + offset;
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
|
|
|
return returnsNext(t, code, newIp);
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
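// a call may be compiled as a tail call only if tail calls are enabled,
// the caller is not synchronized, the call site is not covered by an
// exception handler, no return barrier is needed, the return types are
// compatible, and the next bytecode (following gotos) is a return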
bool
|
2011-03-15 23:52:02 +00:00
|
|
|
isTailCall(MyThread* t, object code, unsigned ip, object caller,
|
|
|
|
int calleeReturnCode)
|
2009-04-26 01:51:33 +00:00
|
|
|
{
|
2009-05-26 05:27:10 +00:00
|
|
|
return TailCalls
|
|
|
|
and ((methodFlags(t, caller) & ACC_SYNCHRONIZED) == 0)
|
|
|
|
and (not inTryBlock(t, code, ip - 1))
|
|
|
|
and (not needsReturnBarrier(t, caller))
|
|
|
|
and (methodReturnCode(t, caller) == VoidField
|
2011-03-15 23:52:02 +00:00
|
|
|
or methodReturnCode(t, caller) == calleeReturnCode)
|
2009-05-26 05:27:10 +00:00
|
|
|
and returnsNext(t, code, ip);
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
bool
|
|
|
|
isTailCall(MyThread* t, object code, unsigned ip, object caller, object callee)
|
|
|
|
{
|
|
|
|
return isTailCall(t, code, ip, caller, methodReturnCode(t, callee));
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
isReferenceTailCall(MyThread* t, object code, unsigned ip, object caller,
|
|
|
|
object calleeReference)
|
|
|
|
{
|
|
|
|
return isTailCall
|
|
|
|
(t, code, ip, caller, methodReferenceReturnCode(t, calleeReference));
|
|
|
|
}
|
|
|
|
|
2008-09-20 23:42:46 +00:00
|
|
|
void
|
|
|
|
compile(MyThread* t, Frame* initialFrame, unsigned ip,
|
2008-09-25 00:48:32 +00:00
|
|
|
int exceptionHandlerStart = -1);
|
2008-09-20 23:42:46 +00:00
|
|
|
|
|
|
|
void
|
2008-09-25 00:48:32 +00:00
|
|
|
saveStateAndCompile(MyThread* t, Frame* initialFrame, unsigned ip)
|
2008-09-20 23:42:46 +00:00
|
|
|
{
|
2008-09-22 14:28:18 +00:00
|
|
|
Compiler::State* state = initialFrame->c->saveState();
|
2008-11-07 00:39:38 +00:00
|
|
|
compile(t, initialFrame, ip);
|
2008-09-22 14:28:18 +00:00
|
|
|
initialFrame->c->restoreState(state);
|
2008-09-20 23:42:46 +00:00
|
|
|
}
|
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
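// try to fuse a comparison with the conditional branch that follows it
// into a single compare-and-jump; returns false (restoring ip) if the
// next instruction is not one of the if<cond> opcodes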
bool
|
2009-10-10 21:03:23 +00:00
|
|
|
integerBranch(MyThread* t, Frame* frame, object code, unsigned& ip,
|
|
|
|
unsigned size, Compiler::Operand* a, Compiler::Operand* b)
|
2009-10-07 00:50:32 +00:00
|
|
|
{
|
|
|
|
if (ip + 3 > codeLength(t, code)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
unsigned instruction = codeBody(t, code, ip++);
|
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
|
|
|
Compiler::Operand* target = frame->machineIp(newIp);
|
|
|
|
|
|
|
|
switch (instruction) {
|
|
|
|
case ifeq:
|
|
|
|
c->jumpIfEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifne:
|
|
|
|
c->jumpIfNotEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifgt:
|
|
|
|
c->jumpIfGreater(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifge:
|
|
|
|
c->jumpIfGreaterOrEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case iflt:
|
2009-10-10 21:03:23 +00:00
|
|
|
c->jumpIfLess(size, a, b, target);
|
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifle:
|
2009-10-10 21:03:23 +00:00
|
|
|
c->jumpIfLessOrEqual(size, a, b, target);
|
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
ip -= 3;
|
|
|
|
return false;
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
|
|
|
|
saveStateAndCompile(t, frame, newIp);
|
2010-12-27 22:55:23 +00:00
|
|
|
return true;
|
2009-10-07 00:50:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2009-10-10 21:03:23 +00:00
|
|
|
floatBranch(MyThread* t, Frame* frame, object code, unsigned& ip,
|
|
|
|
unsigned size, bool lessIfUnordered, Compiler::Operand* a,
|
|
|
|
Compiler::Operand* b)
|
2009-08-06 16:01:57 +00:00
|
|
|
{
|
2009-10-07 00:50:32 +00:00
|
|
|
if (ip + 3 > codeLength(t, code)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
unsigned instruction = codeBody(t, code, ip++);
|
|
|
|
uint32_t offset = codeReadInt16(t, code, ip);
|
|
|
|
uint32_t newIp = (ip - 3) + offset;
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
|
|
|
|
|
|
|
Compiler::Operand* target = frame->machineIp(newIp);
|
|
|
|
|
|
|
|
switch (instruction) {
|
2009-08-06 16:01:57 +00:00
|
|
|
case ifeq:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfFloatEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
case ifne:
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfFloatNotEqual(size, a, b, target);
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
case ifgt:
|
2009-10-07 00:50:32 +00:00
|
|
|
if (lessIfUnordered) {
|
|
|
|
c->jumpIfFloatGreater(size, a, b, target);
|
|
|
|
} else {
|
|
|
|
c->jumpIfFloatGreaterOrUnordered(size, a, b, target);
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
case ifge:
|
2009-10-07 00:50:32 +00:00
|
|
|
if (lessIfUnordered) {
|
|
|
|
c->jumpIfFloatGreaterOrEqual(size, a, b, target);
|
|
|
|
} else {
|
|
|
|
c->jumpIfFloatGreaterOrEqualOrUnordered(size, a, b, target);
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
case iflt:
|
2009-10-07 00:50:32 +00:00
|
|
|
if (lessIfUnordered) {
|
|
|
|
c->jumpIfFloatLessOrUnordered(size, a, b, target);
|
|
|
|
} else {
|
|
|
|
c->jumpIfFloatLess(size, a, b, target);
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
|
|
|
case ifle:
|
|
|
|
if (lessIfUnordered) {
|
|
|
|
c->jumpIfFloatLessOrEqualOrUnordered(size, a, b, target);
|
|
|
|
} else {
|
|
|
|
c->jumpIfFloatLessOrEqual(size, a, b, target);
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
break;
|
2009-10-07 00:50:32 +00:00
|
|
|
|
2009-08-06 16:01:57 +00:00
|
|
|
default:
|
2009-10-07 00:50:32 +00:00
|
|
|
ip -= 3;
|
2009-08-06 16:01:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
2009-10-10 21:03:23 +00:00
|
|
|
|
|
|
|
saveStateAndCompile(t, frame, newIp);
|
2010-12-27 22:55:23 +00:00
|
|
|
return true;
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
|
|
|
|
2012-03-06 20:07:59 +00:00
|
|
|
Compiler::Operand*
|
|
|
|
popLongAddress(Frame* frame)
|
|
|
|
{
|
|
|
|
return TargetBytesPerWord == 8 ? frame->popLong() : frame->c->load
|
|
|
|
(8, 8, frame->popLong(), TargetBytesPerWord);
|
|
|
|
}
|
|
|
|
|
2009-10-18 01:26:14 +00:00
|
|
|
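// recognize a few well-known methods (java/lang/Math sqrt and abs,
// sun/misc/Unsafe raw memory accessors) and emit them inline instead of
// compiling a call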
bool
|
|
|
|
intrinsic(MyThread* t, Frame* frame, object target)
|
|
|
|
{
|
|
|
|
#define MATCH(name, constant) \
|
2009-11-28 04:01:27 +00:00
|
|
|
(byteArrayLength(t, name) == sizeof(constant) \
|
2009-12-02 15:49:10 +00:00
|
|
|
and ::strcmp(reinterpret_cast<char*>(&byteArrayBody(t, name, 0)), \
|
|
|
|
constant) == 0)
|
2009-10-18 01:26:14 +00:00
|
|
|
|
|
|
|
object className = vm::className(t, methodClass(t, target));
|
|
|
|
if (UNLIKELY(MATCH(className, "java/lang/Math"))) {
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
if (MATCH(methodName(t, target), "sqrt")
|
|
|
|
and MATCH(methodSpec(t, target), "(D)D"))
|
|
|
|
{
|
|
|
|
frame->pushLong(c->fsqrt(8, frame->popLong()));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodName(t, target), "abs")) {
|
|
|
|
if (MATCH(methodSpec(t, target), "(I)I")) {
|
|
|
|
frame->pushInt(c->abs(4, frame->popInt()));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodSpec(t, target), "(J)J")) {
|
|
|
|
frame->pushLong(c->abs(8, frame->popLong()));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodSpec(t, target), "(F)F")) {
|
|
|
|
frame->pushInt(c->fabs(4, frame->popInt()));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
2012-03-06 20:07:59 +00:00
|
|
|
} else if (UNLIKELY(MATCH(className, "sun/misc/Unsafe"))) {
|
|
|
|
Compiler* c = frame->c;
|
|
|
|
if (MATCH(methodName(t, target), "getByte")
|
|
|
|
and MATCH(methodSpec(t, target), "(J)B"))
|
|
|
|
{
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
frame->pushInt
|
|
|
|
(c->load
|
|
|
|
(1, 1, c->memory(address, Compiler::IntegerType, 0, 0, 1),
|
|
|
|
TargetBytesPerWord));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodName(t, target), "putByte")
|
|
|
|
and MATCH(methodSpec(t, target), "(JB)V"))
|
|
|
|
{
|
|
|
|
Compiler::Operand* value = frame->popInt();
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
c->store
|
|
|
|
(TargetBytesPerWord, value, 1, c->memory
|
|
|
|
(address, Compiler::IntegerType, 0, 0, 1));
|
|
|
|
return true;
|
|
|
|
} else if ((MATCH(methodName(t, target), "getShort")
|
|
|
|
and MATCH(methodSpec(t, target), "(J)S"))
|
|
|
|
or (MATCH(methodName(t, target), "getChar")
|
|
|
|
and MATCH(methodSpec(t, target), "(J)C")))
|
|
|
|
{
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
frame->pushInt
|
|
|
|
(c->load
|
|
|
|
(2, 2, c->memory(address, Compiler::IntegerType, 0, 0, 1),
|
|
|
|
TargetBytesPerWord));
|
|
|
|
return true;
|
|
|
|
} else if ((MATCH(methodName(t, target), "putShort")
|
|
|
|
and MATCH(methodSpec(t, target), "(JS)V"))
|
|
|
|
or (MATCH(methodName(t, target), "putChar")
|
|
|
|
and MATCH(methodSpec(t, target), "(JC)V")))
|
|
|
|
{
|
|
|
|
Compiler::Operand* value = frame->popInt();
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
c->store
|
|
|
|
(TargetBytesPerWord, value, 2, c->memory
|
|
|
|
(address, Compiler::IntegerType, 0, 0, 1));
|
|
|
|
return true;
|
|
|
|
} else if ((MATCH(methodName(t, target), "getInt")
|
|
|
|
and MATCH(methodSpec(t, target), "(J)I"))
|
|
|
|
or (MATCH(methodName(t, target), "getFloat")
|
|
|
|
and MATCH(methodSpec(t, target), "(J)F")))
|
|
|
|
{
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
frame->pushInt
|
|
|
|
(c->load
|
|
|
|
(4, 4, c->memory
|
|
|
|
(address, MATCH(methodName(t, target), "getInt")
|
|
|
|
? Compiler::IntegerType : Compiler::FloatType, 0, 0, 1),
|
|
|
|
TargetBytesPerWord));
|
|
|
|
return true;
|
|
|
|
} else if ((MATCH(methodName(t, target), "putInt")
|
|
|
|
and MATCH(methodSpec(t, target), "(JI)V"))
|
|
|
|
or (MATCH(methodName(t, target), "putFloat")
|
|
|
|
and MATCH(methodSpec(t, target), "(JF)V")))
|
|
|
|
{
|
|
|
|
Compiler::Operand* value = frame->popInt();
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
c->store
|
|
|
|
(TargetBytesPerWord, value, 4, c->memory
|
|
|
|
(address, MATCH(methodName(t, target), "putInt")
|
|
|
|
? Compiler::IntegerType : Compiler::FloatType, 0, 0, 1));
|
|
|
|
return true;
|
|
|
|
} else if ((MATCH(methodName(t, target), "getLong")
|
|
|
|
and MATCH(methodSpec(t, target), "(J)J"))
|
|
|
|
or (MATCH(methodName(t, target), "getDouble")
|
|
|
|
and MATCH(methodSpec(t, target), "(J)D")))
|
|
|
|
{
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
frame->pushLong
|
|
|
|
(c->load
|
|
|
|
(8, 8, c->memory
|
|
|
|
(address, MATCH(methodName(t, target), "getLong")
|
|
|
|
? Compiler::IntegerType : Compiler::FloatType, 0, 0, 1),
|
|
|
|
8));
|
|
|
|
return true;
|
|
|
|
} else if ((MATCH(methodName(t, target), "putLong")
|
|
|
|
and MATCH(methodSpec(t, target), "(JJ)V"))
|
|
|
|
or (MATCH(methodName(t, target), "putDouble")
|
|
|
|
and MATCH(methodSpec(t, target), "(JD)V")))
|
|
|
|
{
|
|
|
|
Compiler::Operand* value = frame->popLong();
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
c->store
|
|
|
|
(8, value, 8, c->memory
|
|
|
|
(address, MATCH(methodName(t, target), "putLong")
|
|
|
|
? Compiler::IntegerType : Compiler::FloatType, 0, 0, 1));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodName(t, target), "getAddress")
|
|
|
|
and MATCH(methodSpec(t, target), "(J)J"))
|
|
|
|
{
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
frame->pushLong
|
|
|
|
(c->load
|
|
|
|
(TargetBytesPerWord, TargetBytesPerWord,
|
|
|
|
c->memory(address, Compiler::AddressType, 0, 0, 1), 8));
|
|
|
|
return true;
|
|
|
|
} else if (MATCH(methodName(t, target), "putAddress")
|
|
|
|
and MATCH(methodSpec(t, target), "(JJ)V"))
|
|
|
|
{
|
|
|
|
Compiler::Operand* value = frame->popLong();
|
|
|
|
Compiler::Operand* address = popLongAddress(frame);
|
|
|
|
frame->popObject();
|
|
|
|
c->store
|
|
|
|
(8, value, TargetBytesPerWord, c->memory
|
|
|
|
(address, Compiler::AddressType, 0, 0, 1));
|
|
|
|
return true;
|
|
|
|
}
|
2009-10-18 01:26:14 +00:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-09-01 03:18:00 +00:00
|
|
|
unsigned
|
|
|
|
targetFieldOffset(Context* context, object field)
|
|
|
|
{
|
|
|
|
if (context->bootContext) {
|
|
|
|
return context->bootContext->resolver->fieldOffset(context->thread, field);
|
|
|
|
} else {
|
|
|
|
return fieldOffset(context->thread, field);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
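// translate the method's bytecode into compiler IR, starting at ip and
// following control flow until already-visited code is reached; the
// stack map tracks which operand stack slots hold object references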
void
|
2008-09-25 00:48:32 +00:00
|
|
|
compile(MyThread* t, Frame* initialFrame, unsigned ip,
|
|
|
|
int exceptionHandlerStart)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uint8_t, stackMap,
|
2009-08-27 00:26:44 +00:00
|
|
|
codeMaxStack(t, methodCode(t, initialFrame->context->method)));
|
|
|
|
Frame myFrame(initialFrame, RUNTIME_ARRAY_BODY(stackMap));
|
2007-12-09 22:45:43 +00:00
|
|
|
Frame* frame = &myFrame;
|
|
|
|
Compiler* c = frame->c;
|
2007-12-31 22:40:56 +00:00
|
|
|
Context* context = frame->context;
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-31 22:40:56 +00:00
|
|
|
object code = methodCode(t, context->method);
|
2007-12-09 22:45:43 +00:00
|
|
|
PROTECT(t, code);
|
2009-08-06 16:01:57 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
while (ip < codeLength(t, code)) {
|
2008-01-07 14:51:07 +00:00
|
|
|
if (context->visitTable[ip] ++) {
|
2007-12-09 22:45:43 +00:00
|
|
|
// we've already visited this part of the code
|
2008-04-20 05:23:08 +00:00
|
|
|
frame->visitLogicalIp(ip);
|
2007-12-09 22:45:43 +00:00
|
|
|
return;
|
2007-09-30 04:07:22 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
frame->startLogicalIp(ip);
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2008-11-08 23:21:30 +00:00
|
|
|
if (exceptionHandlerStart >= 0) {
|
2008-09-25 00:48:32 +00:00
|
|
|
c->initLocalsFromLogicalIp(exceptionHandlerStart);
|
|
|
|
|
|
|
|
exceptionHandlerStart = -1;
|
2008-04-19 07:03:59 +00:00
|
|
|
|
|
|
|
frame->pushObject();
|
2008-04-09 19:08:13 +00:00
|
|
|
|
2008-04-17 22:07:32 +00:00
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(getThunk(t, gcIfNecessaryThunk), Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2008-04-17 22:07:32 +00:00
|
|
|
0,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::VoidType,
|
2009-04-07 00:34:12 +00:00
|
|
|
1, c->register_(t->arch->thread()));
|
2008-04-09 19:08:13 +00:00
|
|
|
}
|
2009-08-06 16:01:57 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
// fprintf(stderr, "ip: %d map: %ld\n", ip, *(frame->map));
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned instruction = codeBody(t, code, ip++);
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case aaload:
|
|
|
|
case baload:
|
|
|
|
case caload:
|
|
|
|
case daload:
|
|
|
|
case faload:
|
|
|
|
case iaload:
|
|
|
|
case laload:
|
|
|
|
case saload: {
|
2008-04-19 20:41:31 +00:00
|
|
|
Compiler::Operand* index = frame->popInt();
|
|
|
|
Compiler::Operand* array = frame->popObject();
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2008-11-25 17:34:48 +00:00
|
|
|
if (inTryBlock(t, code, ip - 1)) {
|
|
|
|
c->saveLocals();
|
2010-09-17 01:43:27 +00:00
|
|
|
frame->trace(0, 0);
|
2008-11-25 17:34:48 +00:00
|
|
|
}
|
|
|
|
|
2008-01-08 17:10:24 +00:00
|
|
|
if (CheckArrayBounds) {
|
2011-09-01 03:18:00 +00:00
|
|
|
c->checkBounds(array, TargetArrayLength, index, aioobThunk(t));
|
2008-01-08 17:10:24 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case aaload:
|
|
|
|
frame->pushObject
|
|
|
|
(c->load
|
2011-08-30 01:00:17 +00:00
|
|
|
(TargetBytesPerWord, TargetBytesPerWord, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(array, Compiler::ObjectType, TargetArrayBody, index,
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord),
|
|
|
|
TargetBytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case faload:
|
2009-08-10 19:20:23 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(4, 4, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(array, Compiler::FloatType, TargetArrayBody, index, 4),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-11-30 15:08:45 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case iaload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(4, 4, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(array, Compiler::IntegerType, TargetArrayBody, index, 4),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case baload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(1, 1, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(array, Compiler::IntegerType, TargetArrayBody, index, 1),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case caload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->loadz
|
|
|
|
(2, 2, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(array, Compiler::IntegerType, TargetArrayBody, index, 2),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case daload:
|
2009-08-10 19:20:23 +00:00
|
|
|
frame->pushLong
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(8, 8, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(array, Compiler::FloatType, TargetArrayBody, index, 8), 8));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case laload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushLong
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(8, 8, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(array, Compiler::IntegerType, TargetArrayBody, index, 8), 8));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case saload:
|
2008-12-21 21:41:56 +00:00
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->load
|
|
|
|
(2, 2, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(array, Compiler::IntegerType, TargetArrayBody, index, 2),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
} break;
    case aastore:
    case bastore:
    case castore:
    case dastore:
    case fastore:
    case iastore:
    case lastore:
    case sastore: {
      Compiler::Operand* value;
      if (instruction == dastore or instruction == lastore) {
        value = frame->popLong();
      } else if (instruction == aastore) {
        value = frame->popObject();
      } else {
        value = frame->popInt();
      }

      Compiler::Operand* index = frame->popInt();
      Compiler::Operand* array = frame->popObject();

      if (inTryBlock(t, code, ip - 1)) {
        c->saveLocals();
        frame->trace(0, 0);
      }

      if (CheckArrayBounds) {
        c->checkBounds(array, TargetArrayLength, index, aioobThunk(t));
      }

      switch (instruction) {
      case aastore: {
        c->call
          (c->constant(getThunk(t, setMaybeNullThunk), Compiler::AddressType),
           0,
           frame->trace(0, 0),
           0,
           Compiler::VoidType,
           4, c->register_(t->arch->thread()), array,
           c->add
           (4, c->constant(TargetArrayBody, Compiler::IntegerType),
            c->shl
            (4, c->constant(log(TargetBytesPerWord), Compiler::IntegerType),
             index)),
           value);
      } break;

      case fastore:
        c->store
          (TargetBytesPerWord, value, 4, c->memory
           (array, Compiler::FloatType, TargetArrayBody, index, 4));
        break;

      case iastore:
        c->store
          (TargetBytesPerWord, value, 4, c->memory
           (array, Compiler::IntegerType, TargetArrayBody, index, 4));
        break;

      case bastore:
        c->store
          (TargetBytesPerWord, value, 1, c->memory
           (array, Compiler::IntegerType, TargetArrayBody, index, 1));
        break;

      case castore:
      case sastore:
        c->store
          (TargetBytesPerWord, value, 2, c->memory
           (array, Compiler::IntegerType, TargetArrayBody, index, 2));
        break;

      case dastore:
        c->store
          (8, value, 8, c->memory
           (array, Compiler::FloatType, TargetArrayBody, index, 8));
        break;

      case lastore:
        c->store
          (8, value, 8, c->memory
           (array, Compiler::IntegerType, TargetArrayBody, index, 8));
        break;
      }
    } break;
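
    // aconst_null and the aload/astore families use the Object variants of
    // the Frame helpers so the frame model keeps track of which stack and
    // local slots hold references.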
    case aconst_null:
      frame->pushObject(c->constant(0, Compiler::ObjectType));
      break;

    case aload:
      frame->loadObject(codeBody(t, code, ip++));
      break;

    case aload_0:
      frame->loadObject(0);
      break;

    case aload_1:
      frame->loadObject(1);
      break;

    case aload_2:
      frame->loadObject(2);
      break;

    case aload_3:
      frame->loadObject(3);
      break;
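
    // Several opcodes below follow the same resolution pattern: try to
    // resolve the constant pool entry at compile time (passing false so
    // failure does not throw); if that fails, pair the current method with
    // the unresolved reference and use the corresponding *FromReference
    // thunk to finish resolution at run time.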
    case anewarray: {
      uint16_t index = codeReadInt16(t, code, ip);

      object reference = singletonObject
        (t, codePool(t, methodCode(t, context->method)), index - 1);

      PROTECT(t, reference);

      object class_ = resolveClassInPool(t, context->method, index - 1, false);

      Compiler::Operand* length = frame->popInt();

      object argument;
      Thunk thunk;
      if (LIKELY(class_)) {
        argument = class_;
        thunk = makeBlankObjectArrayThunk;
      } else {
        argument = makePair(t, context->method, reference);
        thunk = makeBlankObjectArrayFromReferenceThunk;
      }

      frame->pushObject
        (c->call
         (c->constant(getThunk(t, thunk), Compiler::AddressType),
          0,
          frame->trace(0, 0),
          TargetBytesPerWord,
          Compiler::ObjectType,
          3, c->register_(t->arch->thread()), frame->append(argument),
          length));
    } break;

    case areturn: {
      handleExit(t, frame);
      c->return_(TargetBytesPerWord, frame->popObject());
    } return;

    case arraylength: {
      frame->pushInt
        (c->load
         (TargetBytesPerWord, TargetBytesPerWord,
          c->memory
          (frame->popObject(), Compiler::IntegerType,
           TargetArrayLength, 0, 1),
          TargetBytesPerWord));
    } break;

    case astore:
      frame->storeObjectOrAddress(codeBody(t, code, ip++));
      break;

    case astore_0:
      frame->storeObjectOrAddress(0);
      break;

    case astore_1:
      frame->storeObjectOrAddress(1);
      break;

    case astore_2:
      frame->storeObjectOrAddress(2);
      break;

    case astore_3:
      frame->storeObjectOrAddress(3);
      break;

    case athrow: {
      Compiler::Operand* target = frame->popObject();
      c->call
        (c->constant(getThunk(t, throw_Thunk), Compiler::AddressType),
         Compiler::NoReturn,
         frame->trace(0, 0),
         0,
         Compiler::VoidType,
         2, c->register_(t->arch->thread()), target);
    } return;

    case bipush:
      frame->pushInt
        (c->constant
         (static_cast<int8_t>(codeBody(t, code, ip++)),
          Compiler::IntegerType));
      break;
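
    // checkcast leaves its operand on the stack (c->peek rather than a pop)
    // and calls a thunk which throws ClassCastException on failure; like
    // the other resolution sites, an unresolved class falls back to the
    // *FromReference variant.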
    case checkcast: {
      uint16_t index = codeReadInt16(t, code, ip);

      object reference = singletonObject
        (t, codePool(t, methodCode(t, context->method)), index - 1);

      PROTECT(t, reference);

      object class_ = resolveClassInPool(t, context->method, index - 1, false);

      object argument;
      Thunk thunk;
      if (LIKELY(class_)) {
        argument = class_;
        thunk = checkCastThunk;
      } else {
        argument = makePair(t, context->method, reference);
        thunk = checkCastFromReferenceThunk;
      }

      Compiler::Operand* instance = c->peek(1, 0);

      c->call
        (c->constant(getThunk(t, thunk), Compiler::AddressType),
         0,
         frame->trace(0, 0),
         0,
         Compiler::VoidType,
         3, c->register_(t->arch->thread()), frame->append(argument),
         instance);
    } break;
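
    // Double conversions and arithmetic lower to the compiler's floating
    // point primitives (f2f, f2i, fadd, ...) on 8-byte operands.  dcmpg and
    // dcmpl first try floatBranch, which fuses the comparison with a
    // following if<cond>, and only fall back to the compareDoubles* thunks
    // when that is not possible.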
    case d2f: {
      frame->pushInt(c->f2f(8, 4, frame->popLong()));
    } break;

    case d2i: {
      frame->pushInt(c->f2i(8, 4, frame->popLong()));
    } break;

    case d2l: {
      frame->pushLong(c->f2i(8, 8, frame->popLong()));
    } break;

    case dadd: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      frame->pushLong(c->fadd(8, a, b));
    } break;

    case dcmpg: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      if (not floatBranch(t, frame, code, ip, 8, false, a, b)) {
        frame->pushInt
          (c->call
           (c->constant
            (getThunk(t, compareDoublesGThunk), Compiler::AddressType),
            0, 0, 4, Compiler::IntegerType, 4,
            static_cast<Compiler::Operand*>(0), a,
            static_cast<Compiler::Operand*>(0), b));
      }
    } break;

    case dcmpl: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      if (not floatBranch(t, frame, code, ip, 8, true, a, b)) {
        frame->pushInt
          (c->call
           (c->constant
            (getThunk(t, compareDoublesLThunk), Compiler::AddressType),
            0, 0, 4, Compiler::IntegerType, 4,
            static_cast<Compiler::Operand*>(0), a,
            static_cast<Compiler::Operand*>(0), b));
      }
    } break;

    case dconst_0:
      frame->pushLong(c->constant(doubleToBits(0.0), Compiler::FloatType));
      break;

    case dconst_1:
      frame->pushLong(c->constant(doubleToBits(1.0), Compiler::FloatType));
      break;

    case ddiv: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      frame->pushLong(c->fdiv(8, a, b));
    } break;

    case dmul: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      frame->pushLong(c->fmul(8, a, b));
    } break;

    case dneg: {
      frame->pushLong(c->fneg(8, frame->popLong()));
    } break;

    case vm::drem: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      frame->pushLong(c->frem(8, a, b));
    } break;

    case dsub: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      frame->pushLong(c->fsub(8, a, b));
    } break;

    case dup:
      frame->dup();
      break;

    case dup_x1:
      frame->dupX1();
      break;

    case dup_x2:
      frame->dupX2();
      break;

    case dup2:
      frame->dup2();
      break;

    case dup2_x1:
      frame->dup2X1();
      break;

    case dup2_x2:
      frame->dup2X2();
      break;

    case f2d: {
      frame->pushLong(c->f2f(4, 8, frame->popInt()));
    } break;

    case f2i: {
      frame->pushInt(c->f2i(4, 4, frame->popInt()));
    } break;

    case f2l: {
      frame->pushLong(c->f2i(4, 8, frame->popInt()));
    } break;

    case fadd: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      frame->pushInt(c->fadd(4, a, b));
    } break;

    case fcmpg: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      if (not floatBranch(t, frame, code, ip, 4, false, a, b)) {
        frame->pushInt
          (c->call
           (c->constant
            (getThunk(t, compareFloatsGThunk), Compiler::AddressType),
            0, 0, 4, Compiler::IntegerType, 2, a, b));
      }
    } break;

    case fcmpl: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      if (not floatBranch(t, frame, code, ip, 4, true, a, b)) {
        frame->pushInt
          (c->call
           (c->constant
            (getThunk(t, compareFloatsLThunk), Compiler::AddressType),
            0, 0, 4, Compiler::IntegerType, 2, a, b));
      }
    } break;

    case fconst_0:
      frame->pushInt(c->constant(floatToBits(0.0), Compiler::FloatType));
      break;

    case fconst_1:
      frame->pushInt(c->constant(floatToBits(1.0), Compiler::FloatType));
      break;

    case fconst_2:
      frame->pushInt(c->constant(floatToBits(2.0), Compiler::FloatType));
      break;

    case fdiv: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      frame->pushInt(c->fdiv(4, a, b));
    } break;

    case fmul: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      frame->pushInt(c->fmul(4, a, b));
    } break;

    case fneg: {
      frame->pushInt(c->fneg(4, frame->popInt()));
    } break;

    case vm::frem: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      frame->pushInt(c->frem(4, a, b));
    } break;

    case fsub: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      frame->pushInt(c->fsub(4, a, b));
    } break;
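
    // getfield/getstatic: when the field resolves at compile time, the value
    // is loaded directly from the instance or from the class static table at
    // targetFieldOffset.  Volatile 64-bit fields on 32-bit targets are
    // bracketed by acquiring and releasing the field's monitor so the read
    // is atomic; other volatile reads just emit a load barrier.  getstatic
    // may also need to run the class initializer first (tryInitClass).
    // Unresolved fields defer to the get*FieldValueFromReference thunks.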
    case getfield:
    case getstatic: {
      uint16_t index = codeReadInt16(t, code, ip);

      object reference = singletonObject
        (t, codePool(t, methodCode(t, context->method)), index - 1);

      PROTECT(t, reference);

      object field = resolveField(t, context->method, index - 1, false);

      if (LIKELY(field)) {
        if ((fieldFlags(t, field) & ACC_VOLATILE)
            and TargetBytesPerWord == 4
            and (fieldCode(t, field) == DoubleField
                 or fieldCode(t, field) == LongField))
        {
          PROTECT(t, field);

          c->call
            (c->constant
             (getThunk(t, acquireMonitorForObjectThunk),
              Compiler::AddressType),
             0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
             c->register_(t->arch->thread()),
             frame->append(field));
        }

        Compiler::Operand* table;

        if (instruction == getstatic) {
          assert(t, fieldFlags(t, field) & ACC_STATIC);

          PROTECT(t, field);

          if (fieldClass(t, field) != methodClass(t, context->method)
              and classNeedsInit(t, fieldClass(t, field)))
          {
            c->call
              (c->constant
               (getThunk(t, tryInitClassThunk), Compiler::AddressType),
               0,
               frame->trace(0, 0),
               0,
               Compiler::VoidType,
               2, c->register_(t->arch->thread()),
               frame->append(fieldClass(t, field)));
          }

          table = frame->append(classStaticTable(t, fieldClass(t, field)));
        } else {
          assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);

          table = frame->popObject();

          if (inTryBlock(t, code, ip - 3)) {
            c->saveLocals();
            frame->trace(0, 0);
          }
        }

        switch (fieldCode(t, field)) {
        case ByteField:
        case BooleanField:
          frame->pushInt
            (c->load
             (1, 1, c->memory
              (table, Compiler::IntegerType, targetFieldOffset
               (context, field), 0, 1), TargetBytesPerWord));
          break;

        case CharField:
          frame->pushInt
            (c->loadz
             (2, 2, c->memory
              (table, Compiler::IntegerType, targetFieldOffset
               (context, field), 0, 1), TargetBytesPerWord));
          break;

        case ShortField:
          frame->pushInt
            (c->load
             (2, 2, c->memory
              (table, Compiler::IntegerType, targetFieldOffset
               (context, field), 0, 1), TargetBytesPerWord));
          break;

        case FloatField:
          frame->pushInt
            (c->load
             (4, 4, c->memory
              (table, Compiler::FloatType, targetFieldOffset
               (context, field), 0, 1), TargetBytesPerWord));
          break;

        case IntField:
          frame->pushInt
            (c->load
             (4, 4, c->memory
              (table, Compiler::IntegerType, targetFieldOffset
               (context, field), 0, 1), TargetBytesPerWord));
          break;

        case DoubleField:
          frame->pushLong
            (c->load
             (8, 8, c->memory
              (table, Compiler::FloatType, targetFieldOffset
               (context, field), 0, 1), 8));
          break;

        case LongField:
          frame->pushLong
            (c->load
             (8, 8, c->memory
              (table, Compiler::IntegerType, targetFieldOffset
               (context, field), 0, 1), 8));
          break;

        case ObjectField:
          frame->pushObject
            (c->load
             (TargetBytesPerWord, TargetBytesPerWord,
              c->memory
              (table, Compiler::ObjectType, targetFieldOffset
               (context, field), 0, 1), TargetBytesPerWord));
          break;

        default:
          abort(t);
        }

        if (fieldFlags(t, field) & ACC_VOLATILE) {
          if (TargetBytesPerWord == 4
              and (fieldCode(t, field) == DoubleField
                   or fieldCode(t, field) == LongField))
          {
            c->call
              (c->constant
               (getThunk(t, releaseMonitorForObjectThunk),
                Compiler::AddressType),
               0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
               c->register_(t->arch->thread()),
               frame->append(field));
          } else {
            c->loadBarrier();
          }
        }
      } else {
        int fieldCode = vm::fieldCode
          (t, byteArrayBody(t, referenceSpec(t, reference), 0));

        object pair = makePair(t, context->method, reference);

        unsigned rSize = resultSize(t, fieldCode);
        Compiler::OperandType rType = operandTypeForFieldCode(t, fieldCode);

        Compiler::Operand* result;
        if (instruction == getstatic) {
          result = c->call
            (c->constant
             (getThunk(t, getStaticFieldValueFromReferenceThunk),
              Compiler::AddressType),
             0, frame->trace(0, 0), rSize, rType, 2,
             c->register_(t->arch->thread()), frame->append(pair));
        } else {
          Compiler::Operand* instance = frame->popObject();

          result = c->call
            (c->constant
             (getThunk(t, getFieldValueFromReferenceThunk),
              Compiler::AddressType),
             0, frame->trace(0, 0), rSize, rType, 3,
             c->register_(t->arch->thread()), frame->append(pair),
             instance);
        }

        pushReturnValue(t, frame, fieldCode, result);
      }
    } break;
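
    // Branches: goto uses a 16-bit offset and goto_w a 32-bit one, both
    // relative to the opcode itself (hence ip - 3 and ip - 5).  goto simply
    // continues compiling at the target ip, while the conditional branches
    // emit a jumpIf* and then compile the taken target via
    // saveStateAndCompile so the frame state at the branch carries over.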
    case goto_: {
      uint32_t offset = codeReadInt16(t, code, ip);
      uint32_t newIp = (ip - 3) + offset;
      assert(t, newIp < codeLength(t, code));

      c->jmp(frame->machineIp(newIp));
      ip = newIp;
    } break;

    case goto_w: {
      uint32_t offset = codeReadInt32(t, code, ip);
      uint32_t newIp = (ip - 5) + offset;
      assert(t, newIp < codeLength(t, code));

      c->jmp(frame->machineIp(newIp));
      ip = newIp;
    } break;

    case i2b: {
      frame->pushInt
        (c->load(TargetBytesPerWord, 1, frame->popInt(), TargetBytesPerWord));
    } break;

    case i2c: {
      frame->pushInt
        (c->loadz(TargetBytesPerWord, 2, frame->popInt(), TargetBytesPerWord));
    } break;

    case i2d: {
      frame->pushLong(c->i2f(4, 8, frame->popInt()));
    } break;

    case i2f: {
      frame->pushInt(c->i2f(4, 4, frame->popInt()));
    } break;

    case i2l:
      frame->pushLong(c->load(TargetBytesPerWord, 4, frame->popInt(), 8));
      break;

    case i2s: {
      frame->pushInt
        (c->load(TargetBytesPerWord, 2, frame->popInt(), TargetBytesPerWord));
    } break;

    case iadd: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->add(4, a, b));
    } break;

    case iand: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->and_(4, a, b));
    } break;

    case iconst_m1:
      frame->pushInt(c->constant(-1, Compiler::IntegerType));
      break;

    case iconst_0:
      frame->pushInt(c->constant(0, Compiler::IntegerType));
      break;

    case iconst_1:
      frame->pushInt(c->constant(1, Compiler::IntegerType));
      break;

    case iconst_2:
      frame->pushInt(c->constant(2, Compiler::IntegerType));
      break;

    case iconst_3:
      frame->pushInt(c->constant(3, Compiler::IntegerType));
      break;

    case iconst_4:
      frame->pushInt(c->constant(4, Compiler::IntegerType));
      break;

    case iconst_5:
      frame->pushInt(c->constant(5, Compiler::IntegerType));
      break;

    case idiv: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      if (inTryBlock(t, code, ip - 1)) {
        c->saveLocals();
        frame->trace(0, 0);
      }

      frame->pushInt(c->div(4, a, b));
    } break;

    case if_acmpeq:
    case if_acmpne: {
      uint32_t offset = codeReadInt16(t, code, ip);
      uint32_t newIp = (ip - 3) + offset;
      assert(t, newIp < codeLength(t, code));

      Compiler::Operand* a = frame->popObject();
      Compiler::Operand* b = frame->popObject();
      Compiler::Operand* target = frame->machineIp(newIp);

      if (instruction == if_acmpeq) {
        c->jumpIfEqual(TargetBytesPerWord, a, b, target);
      } else {
        c->jumpIfNotEqual(TargetBytesPerWord, a, b, target);
      }

      saveStateAndCompile(t, frame, newIp);
    } break;

    case if_icmpeq:
    case if_icmpne:
    case if_icmpgt:
    case if_icmpge:
    case if_icmplt:
    case if_icmple: {
      uint32_t offset = codeReadInt16(t, code, ip);
      uint32_t newIp = (ip - 3) + offset;
      assert(t, newIp < codeLength(t, code));

      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      Compiler::Operand* target = frame->machineIp(newIp);

      switch (instruction) {
      case if_icmpeq:
        c->jumpIfEqual(4, a, b, target);
        break;
      case if_icmpne:
        c->jumpIfNotEqual(4, a, b, target);
        break;
      case if_icmpgt:
        c->jumpIfGreater(4, a, b, target);
        break;
      case if_icmpge:
        c->jumpIfGreaterOrEqual(4, a, b, target);
        break;
      case if_icmplt:
        c->jumpIfLess(4, a, b, target);
        break;
      case if_icmple:
        c->jumpIfLessOrEqual(4, a, b, target);
        break;
      default:
        abort(t);
      }

      saveStateAndCompile(t, frame, newIp);
    } break;

    case ifeq:
    case ifne:
    case ifgt:
    case ifge:
    case iflt:
    case ifle: {
      uint32_t offset = codeReadInt16(t, code, ip);
      uint32_t newIp = (ip - 3) + offset;
      assert(t, newIp < codeLength(t, code));

      Compiler::Operand* target = frame->machineIp(newIp);

      Compiler::Operand* a = c->constant(0, Compiler::IntegerType);
      Compiler::Operand* b = frame->popInt();

      switch (instruction) {
      case ifeq:
        c->jumpIfEqual(4, a, b, target);
        break;
      case ifne:
        c->jumpIfNotEqual(4, a, b, target);
        break;
      case ifgt:
        c->jumpIfGreater(4, a, b, target);
        break;
      case ifge:
        c->jumpIfGreaterOrEqual(4, a, b, target);
        break;
      case iflt:
        c->jumpIfLess(4, a, b, target);
        break;
      case ifle:
        c->jumpIfLessOrEqual(4, a, b, target);
        break;
      default:
        abort(t);
      }

      saveStateAndCompile(t, frame, newIp);
    } break;

    case ifnull:
    case ifnonnull: {
      uint32_t offset = codeReadInt16(t, code, ip);
      uint32_t newIp = (ip - 3) + offset;
      assert(t, newIp < codeLength(t, code));

      Compiler::Operand* a = c->constant(0, Compiler::ObjectType);
      Compiler::Operand* b = frame->popObject();
      Compiler::Operand* target = frame->machineIp(newIp);

      if (instruction == ifnull) {
        c->jumpIfEqual(TargetBytesPerWord, a, b, target);
      } else {
        c->jumpIfNotEqual(TargetBytesPerWord, a, b, target);
      }

      saveStateAndCompile(t, frame, newIp);
    } break;

    case iinc: {
      uint8_t index = codeBody(t, code, ip++);
      int8_t count = codeBody(t, code, ip++);

      storeLocal
        (context, 1,
         c->add
         (4, c->constant(count, Compiler::IntegerType),
          loadLocal(context, 1, index)),
         index);
    } break;

    case iload:
    case fload:
      frame->loadInt(codeBody(t, code, ip++));
      break;

    case iload_0:
    case fload_0:
      frame->loadInt(0);
      break;

    case iload_1:
    case fload_1:
      frame->loadInt(1);
      break;

    case iload_2:
    case fload_2:
      frame->loadInt(2);
      break;

    case iload_3:
    case fload_3:
      frame->loadInt(3);
      break;

    case imul: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->mul(4, a, b));
    } break;

    case ineg: {
      frame->pushInt(c->neg(4, frame->popInt()));
    } break;
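
    // instanceof always goes through a helper thunk; the call only needs a
    // trace point when the class is unresolved, since only the
    // *FromReference path can trigger resolution (and therefore allocation
    // or exceptions) at run time.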
    case instanceof: {
      uint16_t index = codeReadInt16(t, code, ip);

      object reference = singletonObject
        (t, codePool(t, methodCode(t, context->method)), index - 1);

      PROTECT(t, reference);

      object class_ = resolveClassInPool(t, context->method, index - 1, false);

      Compiler::Operand* instance = frame->popObject();

      object argument;
      Thunk thunk;
      TraceElement* trace;
      if (LIKELY(class_)) {
        argument = class_;
        thunk = instanceOf64Thunk;
        trace = 0;
      } else {
        argument = makePair(t, context->method, reference);
        thunk = instanceOfFromReferenceThunk;
        trace = frame->trace(0, 0);
      }

      frame->pushInt
        (c->call
         (c->constant(getThunk(t, thunk), Compiler::AddressType),
          0, trace, 4, Compiler::IntegerType,
          3, c->register_(t->arch->thread()), frame->append(argument),
          instance));
    } break;
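
    // Method invocation: each invoke* clears context->leaf, resolves the
    // callee if possible, and detects whether the call can be compiled as a
    // tail call.  Resolved targets are called directly (or through an
    // interface or vtable lookup); unresolved ones go through the
    // corresponding find*FromReference thunk with a (method, reference)
    // pair.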
    case invokeinterface: {
      context->leaf = false;

      uint16_t index = codeReadInt16(t, code, ip);
      ip += 2;

      object reference = singletonObject
        (t, codePool(t, methodCode(t, context->method)), index - 1);

      PROTECT(t, reference);

      object target = resolveMethod(t, context->method, index - 1, false);

      object argument;
      Thunk thunk;
      unsigned parameterFootprint;
      int returnCode;
      bool tailCall;
      if (LIKELY(target)) {
        assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);

        argument = target;
        thunk = findInterfaceMethodFromInstanceThunk;
        parameterFootprint = methodParameterFootprint(t, target);
        returnCode = methodReturnCode(t, target);
        tailCall = isTailCall(t, code, ip, context->method, target);
      } else {
        argument = makePair(t, context->method, reference);
        thunk = findInterfaceMethodFromInstanceAndReferenceThunk;
        parameterFootprint = methodReferenceParameterFootprint
          (t, reference, false);
        returnCode = methodReferenceReturnCode(t, reference);
        tailCall = isReferenceTailCall
          (t, code, ip, context->method, reference);
      }

      unsigned rSize = resultSize(t, returnCode);

      Compiler::Operand* result = c->stackCall
        (c->call
         (c->constant(getThunk(t, thunk), Compiler::AddressType),
          0,
          frame->trace(0, 0),
          TargetBytesPerWord,
          Compiler::AddressType,
          3, c->register_(t->arch->thread()), frame->append(argument),
          c->peek(1, parameterFootprint - 1)),
         tailCall ? Compiler::TailJump : 0,
         frame->trace(0, 0),
         rSize,
         operandTypeForFieldCode(t, returnCode),
         parameterFootprint);

      frame->pop(parameterFootprint);

      if (rSize) {
        pushReturnValue(t, frame, returnCode, result);
      }
    } break;

    case invokespecial: {
      context->leaf = false;

      uint16_t index = codeReadInt16(t, code, ip);

      object reference = singletonObject
        (t, codePool(t, methodCode(t, context->method)), index - 1);

      PROTECT(t, reference);

      object target = resolveMethod(t, context->method, index - 1, false);

      if (LIKELY(target)) {
        object class_ = methodClass(t, context->method);
        if (isSpecialMethod(t, target, class_)) {
          target = findVirtualMethod(t, target, classSuper(t, class_));
        }

        assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);

        bool tailCall = isTailCall(t, code, ip, context->method, target);

        if (UNLIKELY(methodAbstract(t, target))) {
          compileDirectAbstractInvoke
            (t, frame, getMethodAddressThunk, target, tailCall);
        } else {
          compileDirectInvoke(t, frame, target, tailCall);
        }
      } else {
        compileDirectReferenceInvoke
          (t, frame, findSpecialMethodFromReferenceThunk, reference, false,
           isReferenceTailCall(t, code, ip, context->method, reference));
      }
    } break;

    case invokestatic: {
      context->leaf = false;

      uint16_t index = codeReadInt16(t, code, ip);

      object reference = singletonObject
        (t, codePool(t, methodCode(t, context->method)), index - 1);

      PROTECT(t, reference);

      object target = resolveMethod(t, context->method, index - 1, false);

      if (LIKELY(target)) {
        assert(t, methodFlags(t, target) & ACC_STATIC);

        if (not intrinsic(t, frame, target)) {
          bool tailCall = isTailCall(t, code, ip, context->method, target);
          compileDirectInvoke(t, frame, target, tailCall);
        }
      } else {
        compileDirectReferenceInvoke
          (t, frame, findStaticMethodFromReferenceThunk, reference, true,
           isReferenceTailCall(t, code, ip, context->method, reference));
      }
    } break;
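
    // invokevirtual dispatches through the vtable: the receiver's class
    // pointer is loaded from the object header, masked with
    // TargetPointerMask to clear the header's low bits, and then indexed at
    // TargetClassVtable + methodOffset * TargetBytesPerWord.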
    case invokevirtual: {
      context->leaf = false;

      uint16_t index = codeReadInt16(t, code, ip);

      object reference = singletonObject
        (t, codePool(t, methodCode(t, context->method)), index - 1);

      PROTECT(t, reference);

      object target = resolveMethod(t, context->method, index - 1, false);

      if (LIKELY(target)) {
        assert(t, (methodFlags(t, target) & ACC_STATIC) == 0);

        if (not intrinsic(t, frame, target)) {
          bool tailCall = isTailCall(t, code, ip, context->method, target);

          if (LIKELY(methodVirtual(t, target))) {
            unsigned parameterFootprint = methodParameterFootprint(t, target);

            unsigned offset = TargetClassVtable
              + (methodOffset(t, target) * TargetBytesPerWord);

            Compiler::Operand* instance = c->peek(1, parameterFootprint - 1);

            unsigned rSize = resultSize(t, methodReturnCode(t, target));

            Compiler::Operand* result = c->stackCall
              (c->memory
               (c->and_
                (TargetBytesPerWord, c->constant
                 (TargetPointerMask, Compiler::IntegerType),
                 c->memory(instance, Compiler::ObjectType, 0, 0, 1)),
                Compiler::ObjectType, offset, 0, 1),
               tailCall ? Compiler::TailJump : 0,
               frame->trace(0, 0),
               rSize,
               operandTypeForFieldCode(t, methodReturnCode(t, target)),
               parameterFootprint);

            frame->pop(parameterFootprint);

            if (rSize) {
              pushReturnValue(t, frame, methodReturnCode(t, target), result);
            }
          } else {
            // OpenJDK generates invokevirtual calls to private methods
            // (e.g. readObject and writeObject for serialization), so
            // we must handle such cases here.

            compileDirectInvoke(t, frame, target, tailCall);
          }
        }
      } else {
        PROTECT(t, reference);

        object pair = makePair(t, context->method, reference);

        compileReferenceInvoke
          (t, frame, c->call
           (c->constant(getThunk(t, findVirtualMethodFromReferenceThunk),
                        Compiler::AddressType),
            0,
            frame->trace(0, 0),
            TargetBytesPerWord,
            Compiler::AddressType,
            3, c->register_(t->arch->thread()), frame->append(pair),
            c->peek(1, methodReferenceParameterFootprint
                    (t, reference, false) - 1)),
           reference, false, isReferenceTailCall
           (t, code, ip, context->method, reference));
      }
    } break;
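
    // The remaining 32-bit integer opcodes lower to single compiler
    // operations.  irem (like idiv and ldiv above) saves the locals and
    // records a trace point when it appears inside a try block, since the
    // division may throw ArithmeticException and must unwind cleanly.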
    case ior: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->or_(4, a, b));
    } break;

    case irem: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();

      if (inTryBlock(t, code, ip - 1)) {
        c->saveLocals();
        frame->trace(0, 0);
      }

      frame->pushInt(c->rem(4, a, b));
    } break;

    case ireturn:
    case freturn: {
      handleExit(t, frame);
      c->return_(4, frame->popInt());
    } return;

    case ishl: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->shl(4, a, b));
    } break;

    case ishr: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->shr(4, a, b));
    } break;

    case istore:
    case fstore:
      frame->storeInt(codeBody(t, code, ip++));
      break;

    case istore_0:
    case fstore_0:
      frame->storeInt(0);
      break;

    case istore_1:
    case fstore_1:
      frame->storeInt(1);
      break;

    case istore_2:
    case fstore_2:
      frame->storeInt(2);
      break;

    case istore_3:
    case fstore_3:
      frame->storeInt(3);
      break;

    case isub: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->sub(4, a, b));
    } break;

    case iushr: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->ushr(4, a, b));
    } break;

    case ixor: {
      Compiler::Operand* a = frame->popInt();
      Compiler::Operand* b = frame->popInt();
      frame->pushInt(c->xor_(4, a, b));
    } break;
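
    // jsr/jsr_w implement finally blocks in older class files: the return
    // address is modeled by startSubroutine, the subroutine body is compiled
    // at the target ip, and endSubroutine restores the caller's state.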
    case jsr:
    case jsr_w: {
      uint32_t thisIp;
      uint32_t newIp;

      if (instruction == jsr) {
        uint32_t offset = codeReadInt16(t, code, ip);
        thisIp = ip - 3;
        newIp = thisIp + offset;
      } else {
        uint32_t offset = codeReadInt32(t, code, ip);
        thisIp = ip - 5;
        newIp = thisIp + offset;
      }

      assert(t, newIp < codeLength(t, code));

      unsigned start = frame->startSubroutine(newIp, c->machineIp(ip));

      c->jmp(frame->machineIp(newIp));

      saveStateAndCompile(t, frame, newIp);

      frame->endSubroutine(start);
    } break;

    case l2d: {
      frame->pushLong(c->i2f(8, 8, frame->popLong()));
    } break;

    case l2f: {
      frame->pushInt(c->i2f(8, 4, frame->popLong()));
    } break;

    case l2i:
      frame->pushInt(c->load(8, 8, frame->popLong(), TargetBytesPerWord));
      break;

    case ladd: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->add(8, a, b));
    } break;

    case land: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();
      frame->pushLong(c->and_(8, a, b));
    } break;

    case lcmp: {
      Compiler::Operand* a = frame->popLong();
      Compiler::Operand* b = frame->popLong();

      if (not integerBranch(t, frame, code, ip, 8, a, b)) {
        frame->pushInt
          (c->call
           (c->constant
            (getThunk(t, compareLongsThunk), Compiler::AddressType),
            0, 0, 4, Compiler::IntegerType, 4,
            static_cast<Compiler::Operand*>(0), a,
            static_cast<Compiler::Operand*>(0), b));
      }
    } break;

    case lconst_0:
      frame->pushLong(c->constant(0, Compiler::IntegerType));
      break;

    case lconst_1:
      frame->pushLong(c->constant(1, Compiler::IntegerType));
      break;
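
    // ldc/ldc_w push a constant pool entry.  Object entries are read after
    // a load barrier (another thread may be resolving the pool
    // concurrently); class constants are converted to java.lang.Class
    // instances through the getJClass thunks, and still-unresolved
    // references go through getJClassFromReference at run time.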
    case ldc:
    case ldc_w: {
      uint16_t index;

      if (instruction == ldc) {
        index = codeBody(t, code, ip++);
      } else {
        index = codeReadInt16(t, code, ip);
      }

      object pool = codePool(t, code);

      if (singletonIsObject(t, pool, index - 1)) {
        object v = singletonObject(t, pool, index - 1);

        loadMemoryBarrier();

        if (objectClass(t, v) == type(t, Machine::ReferenceType)) {
          object reference = v;
          PROTECT(t, reference);

          v = resolveClassInPool(t, context->method, index - 1, false);

          if (UNLIKELY(v == 0)) {
            frame->pushObject
              (c->call
               (c->constant
                (getThunk(t, getJClassFromReferenceThunk),
                 Compiler::AddressType),
                0,
                frame->trace(0, 0),
                TargetBytesPerWord,
                Compiler::ObjectType,
                2, c->register_(t->arch->thread()),
                frame->append(makePair(t, context->method, reference))));
          }
        }

        if (v) {
          if (objectClass(t, v) == type(t, Machine::ClassType)) {
            frame->pushObject
              (c->call
               (c->constant
                (getThunk(t, getJClass64Thunk), Compiler::AddressType),
                0,
                frame->trace(0, 0),
                TargetBytesPerWord,
                Compiler::ObjectType,
                2, c->register_(t->arch->thread()), frame->append(v)));
          } else {
            frame->pushObject(frame->append(v));
          }
        }
      } else {
        frame->pushInt
          (c->constant
           (singletonValue(t, pool, index - 1),
            singletonBit(t, pool, poolSize(t, pool), index - 1)
            ? Compiler::FloatType : Compiler::IntegerType));
      }
    } break;

    case ldc2_w: {
      uint16_t index = codeReadInt16(t, code, ip);

      object pool = codePool(t, code);

      uint64_t v;
      memcpy(&v, &singletonValue(t, pool, index - 1), 8);
      frame->pushLong
        (c->constant
         (v, singletonBit(t, pool, poolSize(t, pool), index - 1)
          ? Compiler::FloatType : Compiler::IntegerType));
    } break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case ldiv_: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2010-12-20 00:47:21 +00:00
|
|
|
|
|
|
|
if (inTryBlock(t, code, ip - 1)) {
|
|
|
|
c->saveLocals();
|
|
|
|
frame->trace(0, 0);
|
|
|
|
}
|
|
|
|
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->div(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lload:
|
|
|
|
case dload:
|
|
|
|
frame->loadLong(codeBody(t, code, ip++));
|
|
|
|
break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lload_0:
|
|
|
|
case dload_0:
|
|
|
|
frame->loadLong(0);
|
|
|
|
break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lload_1:
|
|
|
|
case dload_1:
|
|
|
|
frame->loadLong(1);
|
|
|
|
break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lload_2:
|
|
|
|
case dload_2:
|
|
|
|
frame->loadLong(2);
|
|
|
|
break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lload_3:
|
|
|
|
case dload_3:
|
|
|
|
frame->loadLong(3);
|
|
|
|
break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lmul: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->mul(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lneg:
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->neg(8, frame->popLong()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lookupswitch: {
|
|
|
|
int32_t base = ip - 1;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
ip = (ip + 3) & ~3; // pad to four byte boundary
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* key = frame->popInt();
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
|
|
|
|
assert(t, defaultIp < codeLength(t, code));
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2008-12-02 16:45:20 +00:00
|
|
|
Compiler::Operand* default_ = frame->addressOperand
|
2011-09-20 22:30:30 +00:00
|
|
|
(frame->addressPromise(c->machineIp(defaultIp)));
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
int32_t pairCount = codeReadInt32(t, code, ip);
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2009-08-13 01:32:12 +00:00
|
|
|
if (pairCount) {
|
2011-09-20 22:30:30 +00:00
|
|
|
Promise* start = 0;
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uint32_t, ipTable, pairCount);
|
2009-08-13 01:32:12 +00:00
|
|
|
for (int32_t i = 0; i < pairCount; ++i) {
|
|
|
|
unsigned index = ip + (i * 8);
|
|
|
|
int32_t key = codeReadInt32(t, code, index);
|
|
|
|
uint32_t newIp = base + codeReadInt32(t, code, index);
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(ipTable)[i] = newIp;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2009-08-13 01:32:12 +00:00
|
|
|
Promise* p = c->poolAppend(key);
|
|
|
|
if (i == 0) {
|
2011-09-20 22:30:30 +00:00
|
|
|
start = p;
|
2009-08-13 01:32:12 +00:00
|
|
|
}
|
2011-09-20 22:30:30 +00:00
|
|
|
c->poolAppendPromise
|
|
|
|
(frame->addressPromise(c->machineIp(newIp)));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2009-08-13 01:32:12 +00:00
|
|
|
assert(t, start);
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
Compiler::Operand* address = c->call
|
|
|
|
(c->constant(getThunk(t, lookUpAddressThunk), Compiler::AddressType),
|
|
|
|
0, 0, TargetBytesPerWord, Compiler::AddressType,
|
|
|
|
4, key, frame->absoluteAddressOperand(start),
|
|
|
|
c->constant(pairCount, Compiler::IntegerType), default_);
|
|
|
|
|
2009-08-13 01:32:12 +00:00
|
|
|
c->jmp
|
2011-09-20 22:30:30 +00:00
|
|
|
(context->bootContext ? c->add
|
|
|
|
(TargetBytesPerWord, c->memory
|
|
|
|
(c->register_(t->arch->thread()), Compiler::AddressType,
|
|
|
|
TargetThreadCodeImage), address)
|
|
|
|
: address);
|
2007-12-16 00:24:15 +00:00
|
|
|
|
2009-08-13 01:32:12 +00:00
|
|
|
Compiler::State* state = c->saveState();
|
2008-09-13 21:09:26 +00:00
|
|
|
|
2009-08-13 01:32:12 +00:00
|
|
|
for (int32_t i = 0; i < pairCount; ++i) {
|
2009-08-27 00:26:44 +00:00
|
|
|
compile(t, frame, RUNTIME_ARRAY_BODY(ipTable)[i]);
|
2008-09-13 21:09:26 +00:00
|
|
|
|
2009-08-13 01:32:12 +00:00
|
|
|
c->restoreState(state);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// a switch statement with no cases, apparently
|
|
|
|
c->jmp(default_);
|
2007-12-16 00:24:15 +00:00
|
|
|
}
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
ip = defaultIp;
|
|
|
|
} break;
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lor: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->or_(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lrem: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2010-12-20 00:47:21 +00:00
|
|
|
|
|
|
|
if (inTryBlock(t, code, ip - 1)) {
|
|
|
|
c->saveLocals();
|
|
|
|
frame->trace(0, 0);
|
|
|
|
}
|
|
|
|
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->rem(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lreturn:
|
2007-12-12 22:19:13 +00:00
|
|
|
case dreturn: {
|
2009-04-26 01:51:33 +00:00
|
|
|
handleExit(t, frame);
|
2009-05-31 20:15:45 +00:00
|
|
|
c->return_(8, frame->popLong());
|
2007-12-12 22:19:13 +00:00
|
|
|
} return;
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lshl: {
|
2008-04-28 15:53:48 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* b = frame->popLong();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->shl(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lshr: {
|
2008-04-28 15:53:48 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* b = frame->popLong();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->shr(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lstore:
|
|
|
|
case dstore:
|
|
|
|
frame->storeLong(codeBody(t, code, ip++));
|
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lstore_0:
|
|
|
|
case dstore_0:
|
|
|
|
frame->storeLong(0);
|
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lstore_1:
|
|
|
|
case dstore_1:
|
|
|
|
frame->storeLong(1);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case lstore_2:
|
|
|
|
case dstore_2:
|
|
|
|
frame->storeLong(2);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case lstore_3:
|
|
|
|
case dstore_3:
|
|
|
|
frame->storeLong(3);
|
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lsub: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->sub(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lushr: {
|
2008-04-28 15:53:48 +00:00
|
|
|
Compiler::Operand* a = frame->popInt();
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* b = frame->popLong();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->ushr(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lxor: {
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* a = frame->popLong();
|
|
|
|
Compiler::Operand* b = frame->popLong();
|
2008-02-12 02:06:12 +00:00
|
|
|
frame->pushLong(c->xor_(8, a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case monitorenter: {
|
2009-03-18 22:24:13 +00:00
|
|
|
Compiler::Operand* target = frame->popObject();
|
2008-02-11 17:21:41 +00:00
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, acquireMonitorForObjectThunk), Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
|
|
|
|
c->register_(t->arch->thread()), target);
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-29 21:08:29 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case monitorexit: {
|
2009-03-18 22:24:13 +00:00
|
|
|
Compiler::Operand* target = frame->popObject();
|
2008-02-11 17:21:41 +00:00
|
|
|
c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(getThunk(t, releaseMonitorForObjectThunk), Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
|
|
|
|
c->register_(t->arch->thread()), target);
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-29 21:08:29 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case multianewarray: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
|
|
|
uint8_t dimensions = codeBody(t, code, ip++);
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
object reference = singletonObject
|
|
|
|
(t, codePool(t, methodCode(t, context->method)), index - 1);
|
|
|
|
|
|
|
|
PROTECT(t, reference);
|
|
|
|
|
|
|
|
object class_ = resolveClassInPool(t, context->method, index - 1, false);
|
|
|
|
|
|
|
|
object argument;
|
|
|
|
Thunk thunk;
|
|
|
|
if (LIKELY(class_)) {
|
|
|
|
argument = class_;
|
|
|
|
thunk = makeMultidimensionalArrayThunk;
|
|
|
|
} else {
|
|
|
|
argument = makePair(t, context->method, reference);
|
|
|
|
thunk = makeMultidimensionalArrayFromReferenceThunk;
|
|
|
|
}
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2009-03-01 22:39:52 +00:00
|
|
|
unsigned offset
|
2009-05-17 23:43:48 +00:00
|
|
|
= localOffset
|
|
|
|
(t, localSize(t, context->method) + c->topOfStack(), context->method)
|
2009-03-01 22:39:52 +00:00
|
|
|
+ t->arch->frameReturnAddressSize();
|
2008-11-11 00:07:44 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
Compiler::Operand* result = c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
2011-03-15 23:52:02 +00:00
|
|
|
(getThunk(t, thunk), Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::ObjectType,
|
2011-03-15 23:52:02 +00:00
|
|
|
4, c->register_(t->arch->thread()), frame->append(argument),
|
2009-09-20 21:43:32 +00:00
|
|
|
c->constant(dimensions, Compiler::IntegerType),
|
|
|
|
c->constant(offset, Compiler::IntegerType));
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
frame->pop(dimensions);
|
|
|
|
frame->pushObject(result);
|
|
|
|
} break;
|
2007-10-12 00:30:46 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case new_: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
object reference = singletonObject
|
|
|
|
(t, codePool(t, methodCode(t, context->method)), index - 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
PROTECT(t, reference);
|
|
|
|
|
|
|
|
object class_ = resolveClassInPool(t, context->method, index - 1, false);
|
|
|
|
|
|
|
|
object argument;
|
|
|
|
Thunk thunk;
|
|
|
|
if (LIKELY(class_)) {
|
|
|
|
argument = class_;
|
|
|
|
if (classVmFlags(t, class_) & (WeakReferenceFlag | HasFinalizerFlag)) {
|
|
|
|
thunk = makeNewGeneral64Thunk;
|
|
|
|
} else {
|
|
|
|
thunk = makeNew64Thunk;
|
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} else {
|
2011-03-15 23:52:02 +00:00
|
|
|
argument = makePair(t, context->method, reference);
|
|
|
|
thunk = makeNewFromReferenceThunk;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2011-03-15 23:52:02 +00:00
|
|
|
|
|
|
|
frame->pushObject
|
|
|
|
(c->call
|
|
|
|
(c->constant(getThunk(t, thunk), Compiler::AddressType),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord,
|
2011-03-15 23:52:02 +00:00
|
|
|
Compiler::ObjectType,
|
|
|
|
2, c->register_(t->arch->thread()), frame->append(argument)));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case newarray: {
|
|
|
|
uint8_t type = codeBody(t, code, ip++);
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2008-04-26 20:56:03 +00:00
|
|
|
Compiler::Operand* length = frame->popInt();
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2008-02-11 17:21:41 +00:00
|
|
|
frame->pushObject
|
|
|
|
(c->call
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant(getThunk(t, makeBlankArrayThunk), Compiler::AddressType),
|
2008-05-31 22:14:27 +00:00
|
|
|
0,
|
2009-03-31 20:15:08 +00:00
|
|
|
frame->trace(0, 0),
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord,
|
2009-09-20 21:43:32 +00:00
|
|
|
Compiler::ObjectType,
|
|
|
|
3, c->register_(t->arch->thread()),
|
|
|
|
c->constant(type, Compiler::IntegerType), length));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case nop: break;
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case pop_:
|
|
|
|
frame->pop(1);
|
|
|
|
break;
|
2007-09-27 22:20:54 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case pop2:
|
|
|
|
frame->pop(2);
|
|
|
|
break;
|
2007-09-27 22:20:54 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case putfield:
|
|
|
|
case putstatic: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
object reference = singletonObject
|
|
|
|
(t, codePool(t, methodCode(t, context->method)), index - 1);
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
PROTECT(t, reference);
|
2007-09-27 22:20:54 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
object field = resolveField(t, context->method, index - 1, false);
|
2009-01-10 19:25:52 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
if (LIKELY(field)) {
|
|
|
|
int fieldCode = vm::fieldCode(t, field);
|
2010-11-26 19:41:31 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
object staticTable = 0;
|
2007-09-27 22:20:54 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
if (instruction == putstatic) {
|
|
|
|
assert(t, fieldFlags(t, field) & ACC_STATIC);
|
2009-02-09 23:22:01 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
if (fieldClass(t, field) != methodClass(t, context->method)
|
|
|
|
and classNeedsInit(t, fieldClass(t, field)))
|
|
|
|
{
|
|
|
|
PROTECT(t, field);
|
2007-10-12 22:06:33 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, tryInitClassThunk), Compiler::AddressType),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
0,
|
|
|
|
Compiler::VoidType,
|
|
|
|
2, c->register_(t->arch->thread()),
|
|
|
|
frame->append(fieldClass(t, field)));
|
|
|
|
}
|
2010-11-26 19:41:31 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
staticTable = classStaticTable(t, fieldClass(t, field));
|
2010-03-02 01:24:25 +00:00
|
|
|
} else {
|
2011-03-15 23:52:02 +00:00
|
|
|
assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
if (inTryBlock(t, code, ip - 3)) {
|
|
|
|
c->saveLocals();
|
|
|
|
frame->trace(0, 0);
|
|
|
|
}
|
|
|
|
}
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
if (fieldFlags(t, field) & ACC_VOLATILE) {
|
2011-08-30 01:00:17 +00:00
|
|
|
if (TargetBytesPerWord == 4
|
2011-03-15 23:52:02 +00:00
|
|
|
and (fieldCode == DoubleField or fieldCode == LongField))
|
|
|
|
{
|
|
|
|
PROTECT(t, field);
|
|
|
|
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, acquireMonitorForObjectThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
|
|
|
|
c->register_(t->arch->thread()), frame->append(field));
|
|
|
|
} else {
|
|
|
|
c->storeStoreBarrier();
|
|
|
|
}
|
|
|
|
}
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2011-05-23 18:35:01 +00:00
|
|
|
Compiler::Operand* value = popField(t, frame, fieldCode);
|
|
|
|
|
|
|
|
Compiler::Operand* table;
|
|
|
|
|
|
|
|
if (instruction == putstatic) {
|
|
|
|
PROTECT(t, field);
|
|
|
|
|
|
|
|
table = frame->append(staticTable);
|
|
|
|
} else {
|
|
|
|
table = frame->popObject();
|
|
|
|
}
|
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
switch (fieldCode) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
c->store
|
2011-08-30 01:00:17 +00:00
|
|
|
(TargetBytesPerWord, value, 1, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(table, Compiler::IntegerType, targetFieldOffset
|
|
|
|
(context, field), 0, 1));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
c->store
|
2011-08-30 01:00:17 +00:00
|
|
|
(TargetBytesPerWord, value, 2, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(table, Compiler::IntegerType, targetFieldOffset
|
|
|
|
(context, field), 0, 1));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case FloatField:
|
|
|
|
c->store
|
2011-08-30 01:00:17 +00:00
|
|
|
(TargetBytesPerWord, value, 4, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(table, Compiler::FloatType, targetFieldOffset
|
|
|
|
(context, field), 0, 1));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case IntField:
|
|
|
|
c->store
|
2011-08-30 01:00:17 +00:00
|
|
|
(TargetBytesPerWord, value, 4, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(table, Compiler::IntegerType, targetFieldOffset
|
|
|
|
(context, field), 0, 1));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case DoubleField:
|
|
|
|
c->store
|
|
|
|
(8, value, 8, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(table, Compiler::FloatType, targetFieldOffset
|
|
|
|
(context, field), 0, 1));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case LongField:
|
|
|
|
c->store
|
|
|
|
(8, value, 8, c->memory
|
2011-09-01 03:18:00 +00:00
|
|
|
(table, Compiler::IntegerType, targetFieldOffset
|
|
|
|
(context, field), 0, 1));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case ObjectField:
|
|
|
|
if (instruction == putfield) {
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, setMaybeNullThunk), Compiler::AddressType),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
0,
|
|
|
|
Compiler::VoidType,
|
|
|
|
4, c->register_(t->arch->thread()), table,
|
2011-09-01 03:18:00 +00:00
|
|
|
c->constant(targetFieldOffset(context, field),
|
|
|
|
Compiler::IntegerType),
|
2011-03-15 23:52:02 +00:00
|
|
|
value);
|
|
|
|
} else {
|
|
|
|
c->call
|
|
|
|
(c->constant(getThunk(t, setThunk), Compiler::AddressType),
|
|
|
|
0, 0, 0, Compiler::VoidType,
|
|
|
|
4, c->register_(t->arch->thread()), table,
|
2011-09-01 03:18:00 +00:00
|
|
|
c->constant(targetFieldOffset(context, field),
|
|
|
|
Compiler::IntegerType),
|
2011-03-15 23:52:02 +00:00
|
|
|
value);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
default: abort(t);
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
if (fieldFlags(t, field) & ACC_VOLATILE) {
|
2011-08-30 01:00:17 +00:00
|
|
|
if (TargetBytesPerWord == 4
|
2011-03-15 23:52:02 +00:00
|
|
|
and (fieldCode == DoubleField or fieldCode == LongField))
|
|
|
|
{
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, releaseMonitorForObjectThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), 0, Compiler::VoidType, 2,
|
|
|
|
c->register_(t->arch->thread()), frame->append(field));
|
|
|
|
} else {
|
|
|
|
c->storeLoadBarrier();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
int fieldCode = vm::fieldCode
|
|
|
|
(t, byteArrayBody(t, referenceSpec(t, reference), 0));
|
|
|
|
|
|
|
|
Compiler::Operand* value = popField(t, frame, fieldCode);
|
|
|
|
unsigned rSize = resultSize(t, fieldCode);
|
|
|
|
Compiler::OperandType rType = operandTypeForFieldCode(t, fieldCode);
|
|
|
|
|
|
|
|
object pair = makePair(t, context->method, reference);
|
|
|
|
|
|
|
|
switch (fieldCode) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
case FloatField:
|
|
|
|
case IntField: {
|
|
|
|
if (instruction == putstatic) {
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, setStaticFieldValueFromReferenceThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), rSize, rType, 3,
|
|
|
|
c->register_(t->arch->thread()), frame->append(pair),
|
|
|
|
value);
|
|
|
|
} else {
|
|
|
|
Compiler::Operand* instance = frame->popObject();
|
|
|
|
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, setFieldValueFromReferenceThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), rSize, rType, 4,
|
|
|
|
c->register_(t->arch->thread()), frame->append(pair),
|
|
|
|
instance, value);
|
|
|
|
}
|
|
|
|
} break;
|
|
|
|
|
|
|
|
case DoubleField:
|
|
|
|
case LongField: {
|
|
|
|
if (instruction == putstatic) {
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, setStaticLongFieldValueFromReferenceThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), rSize, rType, 4,
|
|
|
|
c->register_(t->arch->thread()), frame->append(pair),
|
|
|
|
static_cast<Compiler::Operand*>(0), value);
|
|
|
|
} else {
|
|
|
|
Compiler::Operand* instance = frame->popObject();
|
|
|
|
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, setLongFieldValueFromReferenceThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), rSize, rType, 5,
|
|
|
|
c->register_(t->arch->thread()), frame->append(pair),
|
|
|
|
instance, static_cast<Compiler::Operand*>(0), value);
|
|
|
|
}
|
|
|
|
} break;
|
|
|
|
|
|
|
|
case ObjectField: {
|
|
|
|
if (instruction == putstatic) {
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, setStaticObjectFieldValueFromReferenceThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), rSize, rType, 3,
|
|
|
|
c->register_(t->arch->thread()), frame->append(pair),
|
|
|
|
value);
|
|
|
|
} else {
|
|
|
|
Compiler::Operand* instance = frame->popObject();
|
|
|
|
|
|
|
|
c->call
|
|
|
|
(c->constant
|
|
|
|
(getThunk(t, setObjectFieldValueFromReferenceThunk),
|
|
|
|
Compiler::AddressType),
|
|
|
|
0, frame->trace(0, 0), rSize, rType, 4,
|
|
|
|
c->register_(t->arch->thread()), frame->append(pair),
|
|
|
|
instance, value);
|
|
|
|
}
|
|
|
|
} break;
|
2009-03-03 03:18:15 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
default: abort(t);
|
2009-03-04 01:02:11 +00:00
|
|
|
}
|
2009-03-03 03:18:15 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
case ret: {
|
|
|
|
unsigned index = codeBody(t, code, ip);
|
|
|
|
frame->returnFromSubroutine(index);
|
|
|
|
} return;
|
2007-12-26 23:59:55 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case return_:
|
2009-04-26 01:51:33 +00:00
|
|
|
if (needsReturnBarrier(t, context->method)) {
|
|
|
|
c->storeStoreBarrier();
|
2009-04-19 22:36:11 +00:00
|
|
|
}
|
2009-04-26 01:51:33 +00:00
|
|
|
|
|
|
|
handleExit(t, frame);
|
|
|
|
c->return_(0, 0);
|
2007-12-09 22:45:43 +00:00
|
|
|
return;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case sipush:
|
|
|
|
frame->pushInt
|
2009-09-20 21:43:32 +00:00
|
|
|
(c->constant
|
|
|
|
(static_cast<int16_t>(codeReadInt16(t, code, ip)),
|
|
|
|
Compiler::IntegerType));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case swap:
|
|
|
|
frame->swap();
|
|
|
|
break;
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case tableswitch: {
|
|
|
|
int32_t base = ip - 1;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
ip = (ip + 3) & ~3; // pad to four byte boundary
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
|
|
|
|
assert(t, defaultIp < codeLength(t, code));
|
|
|
|
|
|
|
|
int32_t bottom = codeReadInt32(t, code, ip);
|
|
|
|
int32_t top = codeReadInt32(t, code, ip);
|
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
Promise* start = 0;
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uint32_t, ipTable, top - bottom + 1);
|
2007-12-16 00:24:15 +00:00
|
|
|
for (int32_t i = 0; i < top - bottom + 1; ++i) {
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned index = ip + (i * 4);
|
|
|
|
uint32_t newIp = base + codeReadInt32(t, code, index);
|
|
|
|
assert(t, newIp < codeLength(t, code));
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(ipTable)[i] = newIp;
|
2007-12-16 00:24:15 +00:00
|
|
|
|
2008-12-02 16:45:20 +00:00
|
|
|
Promise* p = c->poolAppendPromise
|
|
|
|
(frame->addressPromise(c->machineIp(newIp)));
|
2007-12-09 22:45:43 +00:00
|
|
|
if (i == 0) {
|
2011-09-20 22:30:30 +00:00
|
|
|
start = p;
|
2007-09-26 23:23:03 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-12-16 00:24:15 +00:00
|
|
|
assert(t, start);
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2008-04-19 22:13:57 +00:00
|
|
|
Compiler::Operand* key = frame->popInt();
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfLess(4, c->constant(bottom, Compiler::IntegerType), key,
|
|
|
|
frame->machineIp(defaultIp));
|
2008-09-20 23:42:46 +00:00
|
|
|
|
2008-11-07 00:39:38 +00:00
|
|
|
c->save(1, key);
|
|
|
|
|
2008-09-20 23:42:46 +00:00
|
|
|
saveStateAndCompile(t, frame, defaultIp);
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2009-10-07 00:50:32 +00:00
|
|
|
c->jumpIfGreater(4, c->constant(top, Compiler::IntegerType), key,
|
|
|
|
frame->machineIp(defaultIp));
|
2008-09-20 23:42:46 +00:00
|
|
|
|
2008-11-07 00:39:38 +00:00
|
|
|
c->save(1, key);
|
|
|
|
|
2008-09-20 23:42:46 +00:00
|
|
|
saveStateAndCompile(t, frame, defaultIp);
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2009-03-07 00:11:14 +00:00
|
|
|
Compiler::Operand* normalizedKey
|
2009-09-20 21:43:32 +00:00
|
|
|
= (bottom
|
|
|
|
? c->sub(4, c->constant(bottom, Compiler::IntegerType), key) : key);
|
2009-03-07 00:11:14 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
Compiler::Operand* entry = c->memory
|
|
|
|
(frame->absoluteAddressOperand(start), Compiler::AddressType, 0,
|
|
|
|
normalizedKey, TargetBytesPerWord);
|
|
|
|
|
2009-09-20 21:43:32 +00:00
|
|
|
c->jmp
|
|
|
|
(c->load
|
2011-09-20 22:30:30 +00:00
|
|
|
(TargetBytesPerWord, TargetBytesPerWord, context->bootContext
|
|
|
|
? c->add
|
|
|
|
(TargetBytesPerWord, c->memory
|
|
|
|
(c->register_(t->arch->thread()), Compiler::AddressType,
|
|
|
|
TargetThreadCodeImage), entry)
|
|
|
|
: entry,
|
2011-08-30 01:00:17 +00:00
|
|
|
TargetBytesPerWord));
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2008-09-13 21:09:26 +00:00
|
|
|
Compiler::State* state = c->saveState();
|
|
|
|
|
2007-12-16 00:24:15 +00:00
|
|
|
for (int32_t i = 0; i < top - bottom + 1; ++i) {
|
2009-08-27 00:26:44 +00:00
|
|
|
compile(t, frame, RUNTIME_ARRAY_BODY(ipTable)[i]);
|
2008-09-13 21:09:26 +00:00
|
|
|
|
|
|
|
c->restoreState(state);
|
2007-12-16 00:24:15 +00:00
|
|
|
}
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
ip = defaultIp;
|
|
|
|
} break;
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
case wide: {
|
|
|
|
switch (codeBody(t, code, ip++)) {
|
|
|
|
case aload: {
|
|
|
|
frame->loadObject(codeReadInt16(t, code, ip));
|
2007-10-04 22:41:19 +00:00
|
|
|
} break;
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore: {
|
|
|
|
frame->storeObject(codeReadInt16(t, code, ip));
|
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iinc: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
2010-09-23 14:50:09 +00:00
|
|
|
int16_t count = codeReadInt16(t, code, ip);
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2009-05-15 02:08:01 +00:00
|
|
|
storeLocal
|
|
|
|
(context, 1,
|
2009-09-20 21:43:32 +00:00
|
|
|
c->add
|
|
|
|
(4, c->constant(count, Compiler::IntegerType),
|
|
|
|
loadLocal(context, 1, index)),
|
2009-05-03 20:57:11 +00:00
|
|
|
index);
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iload: {
|
|
|
|
frame->loadInt(codeReadInt16(t, code, ip));
|
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case istore: {
|
|
|
|
frame->storeInt(codeReadInt16(t, code, ip));
|
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lload: {
|
|
|
|
frame->loadLong(codeReadInt16(t, code, ip));
|
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lstore: {
|
|
|
|
frame->storeLong(codeReadInt16(t, code, ip));
|
|
|
|
} break;
|
2007-10-12 14:26:36 +00:00
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
case ret: {
|
|
|
|
unsigned index = codeReadInt16(t, code, ip);
|
|
|
|
c->jmp(loadLocal(context, 1, index));
|
|
|
|
frame->returnFromSubroutine(index);
|
|
|
|
} return;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
default: abort(t);
|
|
|
|
}
|
|
|
|
} break;
|
2007-12-26 19:19:45 +00:00
|
|
|
|
|
|
|
default: abort(t);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2009-06-11 23:14:54 +00:00
|
|
|
FILE* compileLog = 0;
|
|
|
|
|
2007-12-26 16:56:14 +00:00
|
|
|
void
|
2008-11-11 15:20:49 +00:00
|
|
|
logCompile(MyThread* t, const void* code, unsigned size, const char* class_,
|
2012-05-02 15:49:31 +00:00
|
|
|
const char* name, const char* spec);
|
2007-12-26 16:56:14 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
int
|
|
|
|
resolveIpForwards(Context* context, int start, int end)
|
2011-03-26 00:55:25 +00:00
|
|
|
{
|
2012-03-11 11:00:08 +00:00
|
|
|
if (start < 0) {
|
|
|
|
start = 0;
|
|
|
|
}
|
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
while (start < end and context->visitTable[start] == 0) {
|
|
|
|
++ start;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (start >= end) {
|
|
|
|
return -1;
|
2011-03-27 01:55:23 +00:00
|
|
|
} else {
|
2011-04-09 00:50:22 +00:00
|
|
|
return start;
|
|
|
|
}
|
|
|
|
}
|
2011-03-26 00:55:25 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
int
|
|
|
|
resolveIpBackwards(Context* context, int start, int end)
|
|
|
|
{
|
2012-03-11 11:00:08 +00:00
|
|
|
Thread* t = context->thread;
|
|
|
|
if (start >= static_cast<int>
|
|
|
|
(codeLength(t, methodCode(t, context->method))))
|
|
|
|
{
|
|
|
|
start = codeLength(t, methodCode(t, context->method)) - 1;
|
|
|
|
}
|
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
while (start >= end and context->visitTable[start] == 0) {
|
|
|
|
-- start;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (start < end) {
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
return start;
|
2011-03-26 00:55:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
object
|
|
|
|
truncateIntArray(Thread* t, object array, unsigned length)
|
|
|
|
{
|
|
|
|
expect(t, intArrayLength(t, array) > length);
|
|
|
|
|
|
|
|
PROTECT(t, array);
|
|
|
|
|
|
|
|
object newArray = makeIntArray(t, length);
|
|
|
|
memcpy(&intArrayBody(t, newArray, 0), &intArrayBody(t, array, 0),
|
|
|
|
length * 4);
|
|
|
|
|
|
|
|
return newArray;
|
|
|
|
}
|
|
|
|
|
|
|
|
object
|
|
|
|
truncateArray(Thread* t, object array, unsigned length)
|
|
|
|
{
|
|
|
|
expect(t, arrayLength(t, array) > length);
|
|
|
|
|
|
|
|
PROTECT(t, array);
|
|
|
|
|
|
|
|
object newArray = makeArray(t, length);
|
|
|
|
memcpy(&arrayBody(t, newArray, 0), &arrayBody(t, array, 0),
|
|
|
|
length * BytesPerWord);
|
|
|
|
|
|
|
|
return newArray;
|
|
|
|
}
|
|
|
|
|
|
|
|
object
|
|
|
|
truncateLineNumberTable(Thread* t, object table, unsigned length)
|
|
|
|
{
|
|
|
|
expect(t, lineNumberTableLength(t, table) > length);
|
|
|
|
|
|
|
|
PROTECT(t, table);
|
|
|
|
|
|
|
|
object newTable = makeLineNumberTable(t, length);
|
2011-08-30 01:00:17 +00:00
|
|
|
memcpy(&lineNumberTableBody(t, newTable, 0),
|
|
|
|
&lineNumberTableBody(t, table, 0),
|
|
|
|
length * sizeof(uint64_t));
|
2011-04-09 00:50:22 +00:00
|
|
|
|
|
|
|
return newTable;
|
|
|
|
}
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
object
|
2011-03-27 01:55:23 +00:00
|
|
|
translateExceptionHandlerTable(MyThread* t, Context* context, intptr_t start)
|
2008-01-07 14:51:07 +00:00
|
|
|
{
|
2011-03-27 01:55:23 +00:00
|
|
|
Compiler* c = context->compiler;
|
|
|
|
|
|
|
|
object oldTable = codeExceptionHandlerTable
|
|
|
|
(t, methodCode(t, context->method));
|
2009-08-10 13:56:16 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
if (oldTable) {
|
|
|
|
PROTECT(t, oldTable);
|
|
|
|
|
|
|
|
unsigned length = exceptionHandlerTableLength(t, oldTable);
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2009-03-04 03:05:48 +00:00
|
|
|
object newIndex = makeIntArray(t, length * 3);
|
2008-04-11 19:03:40 +00:00
|
|
|
PROTECT(t, newIndex);
|
|
|
|
|
2009-03-04 03:05:48 +00:00
|
|
|
object newTable = makeArray(t, length + 1);
|
2008-04-24 22:06:36 +00:00
|
|
|
PROTECT(t, newTable);
|
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
unsigned ni = 0;
|
|
|
|
for (unsigned oi = 0; oi < length; ++ oi) {
|
2011-08-30 01:00:17 +00:00
|
|
|
uint64_t oldHandler = exceptionHandlerTableBody
|
2011-04-09 00:50:22 +00:00
|
|
|
(t, oldTable, oi);
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
int handlerStart = resolveIpForwards
|
|
|
|
(context, exceptionHandlerStart(oldHandler),
|
|
|
|
exceptionHandlerEnd(oldHandler));
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (LIKELY(handlerStart >= 0)) {
|
2012-03-11 11:00:08 +00:00
|
|
|
assert(t, handlerStart < static_cast<int>
|
|
|
|
(codeLength(t, methodCode(t, context->method))));
|
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
int handlerEnd = resolveIpBackwards
|
|
|
|
(context, exceptionHandlerEnd(oldHandler),
|
|
|
|
exceptionHandlerStart(oldHandler));
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
assert(t, handlerEnd >= 0);
|
2012-03-11 11:00:08 +00:00
|
|
|
assert(t, handlerEnd < static_cast<int>
|
|
|
|
(codeLength(t, methodCode(t, context->method))));
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
intArrayBody(t, newIndex, ni * 3)
|
|
|
|
= c->machineIp(handlerStart)->value() - start;
|
|
|
|
|
|
|
|
intArrayBody(t, newIndex, (ni * 3) + 1)
|
|
|
|
= c->machineIp(handlerEnd)->value() - start;
|
|
|
|
|
|
|
|
intArrayBody(t, newIndex, (ni * 3) + 2)
|
|
|
|
= c->machineIp(exceptionHandlerIp(oldHandler))->value() - start;
|
|
|
|
|
|
|
|
object type;
|
|
|
|
if (exceptionHandlerCatchType(oldHandler)) {
|
|
|
|
type = resolveClassInPool
|
|
|
|
(t, context->method, exceptionHandlerCatchType(oldHandler) - 1);
|
|
|
|
} else {
|
|
|
|
type = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
set(t, newTable, ArrayBody + ((ni + 1) * BytesPerWord), type);
|
|
|
|
|
|
|
|
++ ni;
|
2008-04-24 22:06:36 +00:00
|
|
|
}
|
2011-04-09 00:50:22 +00:00
|
|
|
}
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (UNLIKELY(ni < length)) {
|
|
|
|
newIndex = truncateIntArray(t, newIndex, ni * 3);
|
|
|
|
newTable = truncateArray(t, newTable, ni + 1);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
set(t, newTable, ArrayBody, newIndex);
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
return newTable;
|
|
|
|
} else {
|
|
|
|
return 0;
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
object
|
2011-03-27 01:55:23 +00:00
|
|
|
translateLineNumberTable(MyThread* t, Context* context, intptr_t start)
|
2008-01-07 14:51:07 +00:00
|
|
|
{
|
2011-03-27 01:55:23 +00:00
|
|
|
object oldTable = codeLineNumberTable(t, methodCode(t, context->method));
|
2008-01-07 14:51:07 +00:00
|
|
|
if (oldTable) {
|
|
|
|
PROTECT(t, oldTable);
|
|
|
|
|
|
|
|
unsigned length = lineNumberTableLength(t, oldTable);
|
2009-03-04 03:05:48 +00:00
|
|
|
object newTable = makeLineNumberTable(t, length);
|
2011-04-09 00:50:22 +00:00
|
|
|
unsigned ni = 0;
|
|
|
|
for (unsigned oi = 0; oi < length; ++oi) {
|
2011-08-30 01:00:17 +00:00
|
|
|
uint64_t oldLine = lineNumberTableBody(t, oldTable, oi);
|
2011-04-09 00:50:22 +00:00
|
|
|
|
|
|
|
int ip = resolveIpForwards
|
|
|
|
(context, lineNumberIp(oldLine), oi + 1 < length
|
|
|
|
? lineNumberIp(lineNumberTableBody(t, oldTable, oi + 1)) - 1
|
|
|
|
: lineNumberIp(oldLine) + 1);
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (LIKELY(ip >= 0)) {
|
2011-08-30 01:00:17 +00:00
|
|
|
lineNumberTableBody(t, newTable, ni++) = lineNumber
|
|
|
|
(context->compiler->machineIp(ip)->value() - start,
|
|
|
|
lineNumberLine(oldLine));
|
2011-04-09 00:50:22 +00:00
|
|
|
}
|
|
|
|
}
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (UNLIKELY(ni < length)) {
|
|
|
|
newTable = truncateLineNumberTable(t, newTable, ni);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
return newTable;
|
|
|
|
} else {
|
|
|
|
return 0;
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2008-11-25 23:01:30 +00:00
|
|
|
printSet(uintptr_t m, unsigned limit)
|
2008-01-07 16:01:35 +00:00
|
|
|
{
|
2008-11-25 23:01:30 +00:00
|
|
|
if (limit) {
|
|
|
|
for (unsigned i = 0; i < 16; ++i) {
|
|
|
|
if ((m >> i) & 1) {
|
|
|
|
fprintf(stderr, "1");
|
|
|
|
} else {
|
|
|
|
fprintf(stderr, "_");
|
|
|
|
}
|
2008-01-07 16:01:35 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
fix stack frame mapping code for exception handlers
Previously, the stack frame mapping code (responsible for statically
calculating the map of GC roots for a method's stack frame during JIT
compilation) would assume that the map of GC roots on entry to an
exception handler is the same as on entry to the "try" block which the
handler is attached to. Technically, this is true, but the algorithm
we use does not consider whether a local variable is still "live"
(i.e. will be read later) when calculating the map - only whether we
can expect to find a reference there via normal (non-exceptional)
control flow. This can backfire if, within a "try" block, the stack
location which held an object reference on entry to the block gets
overwritten with a non-reference (i.e. a primitive). If an exception
is later thrown from such a block, we might end up trying to treat
that non-reference as a reference during GC, which will crash the VM.
The ideal way to fix this is to calculate the true interval for which
each value is live and use that to produce the stack frame maps. This
would provide the added benefit of ensuring that the garbage collector
does not visit references which, although still present on the stack,
will not be used again.
However, this commit uses the less invasive strategy of ANDing
together the root maps at each GC point within a "try" block and using
the result as the map on entry to the corresponding exception
handler(s). This should give us safe, if not optimal, results. Later
on, we can refine it as described above.
2010-02-05 01:03:32 +00:00
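The following is a minimal illustrative sketch (not part of the original compile.cpp) of the root-map intersection strategy described in the commit message above: the GC-root bitmaps recorded at each trace point inside a "try" block are ANDed word by word, so a stack slot is treated as a root at the handler entry only if every path through the block leaves a reference there. The function name and parameters below are hypothetical; it assumes uintptr_t from <stdint.h>.

// Illustrative sketch only -- not from the original source.
// Intersect the GC-root bitmaps of every trace point in a try block;
// a bit stays set only if all maps agree the slot holds a reference.
static void
intersectHandlerRoots(uintptr_t* result, uintptr_t* const* maps,
                      unsigned mapCount, unsigned mapSizeInWords)
{
  for (unsigned w = 0; w < mapSizeInWords; ++w) {
    uintptr_t merged = ~static_cast<uintptr_t>(0);
    for (unsigned m = 0; m < mapCount; ++m) {
      merged &= maps[m][w];
    }
    result[w] = merged;
  }
}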
|
|
|
void
|
|
|
|
calculateTryCatchRoots(Context* context, SubroutinePath* subroutinePath,
|
|
|
|
uintptr_t* roots, unsigned mapSize, unsigned start,
|
|
|
|
unsigned end)
|
|
|
|
{
|
|
|
|
memset(roots, 0xFF, mapSize * BytesPerWord);
|
|
|
|
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "calculate try/catch roots from %d to %d", start, end);
|
|
|
|
if (subroutinePath) {
|
|
|
|
fprintf(stderr, " ");
|
|
|
|
print(subroutinePath);
|
|
|
|
}
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (TraceElement* te = context->traceLog; te; te = te->next) {
|
|
|
|
if (te->ip >= start and te->ip < end) {
|
|
|
|
uintptr_t* traceRoots = 0;
|
|
|
|
if (subroutinePath == 0) {
|
|
|
|
traceRoots = te->map;
|
|
|
|
te->watch = true;
|
|
|
|
} else {
|
|
|
|
for (SubroutineTrace* t = te->subroutineTrace; t; t = t->next) {
|
|
|
|
if (t->path == subroutinePath) {
|
|
|
|
traceRoots = t->map;
|
|
|
|
t->watch = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (traceRoots) {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, " use roots at ip %3d: ", te->ip);
|
|
|
|
printSet(*traceRoots, mapSize);
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned wi = 0; wi < mapSize; ++wi) {
|
|
|
|
roots[wi] &= traceRoots[wi];
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, " skip roots at ip %3d\n", te->ip);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "result roots : ");
|
|
|
|
printSet(*roots, mapSize);
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-01-07 16:01:35 +00:00
|
|
|
unsigned
|
2008-01-20 23:03:28 +00:00
|
|
|
calculateFrameMaps(MyThread* t, Context* context, uintptr_t* originalRoots,
|
2011-02-16 21:29:57 +00:00
|
|
|
unsigned eventIndex, SubroutinePath* subroutinePath = 0,
|
|
|
|
uintptr_t* resultRoots = 0)
|
2008-01-07 14:51:07 +00:00
|
|
|
{
|
2008-01-20 23:03:28 +00:00
|
|
|
// for each instruction with more than one predecessor, and for each
|
|
|
|
// stack position, determine if there exists a path to that
|
|
|
|
// instruction such that there is not an object pointer left at that
|
|
|
|
// stack position (i.e. it is uninitialized or contains primitive
|
|
|
|
// data).
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
unsigned mapSize = frameMapSizeInWords(t, context->method);
|
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uintptr_t, roots, mapSize);
|
2008-01-20 23:03:28 +00:00
|
|
|
if (originalRoots) {
|
2009-08-27 00:26:44 +00:00
|
|
|
memcpy(RUNTIME_ARRAY_BODY(roots), originalRoots, mapSize * BytesPerWord);
|
2008-01-20 23:03:28 +00:00
|
|
|
} else {
|
2009-08-27 00:26:44 +00:00
|
|
|
memset(RUNTIME_ARRAY_BODY(roots), 0, mapSize * BytesPerWord);
|
2008-01-20 23:03:28 +00:00
|
|
|
}
|
2008-01-07 14:51:07 +00:00
|
|
|
|
|
|
|
int32_t ip = -1;
|
|
|
|
|
2008-01-08 17:10:24 +00:00
|
|
|
// invariant: for each stack position, roots contains a zero at that
|
|
|
|
// position if there exists some path to the current instruction
|
|
|
|
// such that there is definitely not an object pointer at that
|
|
|
|
// position. Otherwise, roots contains a one at that position,
|
|
|
|
// meaning either all known paths result in an object pointer at
|
|
|
|
// that position, or the contents of that position are as yet
|
|
|
|
// unknown.
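  // Illustrative example (added comment, not in the original source):
  // suppose mapSize is one word and this instruction has two
  // predecessors whose maps are 0b1011 and 0b1001. Intersecting them
  // yields 0b1001 -- bit 1 is dropped because at least one path may
  // leave a primitive in that slot, so it must not be treated as a
  // GC root here.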
|
|
|
|
|
2008-03-05 21:44:17 +00:00
|
|
|
unsigned length = context->eventLog.length();
|
|
|
|
while (eventIndex < length) {
|
2008-01-20 23:03:28 +00:00
|
|
|
Event e = static_cast<Event>(context->eventLog.get(eventIndex++));
|
2008-01-07 14:51:07 +00:00
|
|
|
switch (e) {
|
2008-07-05 20:21:13 +00:00
|
|
|
case PushContextEvent: {
|
2009-07-08 14:18:40 +00:00
|
|
|
eventIndex = calculateFrameMaps
|
2011-02-16 21:29:57 +00:00
|
|
|
(t, context, RUNTIME_ARRAY_BODY(roots), eventIndex, subroutinePath,
|
|
|
|
resultRoots);
|
2008-01-07 14:51:07 +00:00
|
|
|
} break;
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
case PopContextEvent:
|
2011-02-16 21:29:57 +00:00
|
|
|
goto exit;
|
2008-01-07 14:51:07 +00:00
|
|
|
|
|
|
|
case IpEvent: {
|
2008-01-20 23:03:28 +00:00
|
|
|
ip = context->eventLog.get2(eventIndex);
|
2008-03-05 21:44:17 +00:00
|
|
|
eventIndex += 2;
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2008-01-07 21:32:41 +00:00
|
|
|
if (DebugFrameMaps) {
|
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, " roots at ip %3d: ", ip);
|
2009-08-27 00:26:44 +00:00
|
|
|
printSet(*RUNTIME_ARRAY_BODY(roots), mapSize);
|
2008-01-07 21:32:41 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
2009-07-08 14:18:40 +00:00
|
|
|
uintptr_t* tableRoots
|
|
|
|
= (subroutinePath ? subroutinePath->rootTable : context->rootTable)
|
|
|
|
+ (ip * mapSize);
|
2008-01-20 23:03:28 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
if (context->visitTable[ip] > 1) {
|
|
|
|
for (unsigned wi = 0; wi < mapSize; ++wi) {
|
2009-08-27 00:26:44 +00:00
|
|
|
uintptr_t newRoots = tableRoots[wi] & RUNTIME_ARRAY_BODY(roots)[wi];
|
2008-03-05 21:44:17 +00:00
|
|
|
|
|
|
|
if ((eventIndex == length
|
2008-07-05 20:21:13 +00:00
|
|
|
or context->eventLog.get(eventIndex) == PopContextEvent)
|
2008-03-05 21:44:17 +00:00
|
|
|
and newRoots != tableRoots[wi])
|
|
|
|
{
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "dirty roots!\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
context->dirtyRoots = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tableRoots[wi] = newRoots;
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(roots)[wi] &= tableRoots[wi];
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2008-01-07 16:01:35 +00:00
|
|
|
if (DebugFrameMaps) {
|
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, " table roots at ip %3d: ", ip);
|
2008-11-25 23:01:30 +00:00
|
|
|
printSet(*tableRoots, mapSize);
|
2008-01-07 16:01:35 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
2008-01-20 23:03:28 +00:00
|
|
|
} else {
|
2009-08-27 00:26:44 +00:00
|
|
|
memcpy(tableRoots, RUNTIME_ARRAY_BODY(roots), mapSize * BytesPerWord);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
} break;
|
|
|
|
|
|
|
|
case MarkEvent: {
|
2008-01-20 23:03:28 +00:00
|
|
|
unsigned i = context->eventLog.get2(eventIndex);
|
|
|
|
eventIndex += 2;
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
markBit(RUNTIME_ARRAY_BODY(roots), i);
|
2008-01-07 14:51:07 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case ClearEvent: {
|
2008-01-20 23:03:28 +00:00
|
|
|
unsigned i = context->eventLog.get2(eventIndex);
|
|
|
|
eventIndex += 2;
|
2008-03-05 21:44:17 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
clearBit(RUNTIME_ARRAY_BODY(roots), i);
|
2008-01-07 14:51:07 +00:00
|
|
|
} break;
|
|
|
|
|
2009-07-13 23:49:15 +00:00
|
|
|
case PushExceptionHandlerEvent: {
|
2010-02-05 01:03:32 +00:00
|
|
|
unsigned start = context->eventLog.get2(eventIndex);
|
|
|
|
eventIndex += 2;
|
|
|
|
unsigned end = context->eventLog.get2(eventIndex);
|
2009-06-16 19:41:31 +00:00
|
|
|
eventIndex += 2;
|
|
|
|
|
2010-02-05 01:03:32 +00:00
|
|
|
if (context->subroutineTable and context->subroutineTable[start]) {
|
|
|
|
Subroutine* s = context->subroutineTable[start];
|
2009-07-13 23:49:15 +00:00
|
|
|
unsigned originalEventIndex = eventIndex;
|
|
|
|
|
|
|
|
for (SubroutineCall* c = s->calls; c; c = c->next) {
|
|
|
|
for (SubroutinePath* p = c->paths; p; p = p->listNext) {
|
2010-02-05 01:03:32 +00:00
|
|
|
calculateTryCatchRoots
|
|
|
|
(context, p, RUNTIME_ARRAY_BODY(roots), mapSize, start, end);
|
2009-07-13 23:49:15 +00:00
|
|
|
|
|
|
|
eventIndex = calculateFrameMaps
|
2009-08-27 00:26:44 +00:00
|
|
|
(t, context, RUNTIME_ARRAY_BODY(roots), originalEventIndex, p);
|
2009-07-13 23:49:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2010-02-05 01:03:32 +00:00
|
|
|
calculateTryCatchRoots
|
|
|
|
(context, 0, RUNTIME_ARRAY_BODY(roots), mapSize, start, end);
|
2009-07-13 23:49:15 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
eventIndex = calculateFrameMaps
|
|
|
|
(t, context, RUNTIME_ARRAY_BODY(roots), eventIndex, 0);
|
2009-07-13 23:49:15 +00:00
|
|
|
}
|
2009-06-16 19:41:31 +00:00
|
|
|
} break;
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
case TraceEvent: {
|
2008-01-20 23:03:28 +00:00
|
|
|
TraceElement* te; context->eventLog.get(eventIndex, &te, BytesPerWord);
|
2008-11-09 23:56:37 +00:00
|
|
|
if (DebugFrameMaps) {
|
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, " trace roots at ip %3d: ", ip);
|
2009-08-27 00:26:44 +00:00
|
|
|
printSet(*RUNTIME_ARRAY_BODY(roots), mapSize);
|
2009-07-13 23:49:15 +00:00
|
|
|
if (subroutinePath) {
|
|
|
|
fprintf(stderr, " ");
|
|
|
|
print(subroutinePath);
|
|
|
|
}
|
2008-11-09 23:56:37 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
2010-02-05 01:03:32 +00:00
|
|
|
|
|
|
|
uintptr_t* map;
|
|
|
|
bool watch;
|
2009-06-26 21:36:04 +00:00
|
|
|
if (subroutinePath == 0) {
|
2010-02-05 01:03:32 +00:00
|
|
|
map = te->map;
|
|
|
|
watch = te->watch;
|
2009-06-26 21:36:04 +00:00
|
|
|
} else {
|
2009-07-13 23:49:15 +00:00
|
|
|
SubroutineTrace* trace = 0;
|
|
|
|
for (SubroutineTrace* t = te->subroutineTrace; t; t = t->next) {
|
|
|
|
if (t->path == subroutinePath) {
|
|
|
|
trace = t;
|
2010-02-05 01:03:32 +00:00
|
|
|
break;
|
2009-07-13 23:49:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (trace == 0) {
|
|
|
|
te->subroutineTrace = trace = new
|
|
|
|
(context->zone.allocate
|
|
|
|
(sizeof(SubroutineTrace) + (mapSize * BytesPerWord)))
|
2010-02-05 01:03:32 +00:00
|
|
|
SubroutineTrace(subroutinePath, te->subroutineTrace, mapSize);
|
2009-06-26 21:36:04 +00:00
|
|
|
|
2009-07-13 23:49:15 +00:00
|
|
|
++ te->subroutineTraceCount;
|
|
|
|
}
|
|
|
|
|
2010-02-05 01:03:32 +00:00
|
|
|
map = trace->map;
|
|
|
|
watch = trace->watch;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned wi = 0; wi < mapSize; ++wi) {
|
|
|
|
uintptr_t v = RUNTIME_ARRAY_BODY(roots)[wi];
|
|
|
|
|
|
|
|
if (watch and map[wi] != v) {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "dirty roots due to trace watch!\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
context->dirtyRoots = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
map[wi] = v;
|
2009-06-26 21:36:04 +00:00
|
|
|
}
|
2008-01-07 16:01:35 +00:00
|
|
|
|
2008-01-20 23:03:28 +00:00
|
|
|
eventIndex += BytesPerWord;
|
2008-01-07 14:51:07 +00:00
|
|
|
} break;
|
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
case PushSubroutineEvent: {
|
|
|
|
SubroutineCall* call;
|
|
|
|
context->eventLog.get(eventIndex, &call, BytesPerWord);
|
|
|
|
eventIndex += BytesPerWord;
|
2009-07-08 14:18:40 +00:00
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
unsigned nextIndex = context->eventLog.get2(eventIndex);
|
2009-07-08 14:18:40 +00:00
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
eventIndex = nextIndex;
|
|
|
|
|
2009-07-13 23:49:15 +00:00
|
|
|
SubroutinePath* path = 0;
|
|
|
|
for (SubroutinePath* p = call->paths; p; p = p->listNext) {
|
|
|
|
if (p->stackNext == subroutinePath) {
|
|
|
|
path = p;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (path == 0) {
|
|
|
|
path = new (context->zone.allocate(sizeof(SubroutinePath)))
|
|
|
|
SubroutinePath(call, subroutinePath,
|
|
|
|
makeRootTable(t, &(context->zone), context->method));
|
|
|
|
}
|
|
|
|
|
2011-02-16 21:29:57 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uintptr_t, subroutineRoots, mapSize);
|
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
calculateFrameMaps
|
|
|
|
(t, context, RUNTIME_ARRAY_BODY(roots), call->subroutine->logIndex,
|
2011-02-16 21:29:57 +00:00
|
|
|
path, RUNTIME_ARRAY_BODY(subroutineRoots));
|
|
|
|
|
|
|
|
for (unsigned wi = 0; wi < mapSize; ++wi) {
|
|
|
|
RUNTIME_ARRAY_BODY(roots)[wi]
|
|
|
|
&= RUNTIME_ARRAY_BODY(subroutineRoots)[wi];
|
|
|
|
}
|
2009-06-26 21:36:04 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case PopSubroutineEvent:
|
2011-02-16 21:29:57 +00:00
|
|
|
eventIndex = static_cast<unsigned>(-1);
|
|
|
|
goto exit;
|
2009-06-26 21:36:04 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
default: abort(t);
|
|
|
|
}
|
|
|
|
}
|
2008-01-07 16:01:35 +00:00
|
|
|
|
2011-02-16 21:29:57 +00:00
|
|
|
exit:
|
|
|
|
if (resultRoots and ip != -1) {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "result roots at ip %3d: ", ip);
|
|
|
|
printSet(*RUNTIME_ARRAY_BODY(roots), mapSize);
|
|
|
|
if (subroutinePath) {
|
|
|
|
fprintf(stderr, " ");
|
|
|
|
print(subroutinePath);
|
|
|
|
}
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(resultRoots, RUNTIME_ARRAY_BODY(roots), mapSize * BytesPerWord);
|
|
|
|
}
|
|
|
|
|
2008-01-20 23:03:28 +00:00
|
|
|
return eventIndex;
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2008-04-07 23:47:41 +00:00
|
|
|
int
|
|
|
|
compareTraceElementPointers(const void* va, const void* vb)
|
|
|
|
{
|
|
|
|
TraceElement* a = *static_cast<TraceElement* const*>(va);
|
|
|
|
TraceElement* b = *static_cast<TraceElement* const*>(vb);
|
2008-04-13 19:48:20 +00:00
|
|
|
if (a->address->value() > b->address->value()) {
|
2008-04-07 23:47:41 +00:00
|
|
|
return 1;
|
2008-04-13 19:48:20 +00:00
|
|
|
} else if (a->address->value() < b->address->value()) {
|
2008-04-07 23:47:41 +00:00
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-04-11 21:00:18 +00:00
|
|
|
unsigned
|
2009-06-26 21:36:04 +00:00
|
|
|
simpleFrameMapTableSize(MyThread* t, object method, object map)
|
2008-04-11 21:00:18 +00:00
|
|
|
{
|
2009-04-27 14:46:43 +00:00
|
|
|
int size = frameMapSizeInBits(t, method);
|
2008-04-11 21:00:18 +00:00
|
|
|
return ceiling(intArrayLength(t, map) * size, 32 + size);
|
|
|
|
}
|
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
uint8_t*
|
2011-09-24 05:25:52 +00:00
|
|
|
finish(MyThread* t, FixedAllocator* allocator, Assembler* a, const char* name,
|
2010-11-14 02:28:05 +00:00
|
|
|
unsigned length)
|
2008-02-11 17:21:41 +00:00
|
|
|
{
|
2011-09-24 05:25:52 +00:00
|
|
|
uint8_t* start = static_cast<uint8_t*>
|
|
|
|
(allocator->allocate(length, TargetBytesPerWord));
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2011-02-28 06:03:13 +00:00
|
|
|
a->setDestination(start);
|
|
|
|
a->write();
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2010-11-14 02:28:05 +00:00
|
|
|
logCompile(t, start, length, 0, name, 0);
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
return start;
|
2008-02-11 17:21:41 +00:00
|
|
|
}
|
2007-10-12 14:26:36 +00:00
|
|
|
|
2008-11-09 23:56:37 +00:00
|
|
|
void
|
2009-06-26 21:36:04 +00:00
|
|
|
setBit(int32_t* dst, unsigned index)
|
2008-11-09 23:56:37 +00:00
|
|
|
{
|
2009-06-26 21:36:04 +00:00
|
|
|
dst[index / 32] |= static_cast<int32_t>(1) << (index % 32);
|
2008-11-09 23:56:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2009-06-26 21:36:04 +00:00
|
|
|
clearBit(int32_t* dst, unsigned index)
|
2008-11-09 23:56:37 +00:00
|
|
|
{
|
2009-06-26 21:36:04 +00:00
|
|
|
dst[index / 32] &= ~(static_cast<int32_t>(1) << (index % 32));
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
copyFrameMap(int32_t* dst, uintptr_t* src, unsigned mapSizeInBits,
|
|
|
|
unsigned offset, TraceElement* p,
|
|
|
|
SubroutinePath* subroutinePath)
|
|
|
|
{
|
|
|
|
if (DebugFrameMaps) {
|
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, " orig roots at ip %3d: ", p->ip);
|
2009-06-26 21:36:04 +00:00
|
|
|
printSet(src[0], ceiling(mapSizeInBits, BitsPerWord));
|
|
|
|
print(subroutinePath);
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
|
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, " final roots at ip %3d: ", p->ip);
|
2009-06-26 21:36:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned j = 0; j < p->argumentIndex; ++j) {
|
|
|
|
if (getBit(src, j)) {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "1");
|
|
|
|
}
|
|
|
|
setBit(dst, offset + j);
|
|
|
|
} else {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "_");
|
|
|
|
}
|
|
|
|
clearBit(dst, offset + j);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
print(subroutinePath);
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
class FrameMapTableHeader {
|
|
|
|
public:
|
|
|
|
FrameMapTableHeader(unsigned indexCount):
|
|
|
|
indexCount(indexCount)
|
|
|
|
{ }
|
|
|
|
|
|
|
|
unsigned indexCount;
|
|
|
|
};
|
|
|
|
|
|
|
|
class FrameMapTableIndexElement {
|
|
|
|
public:
|
|
|
|
FrameMapTableIndexElement(int offset, unsigned base, unsigned path):
|
|
|
|
offset(offset),
|
|
|
|
base(base),
|
|
|
|
path(path)
|
|
|
|
{ }
|
|
|
|
|
|
|
|
int offset;
|
|
|
|
unsigned base;
|
|
|
|
unsigned path;
|
|
|
|
};
|
|
|
|
|
|
|
|
class FrameMapTablePath {
|
|
|
|
public:
|
|
|
|
FrameMapTablePath(unsigned stackIndex, unsigned elementCount, unsigned next):
|
|
|
|
stackIndex(stackIndex),
|
|
|
|
elementCount(elementCount),
|
|
|
|
next(next)
|
|
|
|
{ }
|
|
|
|
|
|
|
|
unsigned stackIndex;
|
|
|
|
unsigned elementCount;
|
|
|
|
unsigned next;
|
|
|
|
int32_t elements[0];
|
|
|
|
};
|
|
|
|
|
2009-07-13 23:49:15 +00:00
|
|
|
int
|
|
|
|
compareInt32s(const void* va, const void* vb)
|
|
|
|
{
|
|
|
|
return *static_cast<int32_t const*>(va) - *static_cast<int32_t const*>(vb);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
compare(SubroutinePath* a, SubroutinePath* b)
|
|
|
|
{
|
|
|
|
if (a->stackNext) {
|
|
|
|
int d = compare(a->stackNext, b->stackNext);
|
|
|
|
if (d) return d;
|
|
|
|
}
|
|
|
|
int64_t av = a->call->returnAddress->value();
|
|
|
|
int64_t bv = b->call->returnAddress->value();
|
|
|
|
if (av > bv) {
|
|
|
|
return 1;
|
|
|
|
} else if (av < bv) {
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
compareSubroutineTracePointers(const void* va, const void* vb)
|
|
|
|
{
|
|
|
|
return compare((*static_cast<SubroutineTrace* const*>(va))->path,
|
|
|
|
(*static_cast<SubroutineTrace* const*>(vb))->path);
|
|
|
|
}
|
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
object
|
|
|
|
makeGeneralFrameMapTable(MyThread* t, Context* context, uint8_t* start,
|
2010-09-17 01:43:27 +00:00
|
|
|
TraceElement** elements, unsigned elementCount,
|
|
|
|
unsigned pathFootprint, unsigned mapCount)
|
2009-06-26 21:36:04 +00:00
|
|
|
{
|
|
|
|
unsigned mapSize = frameMapSizeInBits(t, context->method);
|
|
|
|
unsigned indexOffset = sizeof(FrameMapTableHeader);
|
|
|
|
unsigned mapsOffset = indexOffset
|
2010-09-17 01:43:27 +00:00
|
|
|
+ (elementCount * sizeof(FrameMapTableIndexElement));
|
2009-06-26 21:36:04 +00:00
|
|
|
unsigned pathsOffset = mapsOffset + (ceiling(mapCount * mapSize, 32) * 4);
|
|
|
|
|
|
|
|
object table = makeByteArray(t, pathsOffset + pathFootprint);
|
|
|
|
|
|
|
|
int8_t* body = &byteArrayBody(t, table, 0);
|
2010-09-17 01:43:27 +00:00
|
|
|
new (body) FrameMapTableHeader(elementCount);
|
2009-06-26 21:36:04 +00:00
|
|
|
|
|
|
|
unsigned nextTableIndex = pathsOffset;
|
|
|
|
unsigned nextMapIndex = 0;
|
2010-09-17 01:43:27 +00:00
|
|
|
for (unsigned i = 0; i < elementCount; ++i) {
|
2009-06-26 21:36:04 +00:00
|
|
|
TraceElement* p = elements[i];
|
|
|
|
unsigned mapBase = nextMapIndex;
|
|
|
|
|
|
|
|
unsigned pathIndex;
|
|
|
|
if (p->subroutineTrace) {
|
|
|
|
FrameMapTablePath* previous = 0;
|
|
|
|
Subroutine* subroutine = p->subroutineTrace->path->call->subroutine;
|
2009-07-13 23:49:15 +00:00
|
|
|
for (Subroutine* s = subroutine; s; s = s->stackNext) {
|
2009-07-08 14:18:40 +00:00
|
|
|
if (s->tableIndex == 0) {
|
2009-06-26 21:36:04 +00:00
|
|
|
unsigned pathObjectSize = sizeof(FrameMapTablePath)
|
|
|
|
+ (sizeof(int32_t) * s->callCount);
|
|
|
|
|
|
|
|
assert(t, nextTableIndex + pathObjectSize
|
|
|
|
<= byteArrayLength(t, table));
|
|
|
|
|
|
|
|
s->tableIndex = nextTableIndex;
|
|
|
|
|
|
|
|
nextTableIndex += pathObjectSize;
|
|
|
|
|
|
|
|
FrameMapTablePath* current = new (body + s->tableIndex)
|
|
|
|
FrameMapTablePath
|
2009-07-13 23:49:15 +00:00
|
|
|
(s->stackIndex, s->callCount,
|
|
|
|
s->stackNext ? s->stackNext->tableIndex : 0);
|
2009-06-26 21:36:04 +00:00
|
|
|
|
|
|
|
unsigned i = 0;
|
|
|
|
for (SubroutineCall* c = subroutine->calls; c; c = c->next) {
|
|
|
|
assert(t, i < s->callCount);
|
|
|
|
|
2009-07-08 14:18:40 +00:00
|
|
|
current->elements[i++]
|
2009-06-26 21:36:04 +00:00
|
|
|
= static_cast<intptr_t>(c->returnAddress->value())
|
|
|
|
- reinterpret_cast<intptr_t>(start);
|
|
|
|
}
|
2009-07-13 23:49:15 +00:00
|
|
|
assert(t, i == s->callCount);
|
|
|
|
|
|
|
|
qsort(current->elements, s->callCount, sizeof(int32_t),
|
|
|
|
compareInt32s);
|
2009-06-26 21:36:04 +00:00
|
|
|
|
|
|
|
if (previous) {
|
|
|
|
previous->next = s->tableIndex;
|
|
|
|
}
|
|
|
|
|
|
|
|
previous = current;
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pathIndex = subroutine->tableIndex;
|
|
|
|
|
2011-08-30 01:00:17 +00:00
|
|
|
THREAD_RUNTIME_ARRAY
|
|
|
|
(t, SubroutineTrace*, traces, p->subroutineTraceCount);
|
|
|
|
|
2009-07-13 23:49:15 +00:00
|
|
|
unsigned i = 0;
|
2009-06-26 21:36:04 +00:00
|
|
|
for (SubroutineTrace* trace = p->subroutineTrace;
|
|
|
|
trace; trace = trace->next)
|
|
|
|
{
|
2009-07-13 23:49:15 +00:00
|
|
|
assert(t, i < p->subroutineTraceCount);
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(traces)[i++] = trace;
|
2009-07-13 23:49:15 +00:00
|
|
|
}
|
|
|
|
assert(t, i == p->subroutineTraceCount);
|
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
qsort(RUNTIME_ARRAY_BODY(traces), p->subroutineTraceCount,
|
|
|
|
sizeof(SubroutineTrace*), compareSubroutineTracePointers);
|
2009-07-13 23:49:15 +00:00
|
|
|
|
|
|
|
for (unsigned i = 0; i < p->subroutineTraceCount; ++i) {
|
|
|
|
assert(t, mapsOffset + ceiling(nextMapIndex + mapSize, 32) * 4
|
|
|
|
<= pathsOffset);
|
2009-06-26 21:36:04 +00:00
|
|
|
|
|
|
|
copyFrameMap(reinterpret_cast<int32_t*>(body + mapsOffset),
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(traces)[i]->map, mapSize,
|
|
|
|
nextMapIndex, p, RUNTIME_ARRAY_BODY(traces)[i]->path);
|
2009-06-26 21:36:04 +00:00
|
|
|
|
|
|
|
nextMapIndex += mapSize;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
pathIndex = 0;
|
|
|
|
|
2009-07-13 23:49:15 +00:00
|
|
|
assert(t, mapsOffset + ceiling(nextMapIndex + mapSize, 32) * 4
|
|
|
|
<= pathsOffset);
|
2009-06-26 21:36:04 +00:00
|
|
|
|
|
|
|
copyFrameMap(reinterpret_cast<int32_t*>(body + mapsOffset), p->map,
|
|
|
|
mapSize, nextMapIndex, p, 0);
|
|
|
|
|
|
|
|
nextMapIndex += mapSize;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned elementIndex = indexOffset
|
|
|
|
+ (i * sizeof(FrameMapTableIndexElement));
|
|
|
|
|
|
|
|
assert(t, elementIndex + sizeof(FrameMapTableIndexElement) <= mapsOffset);
|
|
|
|
|
|
|
|
new (body + elementIndex) FrameMapTableIndexElement
|
|
|
|
(static_cast<intptr_t>(p->address->value())
|
|
|
|
- reinterpret_cast<intptr_t>(start), mapBase, pathIndex);
|
|
|
|
}
|
|
|
|
|
2009-07-13 23:49:15 +00:00
|
|
|
assert(t, nextMapIndex == mapCount * mapSize);
|
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
return table;
|
|
|
|
}
|
|
|
|
|
|
|
|
object
|
|
|
|
makeSimpleFrameMapTable(MyThread* t, Context* context, uint8_t* start,
|
2010-09-17 01:43:27 +00:00
|
|
|
TraceElement** elements, unsigned elementCount)
|
2009-06-26 21:36:04 +00:00
|
|
|
{
|
|
|
|
unsigned mapSize = frameMapSizeInBits(t, context->method);
|
|
|
|
object table = makeIntArray
|
2010-09-17 01:43:27 +00:00
|
|
|
(t, elementCount + ceiling(elementCount * mapSize, 32));
|
2009-06-26 21:36:04 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
assert(t, intArrayLength(t, table) == elementCount
|
2009-06-26 21:36:04 +00:00
|
|
|
+ simpleFrameMapTableSize(t, context->method, table));
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
for (unsigned i = 0; i < elementCount; ++i) {
|
2009-06-26 21:36:04 +00:00
|
|
|
TraceElement* p = elements[i];
|
|
|
|
|
|
|
|
intArrayBody(t, table, i) = static_cast<intptr_t>(p->address->value())
|
|
|
|
- reinterpret_cast<intptr_t>(start);
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
assert(t, elementCount + ceiling((i + 1) * mapSize, 32)
|
2009-06-26 21:36:04 +00:00
|
|
|
<= intArrayLength(t, table));
|
|
|
|
|
2009-06-30 23:35:28 +00:00
|
|
|
if (mapSize) {
|
2010-09-17 01:43:27 +00:00
|
|
|
copyFrameMap(&intArrayBody(t, table, elementCount), p->map,
|
2009-06-30 23:35:28 +00:00
|
|
|
mapSize, i * mapSize, p, 0);
|
|
|
|
}
|
2009-06-26 21:36:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return table;
|
2008-11-09 23:56:37 +00:00
|
|
|
}
|
2009-10-20 14:20:49 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
void
|
2011-02-28 06:03:13 +00:00
|
|
|
finish(MyThread* t, FixedAllocator* allocator, Context* context)
|
2008-02-11 17:21:41 +00:00
|
|
|
{
|
|
|
|
Compiler* c = context->compiler;
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2009-10-24 23:18:56 +00:00
|
|
|
if (false) {
|
|
|
|
logCompile
|
|
|
|
(t, 0, 0,
|
|
|
|
reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
|
|
|
|
reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, methodName(t, context->method), 0)),
|
|
|
|
reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, methodSpec(t, context->method), 0)));
|
|
|
|
}
|
|
|
|
|
|
|
|
// for debugging:
|
|
|
|
if (false and
|
|
|
|
::strcmp
|
|
|
|
(reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
|
2010-06-16 01:10:48 +00:00
|
|
|
"java/lang/System") == 0 and
|
2009-10-24 23:18:56 +00:00
|
|
|
::strcmp
|
|
|
|
(reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, methodName(t, context->method), 0)),
|
2010-06-16 01:10:48 +00:00
|
|
|
"<clinit>") == 0)
|
2009-10-24 23:18:56 +00:00
|
|
|
{
|
|
|
|
trap();
|
|
|
|
}
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
// todo: this is a CPU-intensive operation, so consider doing it
|
|
|
|
// earlier before we've acquired the global class lock to improve
|
|
|
|
// parallelism (the downside being that it may end up being a waste
|
|
|
|
// of cycles if another thread compiles the same method in parallel,
|
|
|
|
// which might be mitigated by fine-grained, per-method locking):
|
2011-02-28 06:03:13 +00:00
|
|
|
c->compile(context->leaf ? 0 : stackOverflowThunk(t),
|
2011-09-01 03:18:00 +00:00
|
|
|
TargetThreadStackLimit);
|
2011-02-28 06:03:13 +00:00
|
|
|
|
|
|
|
// we must acquire the class lock here at the latest
|
|
|
|
|
|
|
|
unsigned codeSize = c->resolve
|
2011-08-30 01:00:17 +00:00
|
|
|
(allocator->base + allocator->offset + TargetBytesPerWord);
|
2010-09-17 01:43:27 +00:00
|
|
|
|
2011-09-24 05:25:52 +00:00
|
|
|
unsigned total = pad(codeSize, TargetBytesPerWord)
|
|
|
|
+ pad(c->poolSize(), TargetBytesPerWord) + TargetBytesPerWord;
|
2010-12-27 22:55:23 +00:00
|
|
|
|
2011-09-01 03:18:00 +00:00
|
|
|
target_uintptr_t* code = static_cast<target_uintptr_t*>
|
2011-09-24 05:25:52 +00:00
|
|
|
(allocator->allocate(total, TargetBytesPerWord));
|
2008-11-23 23:58:01 +00:00
|
|
|
code[0] = codeSize;
|
|
|
|
uint8_t* start = reinterpret_cast<uint8_t*>(code + 1);
|
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
context->executableAllocator = allocator;
|
|
|
|
context->executableStart = code;
|
|
|
|
context->executableSize = total;
|
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
if (context->objectPool) {
|
|
|
|
object pool = allocate3
|
|
|
|
(t, allocator, Machine::ImmortalAllocation,
|
2011-09-01 03:18:00 +00:00
|
|
|
FixedSizeOfArray + ((context->objectPoolCount + 1) * BytesPerWord),
|
2008-11-29 01:23:01 +00:00
|
|
|
true);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2009-03-04 03:05:48 +00:00
|
|
|
initArray(t, pool, context->objectPoolCount + 1);
|
2008-11-29 01:23:01 +00:00
|
|
|
mark(t, pool, 0);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
set(t, pool, ArrayBody, root(t, ObjectPools));
|
|
|
|
setRoot(t, ObjectPools, pool);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
|
|
|
unsigned i = 1;
|
|
|
|
for (PoolElement* p = context->objectPool; p; p = p->next) {
|
2011-09-01 03:18:00 +00:00
|
|
|
unsigned offset = ArrayBody + ((i++) * BytesPerWord);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
p->address = reinterpret_cast<uintptr_t>(pool) + offset;
|
|
|
|
|
|
|
|
set(t, pool, offset, p->target);
|
|
|
|
}
|
|
|
|
}
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2011-02-28 06:03:13 +00:00
|
|
|
c->write();
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2008-12-02 16:45:20 +00:00
|
|
|
BootContext* bc = context->bootContext;
|
|
|
|
if (bc) {
|
|
|
|
for (DelayedPromise* p = bc->addresses;
|
|
|
|
p != bc->addressSentinal;
|
|
|
|
p = p->next)
|
|
|
|
{
|
|
|
|
p->basis = new (bc->zone->allocate(sizeof(ResolvedPromise)))
|
|
|
|
ResolvedPromise(p->basis->value());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
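As a rough illustration of the offset-to-frame-size lookup described above, here is a minimal standalone C++ sketch; the table layout and names are hypothetical, not the VM's actual metadata format. The unwinder finds the frame size in effect at the current instruction offset and uses it, rather than a saved frame pointer, to locate the caller's frame.

// Standalone sketch only; assumes a sorted, compile-time-generated table.
#include <cstdint>
#include <cstdio>

struct FrameSizeEntry {
  uint32_t instructionOffset;  // offset from the start of the compiled method
  uint32_t frameSizeInWords;   // frame size in effect from that offset onward
};

// Return the frame size in effect at 'offset': the entry with the largest
// instructionOffset not greater than 'offset' (entries sorted ascending).
uint32_t frameSizeAt(const FrameSizeEntry* table, unsigned count,
                     uint32_t offset)
{
  uint32_t size = 0;
  for (unsigned i = 0; i < count and table[i].instructionOffset <= offset; ++i) {
    size = table[i].frameSizeInWords;
  }
  return size;
}

int main()
{
  // Hypothetical method whose frame grows from 2 words at entry to 5 words
  // after the prologue, then shrinks back to 2 before the epilogue.
  const FrameSizeEntry table[] = { { 0, 2 }, { 12, 5 }, { 96, 2 } };
  std::printf("frame size at offset 40: %u words\n",
              static_cast<unsigned>(frameSizeAt(table, 3, 40)));  // prints 5
  return 0;
}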
2011-01-17 02:05:05 +00:00
|
|
|
{ object newExceptionHandlerTable = translateExceptionHandlerTable
|
2011-03-27 01:55:23 +00:00
|
|
|
(t, context, reinterpret_cast<intptr_t>(start));
|
2010-12-27 22:55:23 +00:00
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
PROTECT(t, newExceptionHandlerTable);
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
object newLineNumberTable = translateLineNumberTable
|
2011-03-27 01:55:23 +00:00
|
|
|
(t, context, reinterpret_cast<intptr_t>(start));
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
object code = methodCode(t, context->method);
|
2008-04-11 21:00:18 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
code = makeCode
|
2011-01-30 21:14:57 +00:00
|
|
|
(t, 0, newExceptionHandlerTable, newLineNumberTable,
|
2010-09-17 01:43:27 +00:00
|
|
|
reinterpret_cast<uintptr_t>(start), codeMaxStack(t, code),
|
|
|
|
codeMaxLocals(t, code), 0);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2008-04-13 19:48:20 +00:00
|
|
|
set(t, context->method, MethodCode, code);
|
2008-02-11 17:21:41 +00:00
|
|
|
}
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2008-04-13 19:48:20 +00:00
|
|
|
if (context->traceLogCount) {
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, TraceElement*, elements, context->traceLogCount);
|
2008-04-13 19:48:20 +00:00
|
|
|
unsigned index = 0;
|
2009-06-26 21:36:04 +00:00
|
|
|
unsigned pathFootprint = 0;
|
|
|
|
unsigned mapCount = 0;
|
2008-04-13 19:48:20 +00:00
|
|
|
for (TraceElement* p = context->traceLog; p; p = p->next) {
|
|
|
|
assert(t, index < context->traceLogCount);
|
2008-04-11 21:00:18 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
if (p->address) {
|
|
|
|
SubroutineTrace* trace = p->subroutineTrace;
|
|
|
|
unsigned myMapCount = 1;
|
|
|
|
if (trace) {
|
|
|
|
for (Subroutine* s = trace->path->call->subroutine;
|
|
|
|
s; s = s->stackNext)
|
|
|
|
{
|
|
|
|
unsigned callCount = s->callCount;
|
|
|
|
myMapCount *= callCount;
|
|
|
|
if (not s->visited) {
|
|
|
|
s->visited = true;
|
|
|
|
pathFootprint += sizeof(FrameMapTablePath)
|
|
|
|
+ (sizeof(int32_t) * callCount);
|
|
|
|
}
|
2009-06-26 21:36:04 +00:00
|
|
|
}
|
|
|
|
}
|
2009-07-13 23:49:15 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
mapCount += myMapCount;
|
2009-06-26 21:36:04 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
RUNTIME_ARRAY_BODY(elements)[index++] = p;
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
if (p->target) {
|
|
|
|
insertCallNode
|
|
|
|
(t, makeCallNode
|
|
|
|
(t, p->address->value(), p->target, p->flags, 0));
|
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2008-04-13 19:48:20 +00:00
|
|
|
}
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
qsort(RUNTIME_ARRAY_BODY(elements), index,
|
2009-08-27 00:26:44 +00:00
|
|
|
sizeof(TraceElement*), compareTraceElementPointers);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
object map;
|
|
|
|
if (pathFootprint) {
|
|
|
|
map = makeGeneralFrameMapTable
|
2010-09-17 01:43:27 +00:00
|
|
|
(t, context, start, RUNTIME_ARRAY_BODY(elements), index, pathFootprint,
|
2009-08-27 00:26:44 +00:00
|
|
|
mapCount);
|
2009-06-26 21:36:04 +00:00
|
|
|
} else {
|
2009-08-27 00:26:44 +00:00
|
|
|
map = makeSimpleFrameMapTable
|
2010-09-17 01:43:27 +00:00
|
|
|
(t, context, start, RUNTIME_ARRAY_BODY(elements), index);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2008-04-13 19:48:20 +00:00
|
|
|
set(t, methodCode(t, context->method), CodePool, map);
|
|
|
|
}
|
2007-12-31 22:40:56 +00:00
|
|
|
|
2008-11-11 15:20:49 +00:00
|
|
|
logCompile
|
|
|
|
(t, start, codeSize,
|
|
|
|
reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
|
|
|
|
reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, methodName(t, context->method), 0)),
|
|
|
|
reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, methodSpec(t, context->method), 0)));
|
2008-02-11 17:21:41 +00:00
|
|
|
|
|
|
|
// for debugging:
|
2009-10-04 22:10:36 +00:00
|
|
|
if (false and
|
2009-08-27 00:26:44 +00:00
|
|
|
::strcmp
|
2008-02-11 17:21:41 +00:00
|
|
|
(reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, className(t, methodClass(t, context->method)), 0)),
|
2010-06-16 01:10:48 +00:00
|
|
|
"java/lang/System") == 0 and
|
2009-08-27 00:26:44 +00:00
|
|
|
::strcmp
|
2008-02-11 17:21:41 +00:00
|
|
|
(reinterpret_cast<const char*>
|
|
|
|
(&byteArrayBody(t, methodName(t, context->method), 0)),
|
2010-06-16 01:10:48 +00:00
|
|
|
"<clinit>") == 0)
|
2008-02-11 17:21:41 +00:00
|
|
|
{
|
2009-03-03 01:40:06 +00:00
|
|
|
trap();
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-09-27 22:20:54 +00:00
|
|
|
|
2009-03-09 14:26:23 +00:00
|
|
|
syncInstructionCache(start, codeSize);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
void
compile(MyThread* t, Context* context)
{
  Compiler* c = context->compiler;

  // fprintf(stderr, "compiling %s.%s%s\n",
  //         &byteArrayBody(t, className(t, methodClass(t, context->method)), 0),
  //         &byteArrayBody(t, methodName(t, context->method), 0),
  //         &byteArrayBody(t, methodSpec(t, context->method), 0));

  unsigned footprint = methodParameterFootprint(t, context->method);
  unsigned locals = localSize(t, context->method);
  c->init(codeLength(t, methodCode(t, context->method)), footprint, locals,
          alignedFrameSize(t, context->method));

  THREAD_RUNTIME_ARRAY(t, uint8_t, stackMap,
                       codeMaxStack(t, methodCode(t, context->method)));
  Frame frame(context, RUNTIME_ARRAY_BODY(stackMap));

  unsigned index = methodParameterFootprint(t, context->method);
  if ((methodFlags(t, context->method) & ACC_STATIC) == 0) {
    frame.set(--index, Frame::Object);
    c->initLocal(1, index, Compiler::ObjectType);
  }

  for (MethodSpecIterator it
         (t, reinterpret_cast<const char*>
          (&byteArrayBody(t, methodSpec(t, context->method), 0)));
       it.hasNext();)
  {
    switch (*it.next()) {
    case 'L':
    case '[':
      frame.set(--index, Frame::Object);
      c->initLocal(1, index, Compiler::ObjectType);
      break;

    case 'J':
      frame.set(--index, Frame::Long);
      frame.set(--index, Frame::Long);
      c->initLocal(2, index, Compiler::IntegerType);
      break;

    case 'D':
      frame.set(--index, Frame::Long);
      frame.set(--index, Frame::Long);
      c->initLocal(2, index, Compiler::FloatType);
      break;

    case 'F':
      frame.set(--index, Frame::Integer);
      c->initLocal(1, index, Compiler::FloatType);
      break;

    default:
      frame.set(--index, Frame::Integer);
      c->initLocal(1, index, Compiler::IntegerType);
      break;
    }
  }

  handleEntrance(t, &frame);

  Compiler::State* state = c->saveState();

  compile(t, &frame, 0);

  context->dirtyRoots = false;
  unsigned eventIndex = calculateFrameMaps(t, context, 0, 0);

  object eht = codeExceptionHandlerTable(t, methodCode(t, context->method));
  if (eht) {
    PROTECT(t, eht);

    unsigned visitCount = exceptionHandlerTableLength(t, eht);

    THREAD_RUNTIME_ARRAY(t, bool, visited, visitCount);
    memset(RUNTIME_ARRAY_BODY(visited), 0, visitCount * sizeof(bool));

    bool progress = true;
    while (progress) {
      progress = false;

      for (unsigned i = 0; i < exceptionHandlerTableLength(t, eht); ++i) {
        uint64_t eh = exceptionHandlerTableBody(t, eht, i);
        int start = resolveIpForwards
          (context, exceptionHandlerStart(eh), exceptionHandlerEnd(eh));

        if ((not RUNTIME_ARRAY_BODY(visited)[i])
            and start >= 0
            and context->visitTable[start])
        {
          RUNTIME_ARRAY_BODY(visited)[i] = true;
          progress = true;

          c->restoreState(state);

          THREAD_RUNTIME_ARRAY
            (t, uint8_t, stackMap,
             codeMaxStack(t, methodCode(t, context->method)));
          Frame frame2(&frame, RUNTIME_ARRAY_BODY(stackMap));

          unsigned end = exceptionHandlerEnd(eh);
          if (exceptionHandlerIp(eh) >= static_cast<unsigned>(start)
              and exceptionHandlerIp(eh) < end)
          {
            end = exceptionHandlerIp(eh);
          }

          context->eventLog.append(PushExceptionHandlerEvent);
          context->eventLog.append2(start);
          context->eventLog.append2(end);

          for (unsigned i = 1;
               i < codeMaxStack(t, methodCode(t, context->method));
               ++i)
          {
            frame2.set(localSize(t, context->method) + i, Frame::Integer);
          }

          compile(t, &frame2, exceptionHandlerIp(eh), start);

          context->eventLog.append(PopContextEvent);

          eventIndex = calculateFrameMaps(t, context, 0, eventIndex);
        }
      }
    }
  }

  while (context->dirtyRoots) {
    context->dirtyRoots = false;
    calculateFrameMaps(t, context, 0, 0);
  }
}

void
updateCall(MyThread* t, UnaryOperation op, void* returnAddress, void* target)
{
  t->arch->updateCall(op, returnAddress, target);
}
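
// Thunk target reached when compiled code calls a method that has not
// been compiled yet: recover the call site address (preferring
// tailAddress when the call was a tail call) and hand it to
// compileMethod2, returning its result to the calling thunk.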
void*
compileMethod2(MyThread* t, void* ip);

uint64_t
compileMethod(MyThread* t)
{
  void* ip;
  if (t->tailAddress) {
    ip = t->tailAddress;
    t->tailAddress = 0;
  } else {
    ip = getIp(t);
  }

  return reinterpret_cast<uintptr_t>(compileMethod2(t, ip));
}
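
// Compile the target of a virtual call: resolve the method at the
// given vtable index, compile it, patch the vtable entry with the
// resulting address (unless the target is native), and return that
// address.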
void*
compileVirtualMethod2(MyThread* t, object class_, unsigned index)
{
  // If class_ has BootstrapFlag set, that means its vtable is not yet
  // available.  However, we must set t->trace->targetMethod to an
  // appropriate method to ensure we can accurately scan the stack for
  // GC roots.  We find such a method by looking for a superclass with
  // a vtable and using it instead:

  object c = class_;
  while (classVmFlags(t, c) & BootstrapFlag) {
    c = classSuper(t, c);
  }
  t->trace->targetMethod = arrayBody(t, classVirtualTable(t, c), index);

  THREAD_RESOURCE0(t, static_cast<MyThread*>(t)->trace->targetMethod = 0;);

  PROTECT(t, class_);

  object target = resolveTarget(t, class_, index);
  PROTECT(t, target);

  compile(t, codeAllocator(t), 0, target);

  void* address = reinterpret_cast<void*>(methodAddress(t, target));
  if (methodFlags(t, target) & ACC_NATIVE) {
    t->trace->nativeMethod = target;
  } else {
    classVtable(t, class_, methodOffset(t, target)) = address;
  }
  return address;
}

uint64_t
compileVirtualMethod(MyThread* t)
{
  object class_ = objectClass(t, static_cast<object>(t->virtualCallTarget));
  t->virtualCallTarget = 0;

  unsigned index = t->virtualCallIndex;
  t->virtualCallIndex = 0;

  return reinterpret_cast<uintptr_t>(compileVirtualMethod2(t, class_, index));
}
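
// Invoke a native method registered with the fast calling convention:
// the function is called directly with the thread, the method, and a
// pointer to its arguments on the Java stack.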
uint64_t
invokeNativeFast(MyThread* t, object method, void* function)
{
  FastNativeFunction f; memcpy(&f, &function, sizeof(void*));
  return f(t, method,
           static_cast<uintptr_t*>(t->stack)
           + t->arch->frameFooterSize()
           + t->arch->frameReturnAddressSize());
}
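
// Invoke an ordinary (JNI-style) native method: marshal the Java
// arguments into a flat word array with parallel type codes, handle
// synchronized-method locking, make the call via the system's generic
// call primitive, then normalize the return value according to the
// method's return type and dispose of any references created during
// the call.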
uint64_t
invokeNativeSlow(MyThread* t, object method, void* function)
{
  PROTECT(t, method);

  unsigned footprint = methodParameterFootprint(t, method) + 1;
  if (methodFlags(t, method) & ACC_STATIC) {
    ++ footprint;
  }
  unsigned count = methodParameterCount(t, method) + 2;

  THREAD_RUNTIME_ARRAY(t, uintptr_t, args, footprint);
  unsigned argOffset = 0;
  THREAD_RUNTIME_ARRAY(t, uint8_t, types, count);
  unsigned typeOffset = 0;

  RUNTIME_ARRAY_BODY(args)[argOffset++] = reinterpret_cast<uintptr_t>(t);
  RUNTIME_ARRAY_BODY(types)[typeOffset++] = POINTER_TYPE;

  uintptr_t* sp = static_cast<uintptr_t*>(t->stack)
    + t->arch->frameFooterSize()
    + t->arch->frameReturnAddressSize();

  object jclass = 0;
  PROTECT(t, jclass);

  if (methodFlags(t, method) & ACC_STATIC) {
    jclass = getJClass(t, methodClass(t, method));
    RUNTIME_ARRAY_BODY(args)[argOffset++]
      = reinterpret_cast<uintptr_t>(&jclass);
  } else {
    RUNTIME_ARRAY_BODY(args)[argOffset++]
      = reinterpret_cast<uintptr_t>(sp++);
  }
  RUNTIME_ARRAY_BODY(types)[typeOffset++] = POINTER_TYPE;

  MethodSpecIterator it
    (t, reinterpret_cast<const char*>
     (&byteArrayBody(t, methodSpec(t, method), 0)));

  while (it.hasNext()) {
    unsigned type = RUNTIME_ARRAY_BODY(types)[typeOffset++]
      = fieldType(t, fieldCode(t, *it.next()));

    switch (type) {
    case INT8_TYPE:
    case INT16_TYPE:
    case INT32_TYPE:
    case FLOAT_TYPE:
      RUNTIME_ARRAY_BODY(args)[argOffset++] = *(sp++);
      break;

    case INT64_TYPE:
    case DOUBLE_TYPE: {
      memcpy(RUNTIME_ARRAY_BODY(args) + argOffset, sp, 8);
      argOffset += (8 / BytesPerWord);
      sp += 2;
    } break;

    case POINTER_TYPE: {
      if (*sp) {
        RUNTIME_ARRAY_BODY(args)[argOffset++]
          = reinterpret_cast<uintptr_t>(sp);
      } else {
        RUNTIME_ARRAY_BODY(args)[argOffset++] = 0;
      }
      ++ sp;
    } break;

    default: abort(t);
    }
  }

  unsigned returnCode = methodReturnCode(t, method);
  unsigned returnType = fieldType(t, returnCode);
  uint64_t result;

  if (DebugNatives) {
    fprintf(stderr, "invoke native method %s.%s\n",
            &byteArrayBody(t, className(t, methodClass(t, method)), 0),
            &byteArrayBody(t, methodName(t, method), 0));
  }

  if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
    if (methodFlags(t, method) & ACC_STATIC) {
      acquire(t, methodClass(t, method));
    } else {
      acquire(t, *reinterpret_cast<object*>(RUNTIME_ARRAY_BODY(args)[1]));
    }
  }

  Reference* reference = t->reference;

  { ENTER(t, Thread::IdleState);

    bool noThrow = t->checkpoint->noThrow;
    t->checkpoint->noThrow = true;
    THREAD_RESOURCE(t, bool, noThrow, t->checkpoint->noThrow = noThrow);

    result = t->m->system->call
      (function,
       RUNTIME_ARRAY_BODY(args),
       RUNTIME_ARRAY_BODY(types),
       count,
       footprint * BytesPerWord,
       returnType);
  }

  if (methodFlags(t, method) & ACC_SYNCHRONIZED) {
    if (methodFlags(t, method) & ACC_STATIC) {
      release(t, methodClass(t, method));
    } else {
      release(t, *reinterpret_cast<object*>(RUNTIME_ARRAY_BODY(args)[1]));
    }
  }

  if (DebugNatives) {
    fprintf(stderr, "return from native method %s.%s\n",
            &byteArrayBody(t, className(t, methodClass(t, method)), 0),
            &byteArrayBody(t, methodName(t, method), 0));
  }

  if (UNLIKELY(t->exception)) {
    object exception = t->exception;
    t->exception = 0;
    vm::throw_(t, exception);
  }

  switch (returnCode) {
  case ByteField:
  case BooleanField:
    result = static_cast<int8_t>(result);
    break;

  case CharField:
    result = static_cast<uint16_t>(result);
    break;

  case ShortField:
    result = static_cast<int16_t>(result);
    break;

  case FloatField:
  case IntField:
    result = static_cast<int32_t>(result);
    break;

  case LongField:
  case DoubleField:
    break;

  case ObjectField:
    result = static_cast<uintptr_t>(result) ? *reinterpret_cast<uintptr_t*>
      (static_cast<uintptr_t>(result)) : 0;
    break;

  case VoidField:
    result = 0;
    break;

  default: abort(t);
  }

  while (t->reference != reference) {
    dispose(t, t->reference);
  }

  return result;
}

uint64_t
invokeNative2(MyThread* t, object method)
{
  object native = methodRuntimeDataNative(t, getMethodRuntimeData(t, method));
  if (nativeFast(t, native)) {
    return invokeNativeFast(t, method, nativeFunction(t, native));
  } else {
    return invokeNativeSlow(t, method, nativeFunction(t, native));
  }
}
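
// Thunk target for calls into native code from compiled Java code:
// identify the native target if it is not already known (via the call
// node for direct calls, or by resolving the virtual target), invoke
// it, then pop the argument area and publish the new ip/stack via
// transition() before returning.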
uint64_t
invokeNative(MyThread* t)
{
  if (t->trace->nativeMethod == 0) {
    void* ip;
    if (t->tailAddress) {
      ip = t->tailAddress;
      t->tailAddress = 0;
    } else {
      ip = getIp(t);
    }

    object node = findCallNode(t, ip);
    object target = callNodeTarget(t, node);
    if (callNodeFlags(t, node) & TraceElement::VirtualCall) {
      target = resolveTarget(t, t->stack, target);
    }
    t->trace->nativeMethod = target;
  }

  assert(t, t->tailAddress == 0);

  uint64_t result = 0;

  t->trace->targetMethod = t->trace->nativeMethod;

  t->m->classpath->resolveNative(t, t->trace->nativeMethod);

  result = invokeNative2(t, t->trace->nativeMethod);

  unsigned parameterFootprint = methodParameterFootprint
    (t, t->trace->targetMethod);

  uintptr_t* stack = static_cast<uintptr_t*>(t->stack);

  if (TailCalls
      and t->arch->argumentFootprint(parameterFootprint)
      > t->arch->stackAlignmentInWords())
  {
    stack += t->arch->argumentFootprint(parameterFootprint)
      - t->arch->stackAlignmentInWords();
  }

  stack += t->arch->frameReturnAddressSize();

  transition(t, getIp(t), stack, t->continuation, t->trace);

  t->trace->targetMethod = 0;
  t->trace->nativeMethod = 0;

  return result;
}
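
// Look up a frame map in the simple (int array) table format: the
// first indexSize entries are sorted instruction offsets and the rest
// is a packed bit map, so a binary search on the offset yields the
// starting bit of the matching row.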
void
findFrameMapInSimpleTable(MyThread* t, object method, object table,
                          int32_t offset, int32_t** map, unsigned* start)
{
  unsigned tableSize = simpleFrameMapTableSize(t, method, table);
  unsigned indexSize = intArrayLength(t, table) - tableSize;

  *map = &intArrayBody(t, table, indexSize);

  unsigned bottom = 0;
  unsigned top = indexSize;
  for (unsigned span = top - bottom; span; span = top - bottom) {
    unsigned middle = bottom + (span / 2);
    int32_t v = intArrayBody(t, table, middle);

    if (offset == v) {
      *start = frameMapSizeInBits(t, method) * middle;
      return;
    } else if (offset < v) {
      top = middle;
    } else {
      bottom = middle + 1;
    }
  }

  abort(t);
}
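
// Select the correct frame map row when the map at an instruction
// depends on the path by which it was reached: walk the recorded path
// elements, matching return addresses saved on the stack against
// call-site offsets relative to the method's code.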
unsigned
findFrameMap(MyThread* t, void* stack, object method, object table,
             unsigned pathIndex)
{
  if (pathIndex) {
    FrameMapTablePath* path = reinterpret_cast<FrameMapTablePath*>
      (&byteArrayBody(t, table, pathIndex));

    void* address = static_cast<void**>(stack)[path->stackIndex];
    uint8_t* base = reinterpret_cast<uint8_t*>(methodAddress(t, method));
    for (unsigned i = 0; i < path->elementCount; ++i) {
      if (address == base + path->elements[i]) {
        return i + (path->elementCount * findFrameMap
                    (t, stack, method, table, path->next));
      }
    }

    abort(t);
  } else {
    return 0;
  }
}
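
// Look up a frame map in the general (byte array) table format: a
// header, a sorted index of (offset, base, path) elements, and the
// packed maps.  Binary search on the offset, then use the path
// information to pick the row that applies to this stack.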
void
findFrameMapInGeneralTable(MyThread* t, void* stack, object method,
                           object table, int32_t offset, int32_t** map,
                           unsigned* start)
{
  FrameMapTableHeader* header = reinterpret_cast<FrameMapTableHeader*>
    (&byteArrayBody(t, table, 0));

  FrameMapTableIndexElement* index
    = reinterpret_cast<FrameMapTableIndexElement*>
    (&byteArrayBody(t, table, sizeof(FrameMapTableHeader)));

  *map = reinterpret_cast<int32_t*>(index + header->indexCount);

  unsigned bottom = 0;
  unsigned top = header->indexCount;
  for (unsigned span = top - bottom; span; span = top - bottom) {
    unsigned middle = bottom + (span / 2);
    FrameMapTableIndexElement* v = index + middle;

    if (offset == v->offset) {
      *start = v->base + (findFrameMap(t, stack, method, table, v->path)
                          * frameMapSizeInBits(t, method));
      return;
    } else if (offset < v->offset) {
      top = middle;
    } else {
      bottom = middle + 1;
    }
  }

  abort(t);
}
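
// Dispatch to the simple or general table lookup depending on which
// representation the compiler stored in the code's pool.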
void
findFrameMap(MyThread* t, void* stack, object method, int32_t offset,
             int32_t** map, unsigned* start)
{
  object table = codePool(t, methodCode(t, method));
  if (objectClass(t, table) == type(t, Machine::IntArrayType)) {
    findFrameMapInSimpleTable(t, method, table, offset, map, start);
  } else {
    findFrameMapInGeneralTable(t, stack, method, table, offset, map, start);
  }
}
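
// Visit the GC roots in a single compiled frame: find the frame map
// row for the given ip and visit each stack/local slot whose bit is
// set.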
void
visitStackAndLocals(MyThread* t, Heap::Visitor* v, void* frame, object method,
                    void* ip)
{
  unsigned count = frameMapSizeInBits(t, method);

  if (count) {
    void* stack = stackForFrame(t, frame, method);

    int32_t* map;
    unsigned offset;
    findFrameMap
      (t, stack, method, difference
       (ip, reinterpret_cast<void*>(methodAddress(t, method))), &map, &offset);

    for (unsigned i = 0; i < count; ++i) {
      int j = offset + i;
      if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) {
        v->visit(localObject(t, stack, method, i));
      }
    }
  }
}

void
visitArgument(MyThread* t, Heap::Visitor* v, void* stack, unsigned index)
{
  v->visit(static_cast<object*>(stack)
           + index
           + t->arch->frameReturnAddressSize()
           + t->arch->frameFooterSize());
}

void
visitArguments(MyThread* t, Heap::Visitor* v, void* stack, object method)
{
  unsigned index = 0;

  if ((methodFlags(t, method) & ACC_STATIC) == 0) {
    visitArgument(t, v, stack, index++);
  }

  for (MethodSpecIterator it
         (t, reinterpret_cast<const char*>
          (&byteArrayBody(t, methodSpec(t, method), 0)));
       it.hasNext();)
  {
    switch (*it.next()) {
    case 'L':
    case '[':
      visitArgument(t, v, stack, index++);
      break;

    case 'J':
    case 'D':
      index += 2;
      break;

    default:
      ++ index;
      break;
    }
  }
}
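
// Walk the Java portion of the thread's stack, frame by frame:
// arguments of a call in progress are visited first, then the live
// slots of each compiled frame; when we run out of compiled frames we
// resume from the stack and ip saved in the next CallTrace entry.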
void
visitStack(MyThread* t, Heap::Visitor* v)
{
  void* ip = getIp(t);
  void* stack = t->stack;

  MyThread::CallTrace* trace = t->trace;
  object targetMethod = (trace ? trace->targetMethod : 0);
  object target = targetMethod;

  while (stack) {
    if (targetMethod) {
      visitArguments(t, v, stack, targetMethod);
      targetMethod = 0;
    }

    object method = methodForIp(t, ip);
    if (method) {
      PROTECT(t, method);

      void* nextIp = ip;
      nextFrame(t, &nextIp, &stack, method, target);

      visitStackAndLocals(t, v, stack, method, ip);

      ip = nextIp;

      target = method;
    } else if (trace) {
      stack = trace->stack;
      ip = trace->ip;
      trace = trace->next;

      if (trace) {
        targetMethod = trace->targetMethod;
        target = targetMethod;
      } else {
        target = 0;
      }
    } else {
      break;
    }
  }
}
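
// Walk the GC roots stored in a captured continuation frame, using the
// same frame map lookup as visitStackAndLocals but with offsets
// computed relative to the continuation body.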
void
walkContinuationBody(MyThread* t, Heap::Walker* w, object c, int start)
{
  const int BodyOffset = ContinuationBody / BytesPerWord;

  object method = static_cast<object>
    (t->m->heap->follow(continuationMethod(t, c)));
  int count = frameMapSizeInBits(t, method);

  if (count) {
    int stack = BodyOffset
      + (continuationFramePointerOffset(t, c) / BytesPerWord)
      - t->arch->framePointerOffset()
      - stackOffsetFromFrame(t, method);

    int first = stack + localOffsetFromStack(t, count - 1, method);
    if (start > first) {
      count -= start - first;
    }

    int32_t* map;
    unsigned offset;
    findFrameMap
      (t, reinterpret_cast<uintptr_t*>(c) + stack, method, difference
       (continuationAddress(t, c),
        reinterpret_cast<void*>(methodAddress(t, method))), &map, &offset);

    for (int i = count - 1; i >= 0; --i) {
      int j = offset + i;
      if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) {
        if (not w->visit(stack + localOffsetFromStack(t, i, method))) {
          return;
        }
      }
    }
  }
}
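
// Low-level continuation invocation: install the exception (if any),
// locate the unwind target, clear per-trace state, publish the new
// context via transition(), and jump into the target frame with the
// given result.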
void
callContinuation(MyThread* t, object continuation, object result,
                 object exception, void* ip, void* stack)
{
  assert(t, t->exception == 0);

  if (exception) {
    t->exception = exception;

    MyThread::TraceContext c(t, ip, stack, continuation, t->trace);

    void* frame;
    findUnwindTarget(t, &ip, &frame, &stack, &continuation);
  }

  t->trace->nativeMethod = 0;
  t->trace->targetMethod = 0;

  popResources(t);

  transition(t, ip, stack, continuation, t->trace);

  vmJump(ip, 0, stack, t, reinterpret_cast<uintptr_t>(result), 0);
}

int8_t*
returnSpec(MyThread* t, object method)
{
  int8_t* s = &byteArrayBody(t, methodSpec(t, method), 0);
  while (*s and *s != ')') ++ s;
  expect(t, *s == ')');
  return s + 1;
}

object
returnClass(MyThread* t, object method)
{
  PROTECT(t, method);

  int8_t* spec = returnSpec(t, method);
  unsigned length = strlen(reinterpret_cast<char*>(spec));
  object name;
  if (*spec == '[') {
    name = makeByteArray(t, length + 1);
    memcpy(&byteArrayBody(t, name, 0), spec, length);
  } else {
    assert(t, *spec == 'L');
    assert(t, spec[length - 1] == ';');
    name = makeByteArray(t, length - 1);
    memcpy(&byteArrayBody(t, name, 0), spec + 1, length - 2);
  }

  return resolveClass(t, classLoader(t, methodClass(t, method)), name);
}

bool
compatibleReturnType(MyThread* t, object oldMethod, object newMethod)
{
  if (oldMethod == newMethod) {
    return true;
  } else if (methodReturnCode(t, oldMethod) == methodReturnCode(t, newMethod))
  {
    if (methodReturnCode(t, oldMethod) == ObjectField) {
      PROTECT(t, newMethod);

      object oldClass = returnClass(t, oldMethod);
      PROTECT(t, oldClass);

      object newClass = returnClass(t, newMethod);

      return isAssignableFrom(t, oldClass, newClass);
    } else {
      return true;
    }
  } else {
    return methodReturnCode(t, oldMethod) == VoidField;
  }
}

void
jumpAndInvoke(MyThread* t, object method, void* stack, ...)
{
  t->trace->targetMethod = 0;

  if (methodFlags(t, method) & ACC_NATIVE) {
    t->trace->nativeMethod = method;
  } else {
    t->trace->nativeMethod = 0;
  }

  unsigned argumentCount = methodParameterFootprint(t, method);
  THREAD_RUNTIME_ARRAY(t, uintptr_t, arguments, argumentCount);
  va_list a; va_start(a, stack);
  for (unsigned i = 0; i < argumentCount; ++i) {
    RUNTIME_ARRAY_BODY(arguments)[i] = va_arg(a, uintptr_t);
  }
  va_end(a);

  assert(t, t->exception == 0);

  popResources(t);

  vmJumpAndInvoke
    (t, reinterpret_cast<void*>(methodAddress(t, method)),
     stack,
     argumentCount * BytesPerWord,
     RUNTIME_ARRAY_BODY(arguments),
     (t->arch->alignFrameSize(t->arch->argumentFootprint(argumentCount))
      + t->arch->frameReturnAddressSize())
     * BytesPerWord);
}
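
// High-level continuation invocation: decide whether the continuation
// can be called directly, requires unwinding to a common context, or
// requires rewinding via avian/Continuations.rewind, then carry out
// the chosen action.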
void
|
|
|
|
callContinuation(MyThread* t, object continuation, object result,
|
|
|
|
object exception)
|
|
|
|
{
|
|
|
|
enum {
|
|
|
|
Call,
|
|
|
|
Unwind,
|
2010-12-27 22:55:23 +00:00
|
|
|
Rewind
|
2009-05-23 22:15:06 +00:00
|
|
|
} action;
|
|
|
|
|
|
|
|
object nextContinuation = 0;
|
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
if (t->continuation == 0
|
2009-05-23 22:15:06 +00:00
|
|
|
or continuationContext(t, t->continuation)
|
|
|
|
!= continuationContext(t, continuation))
|
|
|
|
{
|
|
|
|
PROTECT(t, continuation);
|
|
|
|
PROTECT(t, result);
|
|
|
|
PROTECT(t, exception);
|
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
if (compatibleReturnType
|
2009-05-23 22:15:06 +00:00
|
|
|
(t, t->trace->originalMethod, continuationContextMethod
|
2009-05-25 04:36:16 +00:00
|
|
|
(t, continuationContext(t, continuation))))
|
2009-05-23 22:15:06 +00:00
|
|
|
{
|
2009-05-24 01:49:14 +00:00
|
|
|
object oldContext;
|
|
|
|
object unwindContext;
|
2009-05-23 22:15:06 +00:00
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
if (t->continuation) {
|
|
|
|
oldContext = continuationContext(t, t->continuation);
|
|
|
|
unwindContext = oldContext;
|
|
|
|
} else {
|
|
|
|
oldContext = 0;
|
|
|
|
unwindContext = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
object rewindContext = 0;
|
|
|
|
|
|
|
|
for (object newContext = continuationContext(t, continuation);
|
|
|
|
newContext; newContext = continuationContextNext(t, newContext))
|
|
|
|
{
|
|
|
|
if (newContext == oldContext) {
|
|
|
|
unwindContext = 0;
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
rewindContext = newContext;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (unwindContext
|
|
|
|
and continuationContextContinuation(t, unwindContext))
|
|
|
|
{
|
|
|
|
nextContinuation = continuationContextContinuation(t, unwindContext);
|
2009-05-23 22:15:06 +00:00
|
|
|
result = makeUnwindResult(t, continuation, result, exception);
|
|
|
|
action = Unwind;
|
2009-05-24 01:49:14 +00:00
|
|
|
} else if (rewindContext
|
|
|
|
and continuationContextContinuation(t, rewindContext))
|
|
|
|
{
|
|
|
|
nextContinuation = continuationContextContinuation(t, rewindContext);
|
|
|
|
action = Rewind;
|
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
if (root(t, RewindMethod) == 0) {
|
2009-05-24 01:49:14 +00:00
|
|
|
PROTECT(t, nextContinuation);
|
2009-05-23 22:15:06 +00:00
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
object method = resolveMethod
|
2010-09-14 16:49:41 +00:00
|
|
|
(t, root(t, Machine::BootLoader), "avian/Continuations", "rewind",
|
2009-06-03 00:55:12 +00:00
|
|
|
"(Ljava/lang/Runnable;Lavian/Callback;Ljava/lang/Object;"
|
|
|
|
"Ljava/lang/Throwable;)V");
|
2011-01-28 04:06:01 +00:00
|
|
|
|
|
|
|
PROTECT(t, method);
|
2009-05-25 00:22:36 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
compile(t, local::codeAllocator(t), 0, method);
|
2011-01-28 04:06:01 +00:00
|
|
|
|
|
|
|
setRoot(t, RewindMethod, method);
|
2009-05-23 22:15:06 +00:00
|
|
|
}
|
2009-05-25 04:49:39 +00:00
|
|
|
} else {
|
|
|
|
action = Call;
|
|
|
|
}
|
2009-05-24 01:49:14 +00:00
|
|
|
} else {
|
2010-12-27 22:55:23 +00:00
|
|
|
throwNew(t, Machine::IncompatibleContinuationExceptionType);
|
2009-05-23 22:15:06 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
action = Call;
|
|
|
|
}
|
|
|
|
|
|
|
|
void* ip;
|
2011-01-28 04:06:01 +00:00
|
|
|
void* frame;
|
2009-05-23 22:15:06 +00:00
|
|
|
void* stack;
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
object threadContinuation;
|
2011-01-28 04:06:01 +00:00
|
|
|
findUnwindTarget(t, &ip, &frame, &stack, &threadContinuation);
|
2009-05-23 22:15:06 +00:00
|
|
|
|
|
|
|
switch (action) {
|
|
|
|
case Call: {
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
callContinuation(t, continuation, result, exception, ip, stack);
|
2009-05-23 22:15:06 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case Unwind: {
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
callContinuation(t, nextContinuation, result, 0, ip, stack);
|
2009-05-23 22:15:06 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case Rewind: {
|
2011-01-17 02:05:05 +00:00
|
|
|
transition(t, 0, 0, nextContinuation, t->trace);
|
2009-05-25 04:49:39 +00:00
|
|
|
|
2009-05-23 22:15:06 +00:00
|
|
|
jumpAndInvoke
|
2011-01-17 02:05:05 +00:00
|
|
|
(t, root(t, RewindMethod), stack,
|
2009-05-25 04:27:50 +00:00
|
|
|
continuationContextBefore(t, continuationContext(t, nextContinuation)),
|
2009-05-25 04:49:39 +00:00
|
|
|
continuation, result, exception);
|
2009-05-23 22:15:06 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
abort(t);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-05-26 05:27:10 +00:00
|
|
|
void
|
|
|
|
callWithCurrentContinuation(MyThread* t, object receiver)
|
|
|
|
{
|
|
|
|
object method = 0;
|
|
|
|
void* ip = 0;
|
|
|
|
void* stack = 0;
|
|
|
|
|
|
|
|
{ PROTECT(t, receiver);
|
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
if (root(t, ReceiveMethod) == 0) {
|
2009-06-03 00:55:12 +00:00
|
|
|
object m = resolveMethod
|
2010-09-14 16:49:41 +00:00
|
|
|
(t, root(t, Machine::BootLoader), "avian/CallbackReceiver", "receive",
|
2009-06-03 00:55:12 +00:00
|
|
|
"(Lavian/Callback;)Ljava/lang/Object;");
|
2009-05-26 05:27:10 +00:00
|
|
|
|
|
|
|
if (m) {
|
2010-09-14 16:49:41 +00:00
|
|
|
setRoot(t, ReceiveMethod, m);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
object continuationClass = type(t, Machine::ContinuationType);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
|
|
|
if (classVmFlags(t, continuationClass) & BootstrapFlag) {
|
2010-09-14 16:49:41 +00:00
|
|
|
resolveSystemClass
|
|
|
|
(t, root(t, Machine::BootLoader),
|
|
|
|
vm::className(t, continuationClass));
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
method = findInterfaceMethod
|
|
|
|
(t, root(t, ReceiveMethod), objectClass(t, receiver));
|
|
|
|
PROTECT(t, method);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
compile(t, local::codeAllocator(t), 0, method);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
t->continuation = makeCurrentContinuation(t, &ip, &stack);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
jumpAndInvoke(t, method, stack, receiver, t->continuation);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
dynamicWind(MyThread* t, object before, object thunk, object after)
|
|
|
|
{
|
|
|
|
void* ip = 0;
|
|
|
|
void* stack = 0;
|
|
|
|
|
|
|
|
{ PROTECT(t, before);
|
|
|
|
PROTECT(t, thunk);
|
|
|
|
PROTECT(t, after);
|
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
if (root(t, WindMethod) == 0) {
|
2009-06-03 00:55:12 +00:00
|
|
|
object method = resolveMethod
|
2010-09-14 16:49:41 +00:00
|
|
|
(t, root(t, Machine::BootLoader), "avian/Continuations", "wind",
|
2009-06-03 00:55:12 +00:00
|
|
|
"(Ljava/lang/Runnable;Ljava/util/concurrent/Callable;"
|
2011-01-28 04:06:01 +00:00
|
|
|
"Ljava/lang/Runnable;)Lavian/Continuations$UnwindResult;");
|
2009-05-26 05:27:10 +00:00
|
|
|
|
|
|
|
if (method) {
|
2010-09-14 16:49:41 +00:00
|
|
|
setRoot(t, WindMethod, method);
|
2009-08-27 00:26:44 +00:00
|
|
|
compile(t, local::codeAllocator(t), 0, method);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
t->continuation = makeCurrentContinuation(t, &ip, &stack);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
object newContext = makeContinuationContext
|
|
|
|
(t, continuationContext(t, t->continuation), before, after,
|
|
|
|
t->continuation, t->trace->originalMethod);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
set(t, t->continuation, ContinuationContext, newContext);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
jumpAndInvoke(t, root(t, WindMethod), stack, before, thunk, after);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
class ArgumentList {
|
|
|
|
public:
|
2009-02-17 02:49:28 +00:00
|
|
|
ArgumentList(Thread* t, uintptr_t* array, unsigned size, bool* objectMask,
|
|
|
|
object this_, const char* spec, bool indirectObjects,
|
|
|
|
va_list arguments):
|
2007-09-25 23:53:11 +00:00
|
|
|
t(static_cast<MyThread*>(t)),
|
|
|
|
array(array),
|
|
|
|
objectMask(objectMask),
|
2009-02-17 02:49:28 +00:00
|
|
|
size(size),
|
2009-05-03 20:57:11 +00:00
|
|
|
position(0),
|
2007-10-12 17:56:43 +00:00
|
|
|
protector(this)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
|
|
|
if (this_) {
|
|
|
|
addObject(this_);
|
|
|
|
}
|
|
|
|
|
2007-10-12 17:56:43 +00:00
|
|
|
for (MethodSpecIterator it(t, spec); it.hasNext();) {
|
|
|
|
switch (*it.next()) {
|
2007-09-25 23:53:11 +00:00
|
|
|
case 'L':
|
|
|
|
case '[':
|
|
|
|
if (indirectObjects) {
|
|
|
|
object* v = va_arg(arguments, object*);
|
|
|
|
addObject(v ? *v : 0);
|
|
|
|
} else {
|
|
|
|
addObject(va_arg(arguments, object));
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'J':
|
|
|
|
addLong(va_arg(arguments, uint64_t));
|
|
|
|
break;
|
2007-10-12 17:56:43 +00:00
|
|
|
|
2012-02-28 22:35:28 +00:00
|
|
|
case 'D':
|
|
|
|
addLong(doubleToBits(va_arg(arguments, double)));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'F':
|
|
|
|
addInt(floatToBits(va_arg(arguments, double)));
|
|
|
|
break;
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
default:
|
|
|
|
addInt(va_arg(arguments, uint32_t));
|
2007-10-12 17:56:43 +00:00
|
|
|
break;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2009-02-17 02:49:28 +00:00
|
|
|
ArgumentList(Thread* t, uintptr_t* array, unsigned size, bool* objectMask,
|
|
|
|
object this_, const char* spec, object arguments):
|
2007-09-25 23:53:11 +00:00
|
|
|
t(static_cast<MyThread*>(t)),
|
|
|
|
array(array),
|
|
|
|
objectMask(objectMask),
|
2009-02-17 02:49:28 +00:00
|
|
|
size(size),
|
2009-05-03 20:57:11 +00:00
|
|
|
position(0),
|
2007-10-12 17:56:43 +00:00
|
|
|
protector(this)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
|
|
|
if (this_) {
|
|
|
|
addObject(this_);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned index = 0;
|
2007-10-12 17:56:43 +00:00
|
|
|
for (MethodSpecIterator it(t, spec); it.hasNext();) {
|
|
|
|
switch (*it.next()) {
|
2007-09-25 23:53:11 +00:00
|
|
|
case 'L':
|
|
|
|
case '[':
|
|
|
|
addObject(objectArrayBody(t, arguments, index++));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'J':
|
|
|
|
case 'D':
|
2011-09-13 02:26:32 +00:00
|
|
|
addLong(cast<int64_t>(objectArrayBody(t, arguments, index++), 8));
|
2007-09-25 23:53:11 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
addInt(cast<int32_t>(objectArrayBody(t, arguments, index++),
|
|
|
|
BytesPerWord));
|
2011-03-17 14:46:46 +00:00
|
|
|
break;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void addObject(object v) {
|
2009-05-03 20:57:11 +00:00
|
|
|
assert(t, position < size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
array[position] = reinterpret_cast<uintptr_t>(v);
|
|
|
|
objectMask[position] = true;
|
2009-05-03 20:57:11 +00:00
|
|
|
++ position;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-10-12 22:06:33 +00:00
|
|
|
void addInt(uintptr_t v) {
|
2009-05-03 20:57:11 +00:00
|
|
|
assert(t, position < size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
array[position] = v;
|
|
|
|
objectMask[position] = false;
|
2009-05-03 20:57:11 +00:00
|
|
|
++ position;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void addLong(uint64_t v) {
|
2009-05-03 20:57:11 +00:00
|
|
|
assert(t, position < size - 1);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
memcpy(array + position, &v, 8);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
objectMask[position] = false;
|
2007-12-23 20:06:24 +00:00
|
|
|
objectMask[position + 1] = false;
|
2009-05-03 20:57:11 +00:00
|
|
|
|
|
|
|
position += 2;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
MyThread* t;
|
|
|
|
uintptr_t* array;
|
|
|
|
bool* objectMask;
|
2009-02-17 02:49:28 +00:00
|
|
|
unsigned size;
|
2007-09-25 23:53:11 +00:00
|
|
|
unsigned position;
|
2007-10-12 17:56:43 +00:00
|
|
|
|
|
|
|
class MyProtector: public Thread::Protector {
|
|
|
|
public:
|
|
|
|
MyProtector(ArgumentList* list): Protector(list->t), list(list) { }
|
|
|
|
|
|
|
|
virtual void visit(Heap::Visitor* v) {
|
2009-05-17 23:43:48 +00:00
|
|
|
for (unsigned i = 0; i < list->position; ++i) {
|
2007-10-12 17:56:43 +00:00
|
|
|
if (list->objectMask[i]) {
|
2007-10-28 19:14:53 +00:00
|
|
|
v->visit(reinterpret_cast<object*>(list->array + i));
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ArgumentList* list;
|
|
|
|
} protector;
|
2007-09-25 23:53:11 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
object
|
|
|
|
invoke(Thread* thread, object method, ArgumentList* arguments)
|
|
|
|
{
|
|
|
|
MyThread* t = static_cast<MyThread*>(thread);
|
2010-12-19 22:23:19 +00:00
|
|
|
|
2011-03-26 00:55:25 +00:00
|
|
|
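  // Presumably a disabled debugging aid: flipping this condition forces a
  // specific, hard-coded method to be compiled before the real invocation.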
if (false) {
|
|
|
|
PROTECT(t, method);
|
|
|
|
|
|
|
|
compile(t, local::codeAllocator(static_cast<MyThread*>(t)), 0,
|
|
|
|
resolveMethod
|
|
|
|
(t, root(t, Machine::AppLoader),
|
|
|
|
"foo/ClassName",
|
|
|
|
"methodName",
|
|
|
|
"()V"));
|
|
|
|
}
|
|
|
|
|
2010-12-19 22:23:19 +00:00
|
|
|
uintptr_t stackLimit = t->stackLimit;
|
|
|
|
uintptr_t stackPosition = reinterpret_cast<uintptr_t>(&t);
|
|
|
|
if (stackLimit == 0) {
|
|
|
|
t->stackLimit = stackPosition - StackSizeInBytes;
|
|
|
|
} else if (stackPosition < stackLimit) {
|
2010-12-27 22:55:23 +00:00
|
|
|
throwNew(t, Machine::StackOverflowErrorType);
|
2010-12-19 22:23:19 +00:00
|
|
|
}
|
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RESOURCE(t, uintptr_t, stackLimit,
|
|
|
|
static_cast<MyThread*>(t)->stackLimit = stackLimit);
|
|
|
|
|
2007-09-26 23:23:03 +00:00
|
|
|
unsigned returnCode = methodReturnCode(t, method);
|
2007-09-25 23:53:11 +00:00
|
|
|
unsigned returnType = fieldType(t, returnCode);
|
|
|
|
|
2007-12-30 22:24:48 +00:00
|
|
|
uint64_t result;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
{ MyThread::CallTrace trace(t, method);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
MyCheckpoint checkpoint(t);
|
|
|
|
|
2009-05-15 02:08:01 +00:00
|
|
|
assert(t, arguments->position == arguments->size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-12-30 22:24:48 +00:00
|
|
|
result = vmInvoke
|
2008-12-02 02:38:00 +00:00
|
|
|
(t, reinterpret_cast<void*>(methodAddress(t, method)),
|
2009-05-15 02:08:01 +00:00
|
|
|
arguments->array,
|
|
|
|
arguments->position * BytesPerWord,
|
|
|
|
t->arch->alignFrameSize
|
2009-05-17 00:39:08 +00:00
|
|
|
(t->arch->argumentFootprint(arguments->position))
|
2009-04-25 23:33:42 +00:00
|
|
|
* BytesPerWord,
|
2009-02-17 02:49:28 +00:00
|
|
|
returnType);
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-04-20 16:21:32 +00:00
|
|
|
if (t->exception) {
|
2010-09-14 16:49:41 +00:00
|
|
|
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
|
2008-04-20 16:21:32 +00:00
|
|
|
collect(t, Heap::MinorCollection);
|
|
|
|
}
|
2010-12-27 22:55:23 +00:00
|
|
|
|
|
|
|
object exception = t->exception;
|
|
|
|
t->exception = 0;
|
|
|
|
vm::throw_(t, exception);
|
2008-04-20 16:21:32 +00:00
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
object r;
|
|
|
|
switch (returnCode) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
case FloatField:
|
|
|
|
case IntField:
|
|
|
|
r = makeInt(t, result);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LongField:
|
|
|
|
case DoubleField:
|
|
|
|
r = makeLong(t, result);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ObjectField:
|
2007-12-16 00:24:15 +00:00
|
|
|
r = reinterpret_cast<object>(result);
|
2007-09-25 23:53:11 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VoidField:
|
|
|
|
r = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
abort(t);
|
2009-09-19 00:01:54 +00:00
|
|
|
}
|
2007-09-24 01:39:03 +00:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2010-12-20 00:47:21 +00:00
|
|
|
class SignalHandler: public System::SignalHandler {
|
2007-12-30 22:24:48 +00:00
|
|
|
public:
|
2010-12-20 00:47:21 +00:00
|
|
|
SignalHandler(Machine::Type type, Machine::Root root, unsigned fixedSize):
|
|
|
|
m(0), type(type), root(root), fixedSize(fixedSize) { }
|
2007-12-30 22:24:48 +00:00
|
|
|
|
2011-01-28 04:06:01 +00:00
|
|
|
virtual bool handleSignal(void** ip, void** frame, void** stack,
|
|
|
|
void** thread)
|
|
|
|
{
|
2007-12-30 22:24:48 +00:00
|
|
|
MyThread* t = static_cast<MyThread*>(m->localThread->get());
|
2009-08-18 21:47:08 +00:00
|
|
|
if (t and t->state == Thread::ActiveState) {
|
2008-04-07 23:51:32 +00:00
|
|
|
object node = methodForIp(t, *ip);
|
2008-01-02 01:07:12 +00:00
|
|
|
if (node) {
|
2009-09-04 23:08:45 +00:00
|
|
|
// add one to the IP since findLineNumber will subtract one
|
2009-09-04 21:09:40 +00:00
|
|
|
// when we make the trace:
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
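As a rough illustration of the staging protocol described above, here is a minimal sketch, assuming hypothetical names and std::atomic stores in place of the VM's explicit memory barriers, rather than Avian's actual types: readers prefer the staged values whenever the transition pointer is non-null, so they never observe a half-updated ip/stack pair.

#include <atomic>

struct Context {
  void* ip;
  void* stack;
  void* continuation;
};

struct ThreadState {
  // Values normally consulted when building a trace for this thread.
  std::atomic<void*> ip{nullptr};
  std::atomic<void*> stack{nullptr};
  std::atomic<void*> continuation{nullptr};
  // Non-null only while the fields above are being rewritten.
  std::atomic<Context*> transition{nullptr};
};

// Writer side: runs in the thread whose context is changing. The staging
// object must remain valid for as long as a concurrent reader might still be
// using it; this sketch leaves that lifetime concern to the caller.
inline void doTransition(ThreadState& t, Context& staging,
                         void* ip, void* stack, void* continuation)
{
  // 1. Populate the staging object with the new values.
  staging = Context{ip, stack, continuation};

  // 2. Publish it; as long as this pointer is non-null, readers use 'staging'
  //    instead of the fields in 't'.
  t.transition.store(&staging, std::memory_order_release);

  // 3. Update the real fields while readers are diverted to 'staging'.
  t.ip.store(ip, std::memory_order_release);
  t.stack.store(stack, std::memory_order_release);
  t.continuation.store(continuation, std::memory_order_release);

  // 4. Withdraw the staging object; readers go back to the real fields.
  t.transition.store(nullptr, std::memory_order_release);
}

// Reader side: runs in the thread grabbing a stack trace of 't'.
inline Context readContext(ThreadState& t)
{
  if (Context* staged = t.transition.load(std::memory_order_acquire)) {
    return *staged;
  }
  return Context{t.ip.load(std::memory_order_acquire),
                 t.stack.load(std::memory_order_acquire),
                 t.continuation.load(std::memory_order_acquire)};
}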
|
|
|
MyThread::TraceContext context
|
|
|
|
(t, static_cast<uint8_t*>(*ip) + 1,
|
|
|
|
static_cast<void**>(*stack) - t->arch->frameReturnAddressSize(),
|
2011-01-17 02:05:05 +00:00
|
|
|
t->continuation, t->trace);
|
2008-04-09 19:08:13 +00:00
|
|
|
|
2010-12-20 00:47:21 +00:00
|
|
|
if (ensure(t, fixedSize + traceSize(t))) {
|
2010-09-14 16:49:41 +00:00
|
|
|
atomicOr(&(t->flags), Thread::TracingFlag);
|
2010-12-20 00:47:21 +00:00
|
|
|
t->exception = makeThrowable(t, type);
|
2010-09-14 16:49:41 +00:00
|
|
|
atomicAnd(&(t->flags), ~Thread::TracingFlag);
|
2010-06-19 22:40:21 +00:00
|
|
|
} else {
|
2010-12-20 00:47:21 +00:00
|
|
|
// not enough memory available for a new exception and stack
|
|
|
|
// trace -- use a preallocated instance instead
|
|
|
|
t->exception = vm::root(t, root);
|
2010-06-19 22:40:21 +00:00
|
|
|
}
|
2008-01-02 01:07:12 +00:00
|
|
|
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attach to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
// printTrace(t, t->exception);
|
2009-08-13 15:17:05 +00:00
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
object continuation;
|
2011-01-28 04:06:01 +00:00
|
|
|
findUnwindTarget(t, ip, frame, stack, &continuation);
|
2008-04-23 16:33:31 +00:00
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
transition(t, ip, stack, continuation, t->trace);
|
2008-04-23 16:33:31 +00:00
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
*thread = t;
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
return true;
|
|
|
|
}
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
2009-06-11 23:14:54 +00:00
|
|
|
|
|
|
|
if (compileLog) {
|
|
|
|
fflush(compileLog);
|
|
|
|
}
|
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
return false;
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Machine* m;
|
2010-12-20 00:47:21 +00:00
|
|
|
Machine::Type type;
|
|
|
|
Machine::Root root;
|
|
|
|
unsigned fixedSize;
|
2007-12-30 22:24:48 +00:00
|
|
|
};
|
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
bool
|
|
|
|
isThunk(MyThread* t, void* ip);
|
|
|
|
|
2010-07-06 22:13:11 +00:00
|
|
|
bool
|
|
|
|
isVirtualThunk(MyThread* t, void* ip);
|
|
|
|
|
2010-06-16 01:10:48 +00:00
|
|
|
bool
|
|
|
|
isThunkUnsafeStack(MyThread* t, void* ip);
|
|
|
|
|
2008-12-02 02:38:00 +00:00
|
|
|
void
|
2011-09-20 22:30:30 +00:00
|
|
|
boot(MyThread* t, BootImage* image, uint8_t* code);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2008-04-22 15:31:40 +00:00
|
|
|
class MyProcessor;
|
|
|
|
|
|
|
|
MyProcessor*
|
|
|
|
processor(MyThread* t);
|
|
|
|
|
2010-09-14 16:49:41 +00:00
|
|
|
void
|
2011-09-24 05:25:52 +00:00
|
|
|
compileThunks(MyThread* t, FixedAllocator* allocator);
|
2010-09-14 16:49:41 +00:00
|
|
|
|
2012-05-02 15:49:31 +00:00
|
|
|
class CompilationHandlerList {
|
|
|
|
public:
|
|
|
|
CompilationHandlerList(CompilationHandlerList* next, Processor::CompilationHandler* handler):
|
|
|
|
next(next),
|
|
|
|
handler(handler) {}
|
|
|
|
|
|
|
|
void dispose(Allocator* allocator) {
|
|
|
|
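    // Note: dispose may be called on a null list head; this check terminates
    // the recursion (a common idiom, though testing 'this' for null is
    // technically undefined behavior in standard C++).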
if (this) {
|
|
|
|
next->dispose(allocator);
|
|
|
|
handler->dispose();
|
|
|
|
allocator->free(this, sizeof(*this));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
CompilationHandlerList* next;
|
|
|
|
Processor::CompilationHandler* handler;
|
|
|
|
};
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
class MyProcessor: public Processor {
|
2007-09-24 01:39:03 +00:00
|
|
|
public:
|
2010-06-16 01:10:48 +00:00
|
|
|
class Thunk {
|
|
|
|
public:
|
|
|
|
Thunk():
|
|
|
|
start(0), frameSavedOffset(0), length(0)
|
|
|
|
{ }
|
|
|
|
|
|
|
|
Thunk(uint8_t* start, unsigned frameSavedOffset, unsigned length):
|
|
|
|
start(start), frameSavedOffset(frameSavedOffset), length(length)
|
|
|
|
{ }
|
|
|
|
|
|
|
|
uint8_t* start;
|
|
|
|
unsigned frameSavedOffset;
|
|
|
|
unsigned length;
|
|
|
|
};
|
|
|
|
|
|
|
|
class ThunkCollection {
|
|
|
|
public:
|
|
|
|
Thunk default_;
|
|
|
|
Thunk defaultVirtual;
|
|
|
|
Thunk native;
|
|
|
|
Thunk aioob;
|
2010-12-19 22:23:19 +00:00
|
|
|
Thunk stackOverflow;
|
2010-06-16 01:10:48 +00:00
|
|
|
Thunk table;
|
|
|
|
};
|
|
|
|
|
2009-10-10 23:46:43 +00:00
|
|
|
MyProcessor(System* s, Allocator* allocator, bool useNativeFeatures):
|
2007-09-25 23:53:11 +00:00
|
|
|
s(s),
|
2008-01-13 22:05:08 +00:00
|
|
|
allocator(allocator),
|
2010-09-14 16:49:41 +00:00
|
|
|
roots(0),
|
2009-10-10 23:46:43 +00:00
|
|
|
bootImage(0),
|
2011-09-20 22:30:30 +00:00
|
|
|
heapImage(0),
|
|
|
|
codeImage(0),
|
|
|
|
codeImageSize(0),
|
2010-12-20 00:47:21 +00:00
|
|
|
segFaultHandler(Machine::NullPointerExceptionType,
|
|
|
|
Machine::NullPointerException,
|
|
|
|
FixedSizeOfNullPointerException),
|
|
|
|
divideByZeroHandler(Machine::ArithmeticExceptionType,
|
|
|
|
Machine::ArithmeticException,
|
|
|
|
FixedSizeOfArithmeticException),
|
2009-06-01 03:16:58 +00:00
|
|
|
codeAllocator(s, 0, 0),
|
2010-06-25 01:09:50 +00:00
|
|
|
callTableSize(0),
|
2012-05-02 15:49:31 +00:00
|
|
|
useNativeFeatures(useNativeFeatures),
|
|
|
|
compilationHandlers(0)
|
2011-09-20 22:30:30 +00:00
|
|
|
{
|
|
|
|
thunkTable[compileMethodIndex] = voidPointer(local::compileMethod);
|
|
|
|
thunkTable[compileVirtualMethodIndex] = voidPointer(compileVirtualMethod);
|
|
|
|
thunkTable[invokeNativeIndex] = voidPointer(invokeNative);
|
|
|
|
thunkTable[throwArrayIndexOutOfBoundsIndex] = voidPointer
|
|
|
|
(throwArrayIndexOutOfBounds);
|
|
|
|
thunkTable[throwStackOverflowIndex] = voidPointer(throwStackOverflow);
|
|
|
|
#define THUNK(s) thunkTable[s##Index] = voidPointer(s);
|
|
|
|
#include "thunks.cpp"
|
|
|
|
#undef THUNK
|
2011-09-24 04:21:54 +00:00
|
|
|
// Set the dummyIndex entry to a constant which should require the
|
|
|
|
// maximum number of bytes to represent in assembly code
|
|
|
|
// (i.e. can't be represented by a smaller number of bytes and
|
|
|
|
// implicitly sign- or zero-extended). We'll use this property
|
|
|
|
// later to determine the maximum size of a thunk in the thunk
|
|
|
|
// table.
|
2011-09-21 01:50:38 +00:00
|
|
|
thunkTable[dummyIndex] = reinterpret_cast<void*>
|
2011-09-24 04:21:54 +00:00
|
|
|
(static_cast<uintptr_t>(UINT64_C(0x5555555555555555)));
|
2011-09-20 22:30:30 +00:00
|
|
|
}
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
virtual Thread*
|
|
|
|
makeThread(Machine* m, object javaThread, Thread* parent)
|
|
|
|
{
|
2008-04-13 18:15:04 +00:00
|
|
|
MyThread* t = new (m->heap->allocate(sizeof(MyThread)))
|
2009-10-10 23:46:43 +00:00
|
|
|
MyThread(m, javaThread, static_cast<MyThread*>(parent),
|
|
|
|
useNativeFeatures);
|
2011-09-20 22:30:30 +00:00
|
|
|
|
|
|
|
t->heapImage = heapImage;
|
|
|
|
t->codeImage = codeImage;
|
|
|
|
t->thunkTable = thunkTable;
|
2009-05-05 01:04:17 +00:00
|
|
|
|
2009-05-25 00:22:36 +00:00
|
|
|
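    // Presumably a disabled debugging aid: prints the byte offsets of various
    // MyThread fields (useful when hand-written assembly needs them) and exits.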
if (false) {
|
2011-09-01 03:18:00 +00:00
|
|
|
fprintf(stderr, "stack %d\n",
|
|
|
|
difference(&(t->stack), t));
|
|
|
|
fprintf(stderr, "scratch %d\n",
|
|
|
|
difference(&(t->scratch), t));
|
|
|
|
fprintf(stderr, "continuation %d\n",
|
|
|
|
difference(&(t->continuation), t));
|
|
|
|
fprintf(stderr, "exception %d\n",
|
|
|
|
difference(&(t->exception), t));
|
|
|
|
fprintf(stderr, "exceptionStackAdjustment %d\n",
|
|
|
|
difference(&(t->exceptionStackAdjustment), t));
|
|
|
|
fprintf(stderr, "exceptionOffset %d\n",
|
|
|
|
difference(&(t->exceptionOffset), t));
|
|
|
|
fprintf(stderr, "exceptionHandler %d\n",
|
|
|
|
difference(&(t->exceptionHandler), t));
|
|
|
|
fprintf(stderr, "tailAddress %d\n",
|
|
|
|
difference(&(t->tailAddress), t));
|
|
|
|
fprintf(stderr, "stackLimit %d\n",
|
|
|
|
difference(&(t->stackLimit), t));
|
|
|
|
fprintf(stderr, "ip %d\n",
|
|
|
|
difference(&(t->ip), t));
|
|
|
|
fprintf(stderr, "virtualCallTarget %d\n",
|
|
|
|
difference(&(t->virtualCallTarget), t));
|
|
|
|
fprintf(stderr, "virtualCallIndex %d\n",
|
|
|
|
difference(&(t->virtualCallIndex), t));
|
2011-09-30 20:44:25 +00:00
|
|
|
fprintf(stderr, "heapImage %d\n",
|
|
|
|
difference(&(t->heapImage), t));
|
|
|
|
fprintf(stderr, "codeImage %d\n",
|
|
|
|
difference(&(t->codeImage), t));
|
|
|
|
fprintf(stderr, "thunkTable %d\n",
|
|
|
|
difference(&(t->thunkTable), t));
|
2009-05-25 00:22:36 +00:00
|
|
|
exit(0);
|
|
|
|
}
|
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
t->init();
|
|
|
|
|
2007-10-25 22:06:05 +00:00
|
|
|
return t;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
virtual object
|
|
|
|
makeMethod(vm::Thread* t,
|
|
|
|
uint8_t vmFlags,
|
|
|
|
uint8_t returnCode,
|
|
|
|
uint8_t parameterCount,
|
|
|
|
uint8_t parameterFootprint,
|
|
|
|
uint16_t flags,
|
|
|
|
uint16_t offset,
|
|
|
|
object name,
|
|
|
|
object spec,
|
2009-09-19 00:01:54 +00:00
|
|
|
object addendum,
|
2007-12-09 22:45:43 +00:00
|
|
|
object class_,
|
|
|
|
object code)
|
2007-10-04 03:19:39 +00:00
|
|
|
{
|
2010-09-17 01:43:27 +00:00
|
|
|
if (code) {
|
|
|
|
codeCompiled(t, code) = local::defaultThunk(static_cast<MyThread*>(t));
|
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
return vm::makeMethod
|
|
|
|
(t, vmFlags, returnCode, parameterCount, parameterFootprint, flags,
|
2010-11-26 19:41:31 +00:00
|
|
|
offset, 0, 0, name, spec, addendum, class_, code);
|
2007-10-04 03:19:39 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
virtual object
|
|
|
|
makeClass(vm::Thread* t,
|
|
|
|
uint16_t flags,
|
2009-08-18 20:26:28 +00:00
|
|
|
uint16_t vmFlags,
|
2007-12-09 22:45:43 +00:00
|
|
|
uint16_t fixedSize,
|
2009-08-18 20:26:28 +00:00
|
|
|
uint8_t arrayElementSize,
|
|
|
|
uint8_t arrayDimensions,
|
2007-12-09 22:45:43 +00:00
|
|
|
object objectMask,
|
|
|
|
object name,
|
2009-08-27 22:26:25 +00:00
|
|
|
object sourceFile,
|
2007-12-09 22:45:43 +00:00
|
|
|
object super,
|
|
|
|
object interfaceTable,
|
|
|
|
object virtualTable,
|
|
|
|
object fieldTable,
|
|
|
|
object methodTable,
|
|
|
|
object staticTable,
|
2009-09-19 00:01:54 +00:00
|
|
|
object addendum,
|
2007-12-09 22:45:43 +00:00
|
|
|
object loader,
|
|
|
|
unsigned vtableLength)
|
2007-09-26 23:23:03 +00:00
|
|
|
{
|
2007-12-11 21:26:59 +00:00
|
|
|
return vm::makeClass
|
2009-08-18 20:26:28 +00:00
|
|
|
(t, flags, vmFlags, fixedSize, arrayElementSize, arrayDimensions,
|
2011-04-01 01:16:57 +00:00
|
|
|
0, objectMask, name, sourceFile, super, interfaceTable, virtualTable,
|
|
|
|
fieldTable, methodTable, staticTable, addendum, loader, 0,
|
2010-11-26 19:41:31 +00:00
|
|
|
vtableLength);
|
2007-12-11 21:26:59 +00:00
|
|
|
}
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2007-12-11 21:26:59 +00:00
|
|
|
virtual void
|
|
|
|
initVtable(Thread* t, object c)
|
|
|
|
{
|
2009-06-04 23:20:55 +00:00
|
|
|
PROTECT(t, c);
|
2009-04-19 22:36:11 +00:00
|
|
|
for (int i = classLength(t, c) - 1; i >= 0; --i) {
|
2009-06-04 23:20:55 +00:00
|
|
|
void* thunk = reinterpret_cast<void*>
|
2009-04-07 00:34:12 +00:00
|
|
|
(virtualThunk(static_cast<MyThread*>(t), i));
|
2009-06-04 23:20:55 +00:00
|
|
|
classVtable(t, c, i) = thunk;
|
2007-09-26 23:23:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
virtual void
|
2007-10-12 17:56:43 +00:00
|
|
|
visitObjects(Thread* vmt, Heap::Visitor* v)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
2007-10-12 17:56:43 +00:00
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
if (t == t->m->rootThread) {
|
2010-09-14 16:49:41 +00:00
|
|
|
v->visit(&roots);
|
2007-10-14 01:18:25 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-04-07 23:47:41 +00:00
|
|
|
for (MyThread::CallTrace* trace = t->trace; trace; trace = trace->next) {
|
2009-05-03 20:57:11 +00:00
|
|
|
v->visit(&(trace->continuation));
|
2008-04-07 23:47:41 +00:00
|
|
|
v->visit(&(trace->nativeMethod));
|
2009-04-27 01:53:42 +00:00
|
|
|
v->visit(&(trace->targetMethod));
|
2009-05-24 01:49:14 +00:00
|
|
|
v->visit(&(trace->originalMethod));
|
2008-04-07 23:47:41 +00:00
|
|
|
}
|
2008-04-01 17:37:59 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
v->visit(&(t->continuation));
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
for (Reference* r = t->reference; r; r = r->next) {
|
|
|
|
v->visit(&(r->target));
|
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
visitStack(t, v);
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
virtual void
|
|
|
|
walkStack(Thread* vmt, StackVisitor* v)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
2007-12-09 22:45:43 +00:00
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
MyStackWalker walker(t);
|
|
|
|
walker.walk(v);
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-10-04 22:41:19 +00:00
|
|
|
virtual int
|
2007-12-09 22:45:43 +00:00
|
|
|
lineNumber(Thread* vmt, object method, int ip)
|
2007-10-04 22:41:19 +00:00
|
|
|
{
|
2007-12-09 22:45:43 +00:00
|
|
|
return findLineNumber(static_cast<MyThread*>(vmt), method, ip);
|
2007-10-04 22:41:19 +00:00
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
virtual object*
|
2007-09-30 03:33:38 +00:00
|
|
|
makeLocalReference(Thread* vmt, object o)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
2007-09-30 03:33:38 +00:00
|
|
|
if (o) {
|
|
|
|
MyThread* t = static_cast<MyThread*>(vmt);
|
2009-12-17 02:16:51 +00:00
|
|
|
|
|
|
|
for (Reference* r = t->reference; r; r = r->next) {
|
|
|
|
if (r->target == o) {
|
|
|
|
acquire(t, r);
|
|
|
|
|
|
|
|
return &(r->target);
|
|
|
|
}
|
|
|
|
}
|
2007-09-30 03:33:38 +00:00
|
|
|
|
2008-04-13 18:15:04 +00:00
|
|
|
Reference* r = new (t->m->heap->allocate(sizeof(Reference)))
|
2012-02-29 18:51:30 +00:00
|
|
|
Reference(o, &(t->reference), false);
|
2007-09-30 03:33:38 +00:00
|
|
|
|
2009-12-17 02:16:51 +00:00
|
|
|
acquire(t, r);
|
|
|
|
|
2007-09-30 03:33:38 +00:00
|
|
|
return &(r->target);
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual void
|
2007-09-30 03:33:38 +00:00
|
|
|
disposeLocalReference(Thread* t, object* r)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
2007-09-30 03:33:38 +00:00
|
|
|
if (r) {
|
2009-12-17 02:16:51 +00:00
|
|
|
release(t, reinterpret_cast<Reference*>(r));
|
2007-09-30 03:33:38 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2007-09-24 01:39:03 +00:00
|
|
|
virtual object
|
|
|
|
invokeArray(Thread* t, object method, object this_, object arguments)
|
|
|
|
{
|
2010-12-27 22:55:23 +00:00
|
|
|
assert(t, t->exception == 0);
|
2008-04-01 17:37:59 +00:00
|
|
|
|
2007-09-24 01:39:03 +00:00
|
|
|
assert(t, t->state == Thread::ActiveState
|
|
|
|
or t->state == Thread::ExclusiveState);
|
|
|
|
|
|
|
|
assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));
|
|
|
|
|
2009-08-13 15:17:05 +00:00
|
|
|
method = findMethod(t, method, this_);
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
const char* spec = reinterpret_cast<char*>
|
|
|
|
(&byteArrayBody(t, methodSpec(t, method), 0));
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned size = methodParameterFootprint(t, method);
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uintptr_t, array, size);
|
|
|
|
THREAD_RUNTIME_ARRAY(t, bool, objectMask, size);
|
2009-08-27 00:26:44 +00:00
|
|
|
ArgumentList list
|
|
|
|
(t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
|
|
|
|
this_, spec, arguments);
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
PROTECT(t, method);
|
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
compile(static_cast<MyThread*>(t),
|
2009-08-27 00:26:44 +00:00
|
|
|
local::codeAllocator(static_cast<MyThread*>(t)), 0, method);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
return local::invoke(t, method, &list);
|
2007-09-24 01:39:03 +00:00
|
|
|
}

  virtual object
  invokeList(Thread* t, object method, object this_, bool indirectObjects,
             va_list arguments)
  {
    assert(t, t->exception == 0);

    assert(t, t->state == Thread::ActiveState
           or t->state == Thread::ExclusiveState);

    assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));

    method = findMethod(t, method, this_);

    const char* spec = reinterpret_cast<char*>
      (&byteArrayBody(t, methodSpec(t, method), 0));

    unsigned size = methodParameterFootprint(t, method);
    THREAD_RUNTIME_ARRAY(t, uintptr_t, array, size);
    THREAD_RUNTIME_ARRAY(t, bool, objectMask, size);
    ArgumentList list
      (t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
       this_, spec, indirectObjects, arguments);

    PROTECT(t, method);

    compile(static_cast<MyThread*>(t),
            local::codeAllocator(static_cast<MyThread*>(t)), 0, method);

    return local::invoke(t, method, &list);
  }

  virtual object
  invokeList(Thread* t, object loader, const char* className,
             const char* methodName, const char* methodSpec,
             object this_, va_list arguments)
  {
    assert(t, t->exception == 0);

    assert(t, t->state == Thread::ActiveState
           or t->state == Thread::ExclusiveState);

    unsigned size = parameterFootprint(t, methodSpec, this_ == 0);
    THREAD_RUNTIME_ARRAY(t, uintptr_t, array, size);
    THREAD_RUNTIME_ARRAY(t, bool, objectMask, size);
    ArgumentList list
      (t, RUNTIME_ARRAY_BODY(array), size, RUNTIME_ARRAY_BODY(objectMask),
       this_, methodSpec, false, arguments);

    object method = resolveMethod
      (t, loader, className, methodName, methodSpec);

    assert(t, ((methodFlags(t, method) & ACC_STATIC) == 0) xor (this_ == 0));

    PROTECT(t, method);

    compile(static_cast<MyThread*>(t),
            local::codeAllocator(static_cast<MyThread*>(t)), 0, method);

    return local::invoke(t, method, &list);
  }

  virtual void dispose(Thread* vmt) {
    MyThread* t = static_cast<MyThread*>(vmt);

    while (t->reference) {
      vm::dispose(t, t->reference);
    }

    t->arch->release();

    t->m->heap->free(t, sizeof(*t));
  }

  virtual void dispose() {
    if (codeAllocator.base) {
      s->freeExecutable(codeAllocator.base, codeAllocator.capacity);
    }

    compilationHandlers->dispose(allocator);

    s->handleSegFault(0);

    allocator->free(this, sizeof(*this));
  }

  virtual object getStackTrace(Thread* vmt, Thread* vmTarget) {
    MyThread* t = static_cast<MyThread*>(vmt);
    MyThread* target = static_cast<MyThread*>(vmTarget);
    MyProcessor* p = this;

    class Visitor: public System::ThreadVisitor {
     public:
      Visitor(MyThread* t, MyProcessor* p, MyThread* target):
        t(t), p(p), target(target), trace(0)
      { }

      virtual void visit(void* ip, void* stack, void* link) {
        MyThread::TraceContext c(target, link);

        if (methodForIp(t, ip)) {
          // we caught the thread in Java code - use the register values
          c.ip = ip;
          c.stack = stack;
          c.javaStackLimit = stack;
        } else if (target->transition) {
          // we caught the thread in native code while in the middle
          // of updating the context fields (MyThread::stack, etc.)
          static_cast<MyThread::Context&>(c) = *(target->transition);
        } else if (isVmInvokeUnsafeStack(ip)) {
          // we caught the thread in native code just after returning
          // from java code, but before clearing MyThread::stack
          // (which now contains a garbage value), and the most recent
          // Java frame, if any, can be found in
          // MyThread::continuation or MyThread::trace
          c.ip = 0;
          c.stack = 0;
        } else if (target->stack
                   and (not isThunkUnsafeStack(t, ip))
                   and (not isVirtualThunk(t, ip)))
        {
          // we caught the thread in a thunk or native code, and the
          // saved stack pointer indicates the most recent Java frame
          // on the stack
          c.ip = getIp(target);
          c.stack = target->stack;
        } else if (isThunk(t, ip) or isVirtualThunk(t, ip)) {
          // we caught the thread in a thunk where the stack register
          // indicates the most recent Java frame on the stack

          // On e.g. x86, the return address will have already been
          // pushed onto the stack, in which case we use getIp to
          // retrieve it. On e.g. PowerPC and ARM, it will be in the
          // link register. Note that we can't just check if the link
          // argument is null here, since we use ecx/rcx as a
          // pseudo-link register on x86 for the purpose of tail
          // calls.
          c.ip = t->arch->hasLinkRegister() ? link : getIp(t, link, stack);
          c.stack = stack;
        } else {
          // we caught the thread in native code, and the most recent
          // Java frame, if any, can be found in
          // MyThread::continuation or MyThread::trace
          c.ip = 0;
          c.stack = 0;
        }

        if (ensure(t, traceSize(target))) {
          atomicOr(&(t->flags), Thread::TracingFlag);
          trace = makeTrace(t, target);
          atomicAnd(&(t->flags), ~Thread::TracingFlag);
        }
      }

      MyThread* t;
      MyProcessor* p;
      MyThread* target;
      object trace;
    } visitor(t, p, target);

    t->m->system->visit(t->systemThread, target->systemThread, &visitor);

    if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
      PROTECT(t, visitor.trace);

      collect(t, Heap::MinorCollection);
    }

    return visitor.trace ? visitor.trace : makeObjectArray(t, 0);
  }
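
  // Illustrative call sequence (a sketch inferred from the code above, not
  // part of the original file): a thread asking for another thread's stack
  // trace goes through the Processor interface, and the visitor runs while
  // the target is stopped by System::visit:
  //
  //   object trace = t->m->processor->getStackTrace(t, targetThread);
  //
  // ensure(t, traceSize(target)) appears to reserve heap space up front so
  // that makeTrace does not have to trigger a collection while the target
  // thread is suspended; if the backup heap had to be used (UseBackupHeapFlag),
  // a minor collection is requested once the visit completes.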

  virtual void initialize(BootImage* image, uint8_t* code, unsigned capacity) {
    bootImage = image;
    codeAllocator.base = code;
    codeAllocator.capacity = capacity;
  }

  virtual void addCompilationHandler(CompilationHandler* handler) {
    compilationHandlers
      = new (allocator->allocate(sizeof(CompilationHandlerList)))
      CompilationHandlerList(compilationHandlers, handler);
  }

  virtual void compileMethod(Thread* vmt, Zone* zone, object* constants,
                             object* calls, DelayedPromise** addresses,
                             object method, OffsetResolver* resolver)
  {
    MyThread* t = static_cast<MyThread*>(vmt);
    BootContext bootContext(t, *constants, *calls, *addresses, zone, resolver);

    compile(t, &codeAllocator, &bootContext, method);

    *constants = bootContext.constants;
    *calls = bootContext.calls;
    *addresses = bootContext.addresses;
  }

  virtual void visitRoots(Thread* t, HeapWalker* w) {
    bootImage->methodTree = w->visitRoot(root(t, MethodTree));
    bootImage->methodTreeSentinal = w->visitRoot(root(t, MethodTreeSentinal));
    bootImage->virtualThunks = w->visitRoot(root(t, VirtualThunks));
  }

  virtual void normalizeVirtualThunks(Thread* t) {
    for (unsigned i = 0; i < wordArrayLength(t, root(t, VirtualThunks));
         i += 2)
    {
      if (wordArrayBody(t, root(t, VirtualThunks), i)) {
        wordArrayBody(t, root(t, VirtualThunks), i)
          -= reinterpret_cast<uintptr_t>(codeAllocator.base);
      }
    }
  }

  virtual unsigned* makeCallTable(Thread* t, HeapWalker* w) {
    bootImage->codeSize = codeAllocator.offset;
    bootImage->callCount = callTableSize;

    unsigned* table = static_cast<unsigned*>
      (t->m->heap->allocate(callTableSize * sizeof(unsigned) * 2));

    unsigned index = 0;
    for (unsigned i = 0; i < arrayLength(t, root(t, CallTable)); ++i) {
      for (object p = arrayBody(t, root(t, CallTable), i);
           p; p = callNodeNext(t, p))
      {
        table[index++] = targetVW
          (callNodeAddress(t, p)
           - reinterpret_cast<uintptr_t>(codeAllocator.base));
        table[index++] = targetVW
          (w->map()->find(callNodeTarget(t, p))
           | (static_cast<unsigned>(callNodeFlags(t, p)) << TargetBootShift));
      }
    }

    return table;
  }
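
  // Layout note (a sketch based only on the code in makeCallTable above):
  // entries come in pairs of target-word values,
  //
  //   table[2 * n]     = call-site address relative to codeAllocator.base
  //   table[2 * n + 1] = heap-map index of the callee, with the call node
  //                      flags packed into the bits above TargetBootShift
  //
  // presumably so that the boot-image loader can later rebuild the call
  // table and re-link each recorded call site at its new base address.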

  virtual void boot(Thread* t, BootImage* image, uint8_t* code) {
    if (codeAllocator.base == 0) {
      codeAllocator.base = static_cast<uint8_t*>
        (s->tryAllocateExecutable(ExecutableAreaSizeInBytes));
      codeAllocator.capacity = ExecutableAreaSizeInBytes;
    }

    if (image and code) {
      local::boot(static_cast<MyThread*>(t), image, code);
    } else {
      roots = makeArray(t, RootCount);

      setRoot(t, CallTable, makeArray(t, 128));

      setRoot(t, MethodTreeSentinal, makeTreeNode(t, 0, 0, 0));
      setRoot(t, MethodTree, root(t, MethodTreeSentinal));
      set(t, root(t, MethodTree), TreeNodeLeft,
          root(t, MethodTreeSentinal));
      set(t, root(t, MethodTree), TreeNodeRight,
          root(t, MethodTreeSentinal));
    }

    local::compileThunks(static_cast<MyThread*>(t), &codeAllocator);

    if (not (image and code)) {
      bootThunks = thunks;
    }

    segFaultHandler.m = t->m;
    expect(t, t->m->system->success
           (t->m->system->handleSegFault(&segFaultHandler)));

    divideByZeroHandler.m = t->m;
    expect(t, t->m->system->success
           (t->m->system->handleDivideByZero(&divideByZeroHandler)));
  }

  virtual void callWithCurrentContinuation(Thread* t, object receiver) {
    if (Continuations) {
      local::callWithCurrentContinuation(static_cast<MyThread*>(t), receiver);
    } else {
      abort(t);
    }
  }

  virtual void dynamicWind(Thread* t, object before, object thunk,
                           object after)
  {
    if (Continuations) {
      local::dynamicWind(static_cast<MyThread*>(t), before, thunk, after);
    } else {
      abort(t);
    }
  }

  virtual void feedResultToContinuation(Thread* t, object continuation,
                                        object result)
  {
    if (Continuations) {
      callContinuation(static_cast<MyThread*>(t), continuation, result, 0);
    } else {
      abort(t);
    }
  }

  virtual void feedExceptionToContinuation(Thread* t, object continuation,
                                           object exception)
  {
    if (Continuations) {
      callContinuation(static_cast<MyThread*>(t), continuation, 0, exception);
    } else {
      abort(t);
    }
  }

  virtual void walkContinuationBody(Thread* t, Heap::Walker* w, object o,
                                    unsigned start)
  {
    if (Continuations) {
      local::walkContinuationBody(static_cast<MyThread*>(t), w, o, start);
    } else {
      abort(t);
    }
  }

  System* s;
  Allocator* allocator;
  object roots;
  BootImage* bootImage;
  uintptr_t* heapImage;
  uint8_t* codeImage;
  unsigned codeImageSize;
  SignalHandler segFaultHandler;
  SignalHandler divideByZeroHandler;
  FixedAllocator codeAllocator;
  ThunkCollection thunks;
  ThunkCollection bootThunks;
  unsigned callTableSize;
  bool useNativeFeatures;
  void* thunkTable[dummyIndex + 1];
  CompilationHandlerList* compilationHandlers;
};

void
logCompile(MyThread* t, const void* code, unsigned size, const char* class_,
           const char* name, const char* spec)
{
  static bool open = false;
  if (not open) {
    open = true;
    const char* path = findProperty(t, "avian.jit.log");
    if (path) {
      compileLog = vm::fopen(path, "wb");
    } else if (DebugCompile) {
      compileLog = stderr;
    }
  }

  if (compileLog) {
    fprintf(compileLog, "%p %p %s.%s%s\n",
            code, static_cast<const uint8_t*>(code) + size,
            class_, name, spec);
  }

  MyProcessor* p = static_cast<MyProcessor*>(t->m->processor);
  for (CompilationHandlerList* h = p->compilationHandlers; h; h = h->next) {
    h->handler->compiled(code, 0, 0, class_, name, spec);
  }
}
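
// Usage note (a sketch of what the code above implies; how the property is
// supplied on the command line is an assumption, not something this file
// specifies): setting the "avian.jit.log" system property to a writable
// path, e.g. avian.jit.log=/tmp/jit.log, makes logCompile write one line
// per compiled method of the form
//
//   <code start> <code end> <class>.<name><spec>
//
// If the property is absent, output goes to stderr only when DebugCompile
// is enabled.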

void*
compileMethod2(MyThread* t, void* ip)
{
  object node = findCallNode(t, ip);
  object target = callNodeTarget(t, node);

  PROTECT(t, node);
  PROTECT(t, target);

  t->trace->targetMethod = target;

  THREAD_RESOURCE0(t, static_cast<MyThread*>(t)->trace->targetMethod = 0);

  compile(t, codeAllocator(t), 0, target);

  uint8_t* updateIp = static_cast<uint8_t*>(ip);

  MyProcessor* p = processor(t);

  bool updateCaller = updateIp < p->codeImage
    or updateIp >= p->codeImage + p->codeImageSize;

  uintptr_t address;
  if (methodFlags(t, target) & ACC_NATIVE) {
    address = useLongJump(t, reinterpret_cast<uintptr_t>(ip))
      or (not updateCaller) ? bootNativeThunk(t) : nativeThunk(t);
  } else {
    address = methodAddress(t, target);
  }

  if (updateCaller) {
    UnaryOperation op;
    if (callNodeFlags(t, node) & TraceElement::LongCall) {
      if (callNodeFlags(t, node) & TraceElement::TailCall) {
        op = AlignedLongJump;
      } else {
        op = AlignedLongCall;
      }
    } else if (callNodeFlags(t, node) & TraceElement::TailCall) {
      op = AlignedJump;
    } else {
      op = AlignedCall;
    }

    updateCall(t, op, updateIp, reinterpret_cast<void*>(address));
  }

  return reinterpret_cast<void*>(address);
}
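
// How this fits together (a sketch inferred from the code above, not the
// original author's documentation): a call site whose target has not been
// compiled yet initially reaches a compile thunk. When the thunk fires,
// compileMethod2 compiles the real target and, unless the caller lives
// inside the boot code image (p->codeImage, which is not patched), rewrites
// the caller's call or jump instruction in place via updateCall, picking an
// Aligned{Call,Jump,LongCall,LongJump} form to match how the call site was
// emitted. The returned address completes the current invocation, and later
// calls from a patched site go directly to the compiled code.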

bool
isThunk(MyProcessor::ThunkCollection* thunks, void* ip)
{
  uint8_t* thunkStart = thunks->default_.start;
  uint8_t* thunkEnd = thunks->table.start
    + (thunks->table.length * ThunkCount);

  return (reinterpret_cast<uintptr_t>(ip)
          >= reinterpret_cast<uintptr_t>(thunkStart)
          and reinterpret_cast<uintptr_t>(ip)
          < reinterpret_cast<uintptr_t>(thunkEnd));
}

bool
isThunk(MyThread* t, void* ip)
{
  MyProcessor* p = processor(t);

  return isThunk(&(p->thunks), ip) or isThunk(&(p->bootThunks), ip);
}

bool
isThunkUnsafeStack(MyProcessor::Thunk* thunk, void* ip)
{
  return reinterpret_cast<uintptr_t>(ip)
    >= reinterpret_cast<uintptr_t>(thunk->start)
    and reinterpret_cast<uintptr_t>(ip)
    < reinterpret_cast<uintptr_t>(thunk->start + thunk->frameSavedOffset);
}

bool
isThunkUnsafeStack(MyProcessor::ThunkCollection* thunks, void* ip)
{
  const unsigned NamedThunkCount = 5;

  MyProcessor::Thunk table[NamedThunkCount + ThunkCount];

  table[0] = thunks->default_;
  table[1] = thunks->defaultVirtual;
  table[2] = thunks->native;
  table[3] = thunks->aioob;
  table[4] = thunks->stackOverflow;
|
|
|
|
2010-06-17 02:29:41 +00:00
|
|
|
for (unsigned i = 0; i < ThunkCount; ++i) {
|
|
|
|
new (table + NamedThunkCount + i) MyProcessor::Thunk
|
|
|
|
(thunks->table.start + (i * thunks->table.length),
|
|
|
|
thunks->table.frameSavedOffset,
|
|
|
|
thunks->table.length);
|
|
|
|
}
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
|
2010-06-17 02:29:41 +00:00
|
|
|
for (unsigned i = 0; i < NamedThunkCount + ThunkCount; ++i) {
|
|
|
|
if (isThunkUnsafeStack(table + i, ip)) {
|
|
|
|
return true;
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-07-06 22:13:11 +00:00
|
|
|
bool
|
|
|
|
isVirtualThunk(MyThread* t, void* ip)
|
|
|
|
{
|
2010-09-14 16:49:41 +00:00
|
|
|
for (unsigned i = 0; i < wordArrayLength(t, root(t, VirtualThunks)); i += 2)
|
|
|
|
{
|
|
|
|
uintptr_t start = wordArrayBody(t, root(t, VirtualThunks), i);
|
|
|
|
uintptr_t end = start + wordArrayBody(t, root(t, VirtualThunks), i + 1);
|
2010-07-06 22:13:11 +00:00
|
|
|
|
|
|
|
if (reinterpret_cast<uintptr_t>(ip) >= start
|
|
|
|
and reinterpret_cast<uintptr_t>(ip) < end)
|
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-06-17 02:29:41 +00:00
|
|
|
bool
|
|
|
|
isThunkUnsafeStack(MyThread* t, void* ip)
|
|
|
|
{
|
|
|
|
MyProcessor* p = processor(t);
|
|
|
|
|
2011-10-02 00:11:02 +00:00
|
|
|
return isThunk(t, ip)
|
|
|
|
and (isThunkUnsafeStack(&(p->thunks), ip)
|
|
|
|
or isThunkUnsafeStack(&(p->bootThunks), ip));
|
2010-06-17 02:29:41 +00:00
|
|
|
}
|
|
|
|
|
2008-12-02 02:38:00 +00:00
|
|
|
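// isVirtualThunk above treats the VirtualThunks word array as a flat list of
// (start, length) pairs and asks whether ip falls inside any of them.  A
// minimal sketch of the same containment test over a plain array
// (hypothetical data, independent of the VM's word-array accessors):
#if 0
#include <stdint.h>

bool
inAnyRange(const uintptr_t* pairs, unsigned pairCount, void* ip)
{
  uintptr_t address = reinterpret_cast<uintptr_t>(ip);
  for (unsigned i = 0; i < pairCount * 2; i += 2) {
    uintptr_t start = pairs[i];
    uintptr_t end = start + pairs[i + 1];
    if (address >= start and address < end) {
      return true;
    }
  }
  return false;
}
#endif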
object
findCallNode(MyThread* t, void* address)
{
  if (DebugCallTable) {
    fprintf(stderr, "find call node %p\n", address);
  }

  // we must use a version of the call table at least as recent as the
  // compiled form of the method containing the specified address (see
  // compile(MyThread*, Allocator*, BootContext*, object)):
  loadMemoryBarrier();

  object table = root(t, CallTable);

  intptr_t key = reinterpret_cast<intptr_t>(address);
  unsigned index = static_cast<uintptr_t>(key) & (arrayLength(t, table) - 1);

  for (object n = arrayBody(t, table, index);
       n; n = callNodeNext(t, n))
  {
    intptr_t k = callNodeAddress(t, n);

    if (k == key) {
      return n;
    }
  }

  return 0;
}

object
resizeTable(MyThread* t, object oldTable, unsigned newLength)
{
  PROTECT(t, oldTable);

  object oldNode = 0;
  PROTECT(t, oldNode);

  object newTable = makeArray(t, newLength);
  PROTECT(t, newTable);

  for (unsigned i = 0; i < arrayLength(t, oldTable); ++i) {
    for (oldNode = arrayBody(t, oldTable, i);
         oldNode;
         oldNode = callNodeNext(t, oldNode))
    {
      intptr_t k = callNodeAddress(t, oldNode);

      unsigned index = k & (newLength - 1);

      object newNode = makeCallNode
        (t, callNodeAddress(t, oldNode),
         callNodeTarget(t, oldNode),
         callNodeFlags(t, oldNode),
         arrayBody(t, newTable, index));

      set(t, newTable, ArrayBody + (index * BytesPerWord), newNode);
    }
  }

  return newTable;
}

object
insertCallNode(MyThread* t, object table, unsigned* size, object node)
{
  if (DebugCallTable) {
    fprintf(stderr, "insert call node %p\n",
            reinterpret_cast<void*>(callNodeAddress(t, node)));
  }

  PROTECT(t, table);
  PROTECT(t, node);

  ++ (*size);

  if (*size >= arrayLength(t, table) * 2) {
    table = resizeTable(t, table, arrayLength(t, table) * 2);
  }

  intptr_t key = callNodeAddress(t, node);
  unsigned index = static_cast<uintptr_t>(key) & (arrayLength(t, table) - 1);

  set(t, node, CallNodeNext, arrayBody(t, table, index));
  set(t, table, ArrayBody + (index * BytesPerWord), node);

  return table;
}

void
insertCallNode(MyThread* t, object node)
{
  setRoot(t, CallTable, insertCallNode
          (t, root(t, CallTable), &(processor(t)->callTableSize), node));
}
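// The call table managed by findCallNode/insertCallNode/resizeTable above is
// an open hash table keyed by machine return address: the array length is
// kept at a power of two so a key reduces to a bucket with a single mask,
// collisions are chained through callNodeNext, and the table is grown (and
// rehashed) once the node count reaches twice the bucket count.  A minimal,
// self-contained sketch of that indexing and growth policy, using
// hypothetical plain C++ types rather than the VM's object model:
#if 0
#include <stdint.h>
#include <stddef.h>

struct Node {
  uintptr_t key;
  Node* next;
};

size_t
bucketOf(uintptr_t key, size_t length)  // length must be a power of two
{
  return static_cast<size_t>(key) & (length - 1);
}

Node*
find(Node* const* buckets, size_t length, uintptr_t key)
{
  for (Node* n = buckets[bucketOf(key, length)]; n; n = n->next) {
    if (n->key == key) return n;
  }
  return 0;
}

bool
shouldGrow(size_t count, size_t length)
{
  return count >= length * 2;  // same threshold insertCallNode uses
}
#endif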
object
makeClassMap(Thread* t, unsigned* table, unsigned count,
             uintptr_t* heap)
{
  object array = makeArray(t, nextPowerOfTwo(count));
  object map = makeHashMap(t, 0, array);
  PROTECT(t, map);

  for (unsigned i = 0; i < count; ++i) {
    object c = bootObject(heap, table[i]);
    hashMapInsert(t, map, className(t, c), c, byteArrayHash);
  }

  return map;
}

object
makeStaticTableArray(Thread* t, unsigned* bootTable, unsigned bootCount,
                     unsigned* appTable, unsigned appCount, uintptr_t* heap)
{
  object array = makeArray(t, bootCount + appCount);

  for (unsigned i = 0; i < bootCount; ++i) {
    set(t, array, ArrayBody + (i * BytesPerWord),
        classStaticTable(t, bootObject(heap, bootTable[i])));
  }

  for (unsigned i = 0; i < appCount; ++i) {
    set(t, array, ArrayBody + ((bootCount + i) * BytesPerWord),
        classStaticTable(t, bootObject(heap, appTable[i])));
  }

  return array;
}

object
makeStringMap(Thread* t, unsigned* table, unsigned count, uintptr_t* heap)
{
  object array = makeArray(t, nextPowerOfTwo(count));
  object map = makeWeakHashMap(t, 0, array);
  PROTECT(t, map);

  for (unsigned i = 0; i < count; ++i) {
    object s = bootObject(heap, table[i]);
    hashMapInsert(t, map, s, 0, stringHash);
  }

  return map;
}

object
makeCallTable(MyThread* t, uintptr_t* heap, unsigned* calls, unsigned count,
              uintptr_t base)
{
  object table = makeArray(t, nextPowerOfTwo(count));
  PROTECT(t, table);

  unsigned size = 0;
  for (unsigned i = 0; i < count; ++i) {
    unsigned address = calls[i * 2];
    unsigned target = calls[(i * 2) + 1];

    object node = makeCallNode
      (t, base + address, bootObject(heap, target & BootMask),
       target >> BootShift, 0);

    table = insertCallNode(t, table, &size, node);
  }

  return table;
}
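// makeCallTable above consumes the boot image's call table as flat pairs of
// unsigned values: the first word of each pair is a code-relative call site
// address, and the second packs a boot-heap object number in its BootMask
// bits with the call node flags shifted up by BootShift.  A sketch of that
// decoding, assuming illustrative mask/shift parameters rather than the real
// constants defined elsewhere in the VM:
#if 0
#include <stdint.h>

struct DecodedCall {
  uintptr_t address;      // absolute address: image-relative offset + code base
  unsigned objectNumber;  // index into the boot heap (BootMask bits)
  unsigned flags;         // call node flags (bits above BootShift)
};

DecodedCall
decodeCall(const unsigned* calls, unsigned i, uintptr_t codeBase,
           unsigned bootMask, unsigned bootShift)
{
  DecodedCall d;
  d.address = codeBase + calls[i * 2];
  d.objectNumber = calls[(i * 2) + 1] & bootMask;
  d.flags = calls[(i * 2) + 1] >> bootShift;
  return d;
}
#endif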
void
fixupHeap(MyThread* t UNUSED, uintptr_t* map, unsigned size, uintptr_t* heap)
{
  for (unsigned word = 0; word < size; ++word) {
    uintptr_t w = map[word];
    if (w) {
      for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
        if (w & (static_cast<uintptr_t>(1) << bit)) {
          unsigned index = indexOf(word, bit);

          uintptr_t* p = heap + index;
          assert(t, *p);

          uintptr_t number = *p & BootMask;
          uintptr_t mark = *p >> BootShift;

          if (number) {
            *p = reinterpret_cast<uintptr_t>(heap + (number - 1)) | mark;
            // fprintf(stderr, "fixup %d: %d 0x%x\n", index, static_cast<unsigned>(number), static_cast<unsigned>(*p));
          } else {
            *p = mark;
          }
        }
      }
    }
  }
}
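// fixupHeap above walks a bitmap with one bit per heap word: a set bit at
// (word, bit) means the heap word at indexOf(word, bit) holds an encoded
// reference, where the BootMask bits give a 1-based object number (0 means
// "no object") and the bits above BootShift carry mark state to be OR'd back
// into the rebuilt pointer.  A sketch of the bit-to-index mapping this
// relies on, assuming indexOf is the usual word * BitsPerWord + bit
// flattening (an assumption; the real helper is defined elsewhere):
#if 0
#include <stdint.h>

const unsigned BitsPerWordExample = sizeof(uintptr_t) * 8;

unsigned
exampleIndexOf(unsigned word, unsigned bit)
{
  return (word * BitsPerWordExample) + bit;
}

bool
isMarked(const uintptr_t* map, unsigned index)
{
  return map[index / BitsPerWordExample]
    & (static_cast<uintptr_t>(1) << (index % BitsPerWordExample));
}
#endif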
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attach to more than one VM at a time.
void
resetClassRuntimeState(Thread* t, object c, uintptr_t* heap, unsigned heapSize)
{
  classRuntimeDataIndex(t, c) = 0;

  if (classArrayElementSize(t, c) == 0) {
    object staticTable = classStaticTable(t, c);
    if (staticTable) {
      for (unsigned i = 0; i < singletonCount(t, staticTable); ++i) {
        if (singletonIsObject(t, staticTable, i)
            and (reinterpret_cast<uintptr_t*>
                 (singletonObject(t, staticTable, i)) < heap or
                 reinterpret_cast<uintptr_t*>
                 (singletonObject(t, staticTable, i)) > heap + heapSize))
        {
          singletonObject(t, staticTable, i) = 0;
        }
      }
    }
  }

  if (classMethodTable(t, c)) {
    for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
      object m = arrayBody(t, classMethodTable(t, c), i);

      methodNativeID(t, m) = 0;
      methodRuntimeDataIndex(t, m) = 0;

      if (methodVmFlags(t, m) & ClassInitFlag) {
        classVmFlags(t, c) |= NeedInitFlag;
        classVmFlags(t, c) &= ~InitErrorFlag;
      }
    }
  }

  t->m->processor->initVtable(t, c);
}

void
resetRuntimeState(Thread* t, object map, uintptr_t* heap, unsigned heapSize)
{
  for (HashMapIterator it(t, map); it.hasMore();) {
    resetClassRuntimeState(t, tripleSecond(t, it.next()), heap, heapSize);
  }
}
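// resetClassRuntimeState/resetRuntimeState exist so that a boot image patched
// in place by one VM instance can be reused by the next instance in the same
// process.  The scenario they support is the Java invocation API sequence
// sketched below (standard JNI calls; error handling omitted):
#if 0
#include <jni.h>

void
createUseDestroyTwice()
{
  for (int i = 0; i < 2; ++i) {
    JavaVM* vm;
    JNIEnv* env;
    JavaVMInitArgs args;
    args.version = JNI_VERSION_1_2;
    args.nOptions = 0;
    args.options = 0;
    args.ignoreUnrecognized = JNI_TRUE;

    JNI_CreateJavaVM(&vm, reinterpret_cast<void**>(&env), &args);
    // ... run Java code ...
    vm->DestroyJavaVM();
    // the second iteration relies on the runtime state reset above
  }
}
#endif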
void
fixupMethods(Thread* t, object map, BootImage* image UNUSED, uint8_t* code)
{
  for (HashMapIterator it(t, map); it.hasMore();) {
    object c = tripleSecond(t, it.next());

    if (classMethodTable(t, c)) {
      for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
        object method = arrayBody(t, classMethodTable(t, c), i);
        if (methodCode(t, method)) {
          assert(t, methodCompiled(t, method)
                 <= static_cast<int32_t>(image->codeSize));

          codeCompiled(t, methodCode(t, method))
            = methodCompiled(t, method) + reinterpret_cast<uintptr_t>(code);

          if (DebugCompile) {
            logCompile
              (static_cast<MyThread*>(t),
               reinterpret_cast<uint8_t*>(methodCompiled(t, method)),
               reinterpret_cast<uintptr_t*>
               (methodCompiled(t, method))[-1],
               reinterpret_cast<char*>
               (&byteArrayBody(t, className(t, methodClass(t, method)), 0)),
               reinterpret_cast<char*>
               (&byteArrayBody(t, methodName(t, method), 0)),
               reinterpret_cast<char*>
               (&byteArrayBody(t, methodSpec(t, method), 0)));
          }
        }
      }
    }

    t->m->processor->initVtable(t, c);
  }
}
MyProcessor::Thunk
thunkToThunk(const BootImage::Thunk& thunk, uint8_t* base)
{
  return MyProcessor::Thunk
    (base + thunk.start, thunk.frameSavedOffset, thunk.length);
}

void
findThunks(MyThread* t, BootImage* image, uint8_t* code)
{
  MyProcessor* p = processor(t);

  p->bootThunks.default_ = thunkToThunk(image->thunks.default_, code);
  p->bootThunks.defaultVirtual
    = thunkToThunk(image->thunks.defaultVirtual, code);
  p->bootThunks.native = thunkToThunk(image->thunks.native, code);
  p->bootThunks.aioob = thunkToThunk(image->thunks.aioob, code);
  p->bootThunks.stackOverflow
    = thunkToThunk(image->thunks.stackOverflow, code);
  p->bootThunks.table = thunkToThunk(image->thunks.table, code);
}
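// The two thunkToThunk overloads convert between the on-disk and in-memory
// thunk descriptors: BootImage::Thunk stores a code-image-relative offset,
// while MyProcessor::Thunk stores an absolute address, so loading adds the
// code base (findThunks above) and image writing subtracts it again.  A
// sketch of that round trip with hypothetical plain structs:
#if 0
#include <stdint.h>

struct ImageThunk { unsigned start; unsigned frameSavedOffset; unsigned length; };
struct LiveThunk  { uint8_t* start; unsigned frameSavedOffset; unsigned length; };

LiveThunk
load(ImageThunk t, uint8_t* codeBase)
{
  LiveThunk r = { codeBase + t.start, t.frameSavedOffset, t.length };
  return r;
}

ImageThunk
store(LiveThunk t, uint8_t* codeBase)
{
  ImageThunk r = { static_cast<unsigned>(t.start - codeBase),
                   t.frameSavedOffset, t.length };
  return r;
}
#endif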
void
fixupVirtualThunks(MyThread* t, uint8_t* code)
{
  for (unsigned i = 0; i < wordArrayLength(t, root(t, VirtualThunks)); i += 2)
  {
    if (wordArrayBody(t, root(t, VirtualThunks), i)) {
      wordArrayBody(t, root(t, VirtualThunks), i)
        = wordArrayBody(t, root(t, VirtualThunks), i)
        + reinterpret_cast<uintptr_t>(code);
    }
  }
}
void
boot(MyThread* t, BootImage* image, uint8_t* code)
{
  assert(t, image->magic == BootImage::Magic);

  unsigned* bootClassTable = reinterpret_cast<unsigned*>(image + 1);
  unsigned* appClassTable = bootClassTable + image->bootClassCount;
  unsigned* stringTable = appClassTable + image->appClassCount;
  unsigned* callTable = stringTable + image->stringCount;

  uintptr_t* heapMap = reinterpret_cast<uintptr_t*>
    (padWord(reinterpret_cast<uintptr_t>(callTable + (image->callCount * 2))));

  unsigned heapMapSizeInWords = ceiling
    (heapMapSize(image->heapSize), BytesPerWord);
  uintptr_t* heap = heapMap + heapMapSizeInWords;

  MyProcessor* p = static_cast<MyProcessor*>(t->m->processor);

  t->heapImage = p->heapImage = heap;

  // fprintf(stderr, "heap from %p to %p\n",
  //         heap, heap + ceiling(image->heapSize, BytesPerWord));

  t->codeImage = p->codeImage = code;
  p->codeImageSize = image->codeSize;

  // fprintf(stderr, "code from %p to %p\n",
  //         code, code + image->codeSize);

  static bool fixed = false;

  if (not fixed) {
    fixupHeap(t, heapMap, heapMapSizeInWords, heap);
  }

  t->m->heap->setImmortalHeap(heap, image->heapSize / BytesPerWord);

  t->m->types = bootObject(heap, image->types);

  t->m->roots = makeArray(t, Machine::RootCount);

  setRoot(t, Machine::BootLoader, bootObject(heap, image->bootLoader));
  setRoot(t, Machine::AppLoader, bootObject(heap, image->appLoader));

  p->roots = makeArray(t, RootCount);

  setRoot(t, MethodTree, bootObject(heap, image->methodTree));
  setRoot(t, MethodTreeSentinal, bootObject(heap, image->methodTreeSentinal));

  setRoot(t, VirtualThunks, bootObject(heap, image->virtualThunks));

  { object map = makeClassMap(t, bootClassTable, image->bootClassCount, heap);
    set(t, root(t, Machine::BootLoader), ClassLoaderMap, map);
  }

  systemClassLoaderFinder(t, root(t, Machine::BootLoader)) = t->m->bootFinder;

  { object map = makeClassMap(t, appClassTable, image->appClassCount, heap);
    set(t, root(t, Machine::AppLoader), ClassLoaderMap, map);
  }

  systemClassLoaderFinder(t, root(t, Machine::AppLoader)) = t->m->appFinder;

  setRoot(t, Machine::StringMap, makeStringMap
          (t, stringTable, image->stringCount, heap));

  p->callTableSize = image->callCount;

  setRoot(t, CallTable, makeCallTable
          (t, heap, callTable, image->callCount,
           reinterpret_cast<uintptr_t>(code)));

  setRoot(t, StaticTableArray, makeStaticTableArray
          (t, bootClassTable, image->bootClassCount,
           appClassTable, image->appClassCount, heap));

  findThunks(t, image, code);

  if (fixed) {
    resetRuntimeState
      (t, classLoaderMap(t, root(t, Machine::BootLoader)), heap,
       image->heapSize);

    resetRuntimeState
      (t, classLoaderMap(t, root(t, Machine::AppLoader)), heap,
       image->heapSize);

    for (unsigned i = 0; i < arrayLength(t, t->m->types); ++i) {
      resetClassRuntimeState
        (t, type(t, static_cast<Machine::Type>(i)), heap, image->heapSize);
    }
  } else {
    fixupVirtualThunks(t, code);

    fixupMethods
      (t, classLoaderMap(t, root(t, Machine::BootLoader)), image, code);

    fixupMethods
      (t, classLoaderMap(t, root(t, Machine::AppLoader)), image, code);
  }

  fixed = true;

  setRoot(t, Machine::BootstrapClassMap, makeHashMap(t, 0, 0));
}
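// boot() above locates each region of the boot image purely by pointer
// arithmetic from the header: the class, string, and call tables follow the
// BootImage struct directly, the heap map starts at the next word-aligned
// address after the call table, and the heap proper follows the map.  A
// sketch of that layout computation (same arithmetic, hypothetical field
// set):
#if 0
#include <stdint.h>

struct Layout {
  unsigned* bootClassTable;
  unsigned* appClassTable;
  unsigned* stringTable;
  unsigned* callTable;    // callCount pairs of (address, target)
  uintptr_t* heapMap;     // one bit per heap word
  uintptr_t* heap;
};

Layout
describe(void* imageHeader, unsigned headerSize, unsigned bootClassCount,
         unsigned appClassCount, unsigned stringCount, unsigned callCount,
         unsigned heapMapWords)
{
  Layout l;
  l.bootClassTable = reinterpret_cast<unsigned*>
    (static_cast<uint8_t*>(imageHeader) + headerSize);
  l.appClassTable = l.bootClassTable + bootClassCount;
  l.stringTable = l.appClassTable + appClassCount;
  l.callTable = l.stringTable + stringCount;
  l.heapMap = reinterpret_cast<uintptr_t*>
    ((reinterpret_cast<uintptr_t>(l.callTable + (callCount * 2))
      + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1));  // word-align
  l.heap = l.heapMap + heapMapWords;
  return l;
}
#endif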
intptr_t
getThunk(MyThread* t, Thunk thunk)
{
  MyProcessor* p = processor(t);

  return reinterpret_cast<intptr_t>
    (p->thunks.table.start + (thunk * p->thunks.table.length));
}

BootImage::Thunk
thunkToThunk(const MyProcessor::Thunk& thunk, uint8_t* base)
{
  return BootImage::Thunk
    (thunk.start - base, thunk.frameSavedOffset, thunk.length);
}
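// getThunk relies on the thunk table being ThunkCount stubs of identical
// size laid out back to back, so a thunk's entry point is just
// start + (index * length).  The same arithmetic, as a tiny standalone
// sketch:
#if 0
#include <stdint.h>

uint8_t*
thunkEntry(uint8_t* tableStart, unsigned stubLength, unsigned index)
{
  return tableStart + (index * stubLength);
}
#endif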
void
compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
{
  Assembler* a = c->assembler;

  if (processor(t)->bootImage) {
    Assembler::Memory table(t->arch->thread(), TargetThreadThunkTable);
    Assembler::Register scratch(t->arch->scratch());
    a->apply(Move, TargetBytesPerWord, MemoryOperand, &table,
             TargetBytesPerWord, RegisterOperand, &scratch);
    Assembler::Memory proc(scratch.low, index * TargetBytesPerWord);
    a->apply(Move, TargetBytesPerWord, MemoryOperand, &proc,
             TargetBytesPerWord, RegisterOperand, &scratch);
    a->apply
      (call ? Call : Jump, TargetBytesPerWord, RegisterOperand, &scratch);
  } else {
    Assembler::Constant proc
      (new (c->zone.allocate(sizeof(ResolvedPromise)))
       ResolvedPromise(reinterpret_cast<intptr_t>(t->thunkTable[index])));

    a->apply
      (call ? LongCall : LongJump, TargetBytesPerWord, ConstantOperand, &proc);
  }
}
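// compileCall above emits one of two calling sequences.  With a boot image,
// the generated code must stay position independent, so it loads the
// per-thread thunk-table pointer from the thread structure, loads entry
// `index` from that table, and calls (or jumps) through the register;
// otherwise it embeds the absolute thunk address as an immediate.  What the
// boot-image sequence computes at run time, expressed as a C++ sketch
// (hypothetical types; the real lookup is the two Move instructions above):
#if 0
typedef void (*ThunkEntry)();

ThunkEntry
resolveThroughThreadTable(void* const* threadThunkTable, unsigned index)
{
  // first load: threadThunkTable; second load: the indexed slot
  return reinterpret_cast<ThunkEntry>(threadThunkTable[index]);
}
#endif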
void
compileThunks(MyThread* t, FixedAllocator* allocator)
{
  MyProcessor* p = processor(t);

  { Context context(t);
    Assembler* a = context.assembler;

    a->saveFrame(TargetThreadStack, TargetThreadIp);

    p->thunks.default_.frameSavedOffset = a->length();

    Assembler::Register thread(t->arch->thread());
    a->pushFrame(1, TargetBytesPerWord, RegisterOperand, &thread);

    compileCall(t, &context, compileMethodIndex);
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
    a->popFrame(t->arch->alignFrameSize(1));

    Assembler::Register result(t->arch->returnLow());
    a->apply(Jump, TargetBytesPerWord, RegisterOperand, &result);

    p->thunks.default_.length = a->endBlock(false)->resolve(0, 0);

    p->thunks.default_.start = finish
      (t, allocator, a, "default", p->thunks.default_.length);
  }
{ Context context(t);
|
|
|
|
Assembler* a = context.assembler;
|
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
Assembler::Register class_(t->arch->virtualCallTarget());
|
|
|
|
Assembler::Memory virtualCallTargetSrc
|
|
|
|
(t->arch->stack(),
|
2009-05-12 18:16:55 +00:00
|
|
|
(t->arch->frameFooterSize() + t->arch->frameReturnAddressSize())
|
2011-08-30 01:00:17 +00:00
|
|
|
* TargetBytesPerWord);
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2011-08-30 01:00:17 +00:00
|
|
|
a->apply(Move, TargetBytesPerWord, MemoryOperand, &virtualCallTargetSrc,
|
|
|
|
TargetBytesPerWord, RegisterOperand, &class_);
|
2009-05-03 20:57:11 +00:00
|
|
|
|
|
|
|
Assembler::Memory virtualCallTargetDst
|
2011-09-01 03:18:00 +00:00
|
|
|
(t->arch->thread(), TargetThreadVirtualCallTarget);
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2011-08-30 01:00:17 +00:00
|
|
|
a->apply(Move, TargetBytesPerWord, RegisterOperand, &class_,
|
|
|
|
TargetBytesPerWord, MemoryOperand, &virtualCallTargetDst);
|
2009-04-05 21:42:10 +00:00
|
|
|
|
2009-04-19 22:36:11 +00:00
|
|
|
Assembler::Register index(t->arch->virtualCallIndex());
|
2009-04-05 21:42:10 +00:00
|
|
|
Assembler::Memory virtualCallIndex
|
2011-09-01 03:18:00 +00:00
|
|
|
(t->arch->thread(), TargetThreadVirtualCallIndex);
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2011-08-30 01:00:17 +00:00
|
|
|
a->apply(Move, TargetBytesPerWord, RegisterOperand, &index,
|
|
|
|
TargetBytesPerWord, MemoryOperand, &virtualCallIndex);
|
2009-04-05 21:42:10 +00:00
|
|
|
|
2011-09-01 03:18:00 +00:00
|
|
|
a->saveFrame(TargetThreadStack, TargetThreadIp);
|
2009-04-05 21:42:10 +00:00
|
|
|
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
p->thunks.defaultVirtual.frameSavedOffset = a->length();
|
|
|
|
|
2009-04-05 21:42:10 +00:00
|
|
|
Assembler::Register thread(t->arch->thread());
|
2011-08-30 01:00:17 +00:00
|
|
|
a->pushFrame(1, TargetBytesPerWord, RegisterOperand, &thread);
|
2009-04-05 21:42:10 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
compileCall(t, &context, compileVirtualMethodIndex);
|
|
|
|
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
a->popFrame(t->arch->alignFrameSize(1));
|
2009-04-05 21:42:10 +00:00
|
|
|
|
|
|
|
Assembler::Register result(t->arch->returnLow());
|
2011-08-30 01:00:17 +00:00
|
|
|
a->apply(Jump, TargetBytesPerWord, RegisterOperand, &result);
|
2009-04-05 21:42:10 +00:00
|
|
|
|
2010-11-14 02:28:05 +00:00
|
|
|
p->thunks.defaultVirtual.length = a->endBlock(false)->resolve(0, 0);
|
2009-04-05 21:42:10 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
p->thunks.defaultVirtual.start = finish
|
|
|
|
(t, allocator, a, "defaultVirtual", p->thunks.defaultVirtual.length);
|
|
|
|
}
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
{ Context context(t);
|
|
|
|
Assembler* a = context.assembler;
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2011-09-01 03:18:00 +00:00
|
|
|
a->saveFrame(TargetThreadStack, TargetThreadIp);
|
2008-08-16 18:46:14 +00:00
|
|
|
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
p->thunks.native.frameSavedOffset = a->length();
|
|
|
|
|
2008-08-18 15:23:01 +00:00
|
|
|
Assembler::Register thread(t->arch->thread());
|
2011-08-30 01:00:17 +00:00
|
|
|
a->pushFrame(1, TargetBytesPerWord, RegisterOperand, &thread);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
compileCall(t, &context, invokeNativeIndex);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
a->popFrameAndUpdateStackAndReturn
|
2011-09-01 03:18:00 +00:00
|
|
|
(t->arch->alignFrameSize(1), TargetThreadStack);
|
2008-09-07 20:12:11 +00:00
|
|
|
|
2010-11-14 02:28:05 +00:00
|
|
|
p->thunks.native.length = a->endBlock(false)->resolve(0, 0);
|
2011-09-20 22:30:30 +00:00
|
|
|
|
|
|
|
p->thunks.native.start = finish
|
|
|
|
(t, allocator, a, "native", p->thunks.native.length);
|
2008-05-31 22:14:27 +00:00
|
|
|
}
|
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
{ Context context(t);
|
|
|
|
Assembler* a = context.assembler;
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2011-09-01 03:18:00 +00:00
|
|
|
a->saveFrame(TargetThreadStack, TargetThreadIp);
|
2008-08-16 18:46:14 +00:00
|
|
|
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
p->thunks.aioob.frameSavedOffset = a->length();
|
|
|
|
|
2008-08-18 15:23:01 +00:00
|
|
|
Assembler::Register thread(t->arch->thread());
|
2011-08-30 01:00:17 +00:00
|
|
|
a->pushFrame(1, TargetBytesPerWord, RegisterOperand, &thread);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
compileCall(t, &context, throwArrayIndexOutOfBoundsIndex);
|
2008-09-07 20:12:11 +00:00
|
|
|
|
2010-11-14 02:28:05 +00:00
|
|
|
p->thunks.aioob.length = a->endBlock(false)->resolve(0, 0);
|
2008-05-31 22:14:27 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
p->thunks.aioob.start = finish
|
|
|
|
(t, allocator, a, "aioob", p->thunks.aioob.length);
|
|
|
|
}
|
2010-12-19 22:23:19 +00:00
|
|
|
|
2011-09-20 22:30:30 +00:00
|
|
|
  { Context context(t);
    Assembler* a = context.assembler;

    a->saveFrame(TargetThreadStack, TargetThreadIp);

    p->thunks.stackOverflow.frameSavedOffset = a->length();

    Assembler::Register thread(t->arch->thread());
    a->pushFrame(1, TargetBytesPerWord, RegisterOperand, &thread);

    compileCall(t, &context, throwStackOverflowIndex);

    p->thunks.stackOverflow.length = a->endBlock(false)->resolve(0, 0);

    p->thunks.stackOverflow.start = finish
      (t, allocator, a, "stackOverflow", p->thunks.stackOverflow.length);
  }
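
  // emit the table of generic thunks: a dummy entry is compiled first to
  // establish the per-entry length, a block large enough for ThunkCount
  // entries of that length is allocated, and then each THUNK listed in
  // thunks.cpp is emitted at a fixed stride within the block, so the i-th
  // entry starts at thunks.table.start + i * thunks.table.length.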
  { { Context context(t);
      Assembler* a = context.assembler;

      a->saveFrame(TargetThreadStack, TargetThreadIp);

      p->thunks.table.frameSavedOffset = a->length();

      compileCall(t, &context, dummyIndex, false);

      p->thunks.table.length = a->endBlock(false)->resolve(0, 0);

      p->thunks.table.start = static_cast<uint8_t*>
        (allocator->allocate
         (p->thunks.table.length * ThunkCount, TargetBytesPerWord));
    }

    uint8_t* start = p->thunks.table.start;

#define THUNK(s) {                                              \
      Context context(t);                                       \
      Assembler* a = context.assembler;                         \
                                                                \
      a->saveFrame(TargetThreadStack, TargetThreadIp);          \
                                                                \
      p->thunks.table.frameSavedOffset = a->length();           \
                                                                \
      compileCall(t, &context, s##Index, false);                \
                                                                \
      expect(t, a->endBlock(false)->resolve(0, 0)               \
             <= p->thunks.table.length);                        \
                                                                \
      a->setDestination(start);                                 \
      a->write();                                               \
                                                                \
      logCompile(t, start, p->thunks.table.length, 0, #s, 0);   \
                                                                \
      start += p->thunks.table.length;                          \
    }
#include "thunks.cpp"
#undef THUNK
  }
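
  // when building a boot image, record the thunk descriptors in the image
  // so the loader can find them; thunkToThunk translates each descriptor
  // against the code allocator's base address.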
  BootImage* image = p->bootImage;

  if (image) {
    uint8_t* imageBase = p->codeAllocator.base;

    image->thunks.default_ = thunkToThunk(p->thunks.default_, imageBase);
    image->thunks.defaultVirtual = thunkToThunk
      (p->thunks.defaultVirtual, imageBase);
    image->thunks.native = thunkToThunk(p->thunks.native, imageBase);
    image->thunks.aioob = thunkToThunk(p->thunks.aioob, imageBase);
    image->thunks.stackOverflow = thunkToThunk
      (p->thunks.stackOverflow, imageBase);
    image->thunks.table = thunkToThunk(p->thunks.table, imageBase);
  }
}
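
// convenience accessor for the MyProcessor instance attached to a thread's
// machine.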
MyProcessor*
processor(MyThread* t)
{
  return static_cast<MyProcessor*>(t->m->processor);
}
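
// Addresses of the thunks compiled above.  A method whose address still
// equals the default thunk has not been compiled yet; the boot* variants
// return the equivalents recorded in bootThunks.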
uintptr_t
defaultThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.default_.start);
}

uintptr_t
bootDefaultThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.default_.start);
}

uintptr_t
defaultVirtualThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>
    (processor(t)->thunks.defaultVirtual.start);
}

uintptr_t
nativeThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.native.start);
}

uintptr_t
bootNativeThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.native.start);
}

uintptr_t
aioobThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.aioob.start);
}

uintptr_t
stackOverflowThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.stackOverflow.start);
}
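
// a method address equal to either default thunk means the method has not
// been compiled yet.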
bool
unresolved(MyThread* t, uintptr_t methodAddress)
{
  return methodAddress == defaultThunk(t)
    or methodAddress == bootDefaultThunk(t);
}
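
// compile a small thunk for the given vtable index: it loads the index into
// the register named by virtualCallIndex() and jumps to the shared
// defaultVirtualThunk, which can then tell which slot was invoked.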
uintptr_t
compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
{
  Context context(t);
  Assembler* a = context.assembler;

  ResolvedPromise indexPromise(index);
  Assembler::Constant indexConstant(&indexPromise);
  Assembler::Register indexRegister(t->arch->virtualCallIndex());
  a->apply(Move, TargetBytesPerWord, ConstantOperand, &indexConstant,
           TargetBytesPerWord, RegisterOperand, &indexRegister);

  ResolvedPromise defaultVirtualThunkPromise(defaultVirtualThunk(t));
  Assembler::Constant thunk(&defaultVirtualThunkPromise);
  a->apply(Jump, TargetBytesPerWord, ConstantOperand, &thunk);

  *size = a->endBlock(false)->resolve(0, 0);

  uint8_t* start = static_cast<uint8_t*>
    (codeAllocator(t)->allocate(*size, TargetBytesPerWord));

  a->setDestination(start);
  a->write();

  logCompile(t, start, *size, 0, "virtualThunk", 0);

  return reinterpret_cast<uintptr_t>(start);
}
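
// return (compiling lazily if necessary) the thunk for the given vtable
// index.  The VirtualThunks root is a word array of (address, size) pairs:
//
//   wordArrayBody(t, array, index * 2)       -> thunk start address
//   wordArrayBody(t, array, index * 2 + 1)   -> thunk size in bytes
//
// The array grows by powers of two as larger indexes are requested, and all
// updates happen under the class lock.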
uintptr_t
virtualThunk(MyThread* t, unsigned index)
{
  ACQUIRE(t, t->m->classLock);

  if (root(t, VirtualThunks) == 0
      or wordArrayLength(t, root(t, VirtualThunks)) <= index * 2)
  {
    object newArray = makeWordArray(t, nextPowerOfTwo((index + 1) * 2));
    if (root(t, VirtualThunks)) {
      memcpy(&wordArrayBody(t, newArray, 0),
             &wordArrayBody(t, root(t, VirtualThunks), 0),
             wordArrayLength(t, root(t, VirtualThunks)) * BytesPerWord);
    }
    setRoot(t, VirtualThunks, newArray);
  }

  if (wordArrayBody(t, root(t, VirtualThunks), index * 2) == 0) {
    unsigned size;
    uintptr_t thunk = compileVirtualThunk(t, index, &size);
    wordArrayBody(t, root(t, VirtualThunks), index * 2) = thunk;
    wordArrayBody(t, root(t, VirtualThunks), (index * 2) + 1) = size;
  }

  return wordArrayBody(t, root(t, VirtualThunks), index * 2);
}
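
// compile the given method, unless another thread has already done so, and
// publish the result in the method tree so that stack unwinding and future
// callers can find it.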
void
compile(MyThread* t, FixedAllocator* allocator, BootContext* bootContext,
        object method)
{
  PROTECT(t, method);

  if (bootContext == 0) {
    initClass(t, methodClass(t, method));
  }

  if (methodAddress(t, method) != defaultThunk(t)) {
    return;
  }

  assert(t, (methodFlags(t, method) & ACC_NATIVE) == 0);

  // We must avoid acquiring any locks until after the first pass of
  // compilation, since this pass may trigger classloading operations
  // involving application classloaders and thus the potential for
  // deadlock.  To make this safe, we use a private clone of the
  // method so that we won't be confused if another thread updates the
  // original while we're working.

  object clone = methodClone(t, method);

  loadMemoryBarrier();
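
  // if another thread compiled this method while we were cloning it, there
  // is nothing more to do.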
  if (methodAddress(t, method) != defaultThunk(t)) {
    return;
  }

  PROTECT(t, clone);

  Context context(t, bootContext, clone);
  compile(t, &context);

  { object ehTable = codeExceptionHandlerTable(t, methodCode(t, clone));

    if (ehTable) {
      PROTECT(t, ehTable);

      // resolve all exception handler catch types before we acquire
      // the class lock:
      for (unsigned i = 0; i < exceptionHandlerTableLength(t, ehTable); ++i) {
        uint64_t handler = exceptionHandlerTableBody(t, ehTable, i);
        if (exceptionHandlerCatchType(handler)) {
          resolveClassInPool
            (t, clone, exceptionHandlerCatchType(handler) - 1);
        }
      }
    }
  }
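
  // with the first compilation pass done and catch types resolved, take the
  // class lock and check one final time before publishing the result.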
  ACQUIRE(t, t->m->classLock);

  if (methodAddress(t, method) != defaultThunk(t)) {
    return;
  }

  finish(t, allocator, &context);

  if (DebugMethodTree) {
    fprintf(stderr, "insert method at %p\n",
            reinterpret_cast<void*>(methodCompiled(t, clone)));
  }

  // We can't update the MethodCode field on the original method
  // before it is placed into the method tree, since another thread
  // might call the method, in which case stack unwinding would fail
  // (since there is not yet an entry in the method tree).  However,
  // we can't insert the original method into the tree before updating
  // the MethodCode field on it, since we rely on that field to
  // determine its position in the tree.  Therefore, we insert the
  // clone in its place.  Later, we'll replace the clone with the
  // original to save memory.

  setRoot
    (t, MethodTree, treeInsert
     (t, &(context.zone), root(t, MethodTree),
      methodCompiled(t, clone), clone, root(t, MethodTreeSentinal),
      compareIpToMethodBounds));

  storeStoreMemoryBarrier();

  set(t, method, MethodCode, methodCode(t, clone));

  if (methodVirtual(t, method)) {
    classVtable(t, methodClass(t, method), methodOffset(t, method))
      = reinterpret_cast<void*>(methodCompiled(t, clone));
  }

  // we've compiled the method and inserted it into the tree without
  // error, so we ensure that the executable area is not deallocated
  // when we dispose of the context:
  context.executableAllocator = 0;
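
  // finally, replace the clone with the original method in the tree, as
  // promised above.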
  treeUpdate(t, root(t, MethodTree), methodCompiled(t, clone),
             method, root(t, MethodTreeSentinal), compareIpToMethodBounds);
}
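
// the processor's GC roots live in an object array; these helpers read and
// write its entries by Root index.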
object&
root(Thread* t, Root root)
{
  return arrayBody(t, processor(static_cast<MyThread*>(t))->roots, root);
}

void
setRoot(Thread* t, Root root, object value)
{
  set(t, processor(static_cast<MyThread*>(t))->roots,
      ArrayBody + (root * BytesPerWord), value);
}

FixedAllocator*
codeAllocator(MyThread* t)
{
  return &(processor(t)->codeAllocator);
}

} // namespace local

} // namespace

namespace vm {
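
// factory used by the rest of the VM to construct the JIT-based processor;
// it placement-news a local::MyProcessor into memory from the given
// allocator.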
Processor*
makeProcessor(System* system, Allocator* allocator, bool useNativeFeatures)
{
  return new (allocator->allocate(sizeof(local::MyProcessor)))
    local::MyProcessor(system, allocator, useNativeFeatures);
}

} // namespace vm