/* Copyright (c) 2008-2014, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software.  See license.txt for
   details. */

#include "avian/machine.h"
#include "avian/util.h"
#include "avian/alloc-vector.h"
#include "avian/process.h"
#include "avian/target.h"
#include "avian/arch.h"

#include <avian/system/memory.h>

#include <avian/codegen/assembler.h>
#include <avian/codegen/architecture.h>
#include <avian/codegen/compiler.h>
#include <avian/codegen/targets.h>
#include <avian/codegen/lir.h>
#include <avian/codegen/runtime.h>

#include <avian/util/runtime-array.h>
#include <avian/util/list.h>
#include <avian/util/slice.h>
#include <avian/util/fixed-allocator.h>

#include "debug-util.h"

using namespace vm;

extern "C" uint64_t vmInvoke(void* thread,
                             void* function,
                             void* arguments,
                             unsigned argumentFootprint,
                             unsigned frameSize,
                             unsigned returnType);

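// Labels defined by the platform-specific implementation of vmInvoke
// (presumably alongside vmInvoke itself, in assembly); isVmInvokeUnsafeStack
// below treats the range [vmInvoke_returnAddress, vmInvoke_safeStack) as the
// region in which the thread's saved stack must not be walked.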
extern "C" void vmInvoke_returnAddress();

extern "C" void vmInvoke_safeStack();

extern "C" void vmJumpAndInvoke(void* thread,
                                void* function,
                                void* stack,
                                unsigned argumentFootprint,
                                uintptr_t* arguments,
                                unsigned frameSize);

using namespace avian::codegen;
using namespace avian::system;

namespace {

namespace local {

const bool DebugCompile = false;
const bool DebugNatives = false;
const bool DebugCallTable = false;
const bool DebugMethodTree = false;
const bool DebugInstructions = false;

#ifndef AVIAN_AOT_ONLY
const bool DebugFrameMaps = false;

const bool CheckArrayBounds = true;

const unsigned ExecutableAreaSizeInBytes = 30 * 1024 * 1024;
#endif

#ifdef AVIAN_CONTINUATIONS
const bool Continuations = true;
#else
const bool Continuations = false;
#endif

const unsigned MaxNativeCallFootprint = TargetBytesPerWord == 8 ? 4 : 5;

const unsigned InitialZoneCapacityInBytes = 64 * 1024;

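// Indices into the thunk table.  The THUNK macro expansion of thunks.cpp
// appends one <name>Index constant for each thunk defined there.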
enum ThunkIndex {
  compileMethodIndex,
  compileVirtualMethodIndex,
  invokeNativeIndex,
  throwArrayIndexOutOfBoundsIndex,
  throwStackOverflowIndex,

#define THUNK(s) s##Index,
#include "thunks.cpp"
#undef THUNK
  dummyIndex
};

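// Returns true if ip falls between the vmInvoke_returnAddress and
// vmInvoke_safeStack labels, i.e. at a point in vmInvoke where the stack
// captured in the thread object is not yet safe to use.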
inline bool isVmInvokeUnsafeStack(void* ip)
{
  return reinterpret_cast<uintptr_t>(ip)
         >= reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_returnAddress))
         and reinterpret_cast<uintptr_t>(ip)
             < reinterpret_cast<uintptr_t>(voidPointer(vmInvoke_safeStack));
}

class MyThread;

void* getIp(MyThread*);

class MyThread : public Thread {
 public:
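  // Records one VM-level invocation on this thread.  The constructor saves
  // the current ip/stack/continuation, links itself onto t->trace, and
  // publishes a cleared context via doTransition; the destructor restores
  // the saved values the same way.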
  class CallTrace {
   public:
    CallTrace(MyThread* t, GcMethod* method)
        : t(t),
          ip(getIp(t)),
          stack(t->stack),
          scratch(t->scratch),
          continuation(t->continuation),
          nativeMethod((method->flags() & ACC_NATIVE) ? method : 0),
          targetMethod(0),
          originalMethod(method),
          next(t->trace)
    {
      doTransition(t, 0, 0, 0, this);
    }

    ~CallTrace()
    {
      assertT(t, t->stack == 0);

      t->scratch = scratch;

      doTransition(t, ip, stack, continuation, next);
    }

    MyThread* t;
    void* ip;
    void* stack;
    void* scratch;
    GcContinuation* continuation;
    GcMethod* nativeMethod;
    GcMethod* targetMethod;
    GcMethod* originalMethod;
    CallTrace* next;
  };

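  // Snapshot of the thread's context fields (ip, stack, continuation,
  // trace).  Used as the staging ("transition") record in doTransition
  // below; MyProtector keeps the continuation visible to the GC.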
  class Context {
   public:
    class MyProtector : public Thread::Protector {
     public:
      MyProtector(MyThread* t, Context* context)
          : Protector(t), context(context)
      {
      }

      virtual void visit(Heap::Visitor* v)
      {
        v->visit(&(context->continuation));
      }

      Context* context;
    };

    Context(MyThread* t,
            void* ip,
            void* stack,
            GcContinuation* continuation,
            CallTrace* trace)
        : ip(ip),
          stack(stack),
          continuation(continuation),
          trace(trace),
          protector(t, this)
    {
    }

    void* ip;
    void* stack;
    GcContinuation* continuation;
    CallTrace* trace;
    MyProtector protector;
  };

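  // A Context pushed onto t->traceContext for the duration of stack walking
  // or unwinding; contexts nest, and the destructor pops this one off the
  // list.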
  class TraceContext : public Context {
   public:
    TraceContext(MyThread* t,
                 void* ip,
                 void* stack,
                 GcContinuation* continuation,
                 CallTrace* trace)
        : Context(t, ip, stack, continuation, trace),
          t(t),
          link(0),
          next(t->traceContext),
          methodIsMostRecent(false)
    {
      t->traceContext = this;
    }

    TraceContext(MyThread* t, void* link)
        : Context(t, t->ip, t->stack, t->continuation, t->trace),
          t(t),
          link(link),
          next(t->traceContext),
          methodIsMostRecent(false)
    {
      t->traceContext = this;
    }

    ~TraceContext()
    {
      t->traceContext = next;
    }

    MyThread* t;
    void* link;
    TraceContext* next;
    bool methodIsMostRecent;
  };

  static void doTransition(MyThread* t,
                           void* ip,
                           void* stack,
                           GcContinuation* continuation,
                           MyThread::CallTrace* trace)
  {
    // in this function, we "atomically" update the thread context
    // fields in such a way to ensure that another thread may
    // interrupt us at any time and still get a consistent, accurate
    // stack trace.  See MyProcessor::getStackTrace for details.
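    //
    // The update happens in four steps, with a memory barrier between
    // each so they become visible to other threads in program order:
    //
    //  1. populate a temporary "transition" Context with the new values
    //  2. point t->transition at it, so that interrupting threads read
    //     the staging values rather than the (stale) thread fields
    //  3. update the fields in the thread object itself
    //  4. clear t->transition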
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
|
2014-06-04 01:52:01 +00:00
|
|
|
    assertT(t, t->transition == 0);

    Context c(t, ip, stack, continuation, trace);

    compileTimeMemoryBarrier();

    t->transition = &c;

    compileTimeMemoryBarrier();

    t->ip = ip;
    t->stack = stack;
    t->continuation = continuation;
    t->trace = trace;

    compileTimeMemoryBarrier();

    t->transition = 0;
  }

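  // A child thread shares its parent's Architecture object; the first thread
  // creates one for the native target.  arch->acquire() is called either way.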
  MyThread(Machine* m,
           GcThread* javaThread,
           MyThread* parent,
           bool useNativeFeatures)
      : Thread(m, javaThread, parent),
        ip(0),
        stack(0),
        newStack(0),
        scratch(0),
        continuation(0),
        exceptionStackAdjustment(0),
        exceptionOffset(0),
        exceptionHandler(0),
        tailAddress(0),
        virtualCallTarget(0),
        virtualCallIndex(0),
        heapImage(0),
        codeImage(0),
        thunkTable(0),
        trace(0),
        reference(0),
        arch(parent ? parent->arch : avian::codegen::makeArchitectureNative(
                                         m->system,
                                         useNativeFeatures)),
        transition(0),
        traceContext(0),
        stackLimit(0),
        referenceFrame(0),
        methodLockIsClean(true)
  {
    arch->acquire();
  }

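  // Per-thread compiler and runtime state.  The ip, stack, continuation, and
  // trace fields are the ones published atomically by doTransition above.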
  void* ip;
  void* stack;
  void* newStack;
  void* scratch;
  GcContinuation* continuation;
  uintptr_t exceptionStackAdjustment;
  uintptr_t exceptionOffset;
  void* exceptionHandler;
  void* tailAddress;
  void* virtualCallTarget;
  uintptr_t virtualCallIndex;
  uintptr_t* heapImage;
  uint8_t* codeImage;
  void** thunkTable;
  CallTrace* trace;
  Reference* reference;
  avian::codegen::Architecture* arch;
  Context* transition;
  TraceContext* traceContext;
  uintptr_t stackLimit;
  List<Reference*>* referenceFrame;
  bool methodLockIsClean;
};

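// Free-standing wrapper around MyThread::doTransition.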
void transition(MyThread* t,
                void* ip,
                void* stack,
                GcContinuation* continuation,
                MyThread::CallTrace* trace)
{
  MyThread::doTransition(t, ip, stack, continuation, trace);
}

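// Fetch the receiver ("this") of the most recent Java call from the stack; it
// sits in the first argument slot, just past the frame footer and the saved
// return address.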
object resolveThisPointer(MyThread* t, void* stack)
{
  return reinterpret_cast<object*>(
      stack)[t->arch->frameFooterSize() + t->arch->frameReturnAddressSize()];
}

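// Select the method that will actually run for the given receiver: interface
// and virtual methods are looked up in the receiver's class, while static and
// non-virtual methods are returned unchanged.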
GcMethod* findMethod(Thread* t, GcMethod* method, object instance)
{
  if ((method->flags() & ACC_STATIC) == 0) {
    if (method->class_()->flags() & ACC_INTERFACE) {
      return findInterfaceMethod(t, method, objectClass(t, instance));
    } else if (methodVirtual(t, method)) {
      return findVirtualMethod(t, method, objectClass(t, instance));
    }
  }
  return method;
}

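// Resolve the target of a virtual or interface call whose receiver is on the
// stack, first resolving the receiver's class if it is still only a bootstrap
// placeholder.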
GcMethod* resolveTarget(MyThread* t, void* stack, GcMethod* method)
{
  GcClass* class_ = objectClass(t, resolveThisPointer(t, stack));

  if (class_->vmFlags() & BootstrapFlag) {
    PROTECT(t, method);
    PROTECT(t, class_);

    resolveSystemClass(t, roots(t)->bootLoader(), class_->name());
  }

  if (method->class_()->flags() & ACC_INTERFACE) {
    return findInterfaceMethod(t, method, class_);
  } else {
    return findVirtualMethod(t, method, class_);
  }
}

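// Resolve a call target directly by virtual-table index, again making sure the
// class has been fully resolved first.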
GcMethod* resolveTarget(MyThread* t, GcClass* class_, unsigned index)
{
  if (class_->vmFlags() & BootstrapFlag) {
    PROTECT(t, class_);

    resolveSystemClass(t, roots(t)->bootLoader(), class_->name());
  }

  return cast<GcMethod>(
      t, cast<GcArray>(t, class_->virtualTable())->body()[index]);
}

GcCompileRoots* compileRoots(Thread* t);
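
// Start address and size of a method's compiled code: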
intptr_t methodCompiled(Thread* t UNUSED, GcMethod* method)
{
  return method->code()->compiled();
}

unsigned methodCompiledSize(Thread* t UNUSED, GcMethod* method)
{
  return method->code()->compiledSize();
}

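// Ordering predicate for the method tree: returns -1, 0, or 1 depending on
// whether ip falls below, within, or above the method's compiled code.  A
// method whose last instruction is a non-returning throw gets an extra trap
// instruction appended at compile time, so a return address pointing just
// past that final call still falls within the method's bounds.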
intptr_t compareIpToMethodBounds(Thread* t, intptr_t ip, object om)
{
  GcMethod* method = cast<GcMethod>(t, om);
  intptr_t start = methodCompiled(t, method);

  if (DebugMethodTree) {
    fprintf(stderr,
            "find %p in (%p,%p)\n",
            reinterpret_cast<void*>(ip),
            reinterpret_cast<void*>(start),
            reinterpret_cast<void*>(start + methodCompiledSize(t, method)));
  }

  if (ip < start) {
    return -1;
  } else if (ip < start
             + static_cast<intptr_t>(methodCompiledSize(t, method))) {
    return 0;
  } else {
    return 1;
  }
}

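// Find the compiled method containing the given instruction pointer, if any.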
GcMethod* methodForIp(MyThread* t, void* ip)
{
  if (DebugMethodTree) {
    fprintf(stderr, "query for method containing %p\n", ip);
  }

  // we must use a version of the method tree at least as recent as the
  // compiled form of the method containing the specified address (see
  // compile(MyThread*, FixedAllocator*, BootContext*, object)):
  loadMemoryBarrier();

  return cast<GcMethod>(t,
                        treeQuery(t,
                                  compileRoots(t)->methodTree(),
                                  reinterpret_cast<intptr_t>(ip),
                                  compileRoots(t)->methodTreeSentinal(),
                                  compareIpToMethodBounds));
}

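// Number of local variable slots a method's frame needs; a synchronized,
// non-static method reserves one extra slot.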
unsigned localSize(MyThread* t UNUSED, GcMethod* method)
{
  unsigned size = method->code()->maxLocals();
  if ((method->flags() & (ACC_SYNCHRONIZED | ACC_STATIC)) == ACC_SYNCHRONIZED) {
    ++size;
  }
  return size;
}

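// Aligned stack footprint of a compiled method's frame: locals that are not
// incoming parameters, plus the operand stack, plus room for the largest
// native call, rounded up to the architecture's frame alignment.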
unsigned alignedFrameSize(MyThread* t, GcMethod* method)
{
  return t->arch->alignFrameSize(
      localSize(t, method) - method->parameterFootprint()
      + method->code()->maxStack()
      + t->arch->frameFootprint(MaxNativeCallFootprint));
}

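// Step *ip and *sp from the frame belonging to 'method' up to its caller.
// Compiled code keeps no frame pointer, so the architecture back end relies
// on the frame size information recorded at compile time; the saved link
// value and 'mostRecent' tell it whether the return address for this frame
// has actually been pushed yet.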
void nextFrame(MyThread* t,
               void** ip,
               void** sp,
               GcMethod* method,
               GcMethod* target,
               bool mostRecent)
{
  GcCode* code = method->code();
  intptr_t start = code->compiled();
  void* link;
  bool methodIsMostRecent;

  if (t->traceContext) {
    link = t->traceContext->link;
    methodIsMostRecent = mostRecent and t->traceContext->methodIsMostRecent;
  } else {
    link = 0;
    methodIsMostRecent = false;
  }

  if (false) {
    fprintf(stderr,
            "nextFrame %s.%s%s target %s.%s%s ip %p sp %p\n",
            method->class_()->name()->body().begin(),
            method->name()->body().begin(),
            method->spec()->body().begin(),
            target ? target->class_()->name()->body().begin() : 0,
            target ? target->name()->body().begin() : 0,
            target ? target->spec()->body().begin() : 0,
            *ip,
            *sp);
  }

  t->arch->nextFrame(reinterpret_cast<void*>(start),
                     code->compiledSize(),
                     alignedFrameSize(t, method),
                     link,
                     methodIsMostRecent,
                     target ? target->parameterFootprint() : -1,
                     ip,
                     sp);

  if (false) {
    fprintf(stderr, "next frame ip %p sp %p\n", *ip, *sp);
  }
}

void* getIp(MyThread* t, void* ip, void* stack)
{
  // Here we use the convention that, if the return address is neither
  // pushed on to the stack automatically as part of the call nor
  // stored in the caller's frame, it will be saved in MyThread::ip
  // instead of on the stack. See the various implementations of
  // Assembler::saveFrame for details on how this is done.
  return t->arch->returnAddressOffset() < 0 ? ip : t->arch->frameIp(stack);
}

void* getIp(MyThread* t)
{
  return getIp(t, t->ip, t->stack);
}

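// Walks the compiled Java frames of a thread, consulting the thread's
// CallTrace records where native code intervenes and falling back to the
// continuation chain when a frame has no compiled method.  Typical use
// (sketch, assuming a Processor::StackVisitor implementation of your own):
//
//   MyStackWalker walker(t);
//   walker.walk(&myVisitor);  // myVisitor.visit() is called once per frame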
class MyStackWalker : public Processor::StackWalker {
 public:
  enum State { Start, Next, Trace, Continuation, Method, NativeMethod, Finish };

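  // Keeps the walker's GC-managed references (method_, target, continuation)
  // up to date across collections.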
  class MyProtector : public Thread::Protector {
   public:
    MyProtector(MyStackWalker* walker) : Protector(walker->t), walker(walker)
    {
    }

    virtual void visit(Heap::Visitor* v)
    {
      v->visit(&(walker->method_));
      v->visit(&(walker->target));
      v->visit(&(walker->continuation));
    }

    MyStackWalker* walker;
  };

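  // Seed the walk from the thread's active TraceContext if one is installed,
  // otherwise from the thread's own saved context fields.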
  MyStackWalker(MyThread* t)
      : t(t), state(Start), method_(0), target(0), count_(0), protector(this)
  {
    if (t->traceContext) {
      ip_ = t->traceContext->ip;
      stack = t->traceContext->stack;
      trace = t->traceContext->trace;
      continuation = t->traceContext->continuation;
    } else {
      ip_ = getIp(t);
      stack = t->stack;
      trace = t->trace;
      continuation = t->continuation;
    }
  }

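  // Copying constructor used by walk() to hand the visitor a stable snapshot
  // of the current position.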
  MyStackWalker(MyStackWalker* w)
      : t(w->t),
        state(w->state),
        ip_(w->ip_),
        stack(w->stack),
        trace(w->trace),
        method_(w->method_),
        target(w->target),
        continuation(w->continuation),
        count_(w->count_),
        protector(this)
  {
  }

|
2007-10-11 22:43:03 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
virtual void walk(Processor::StackVisitor* v)
|
|
|
|
{
|
2008-04-23 16:33:31 +00:00
|
|
|
for (MyStackWalker it(this); it.valid();) {
|
2009-07-10 14:33:38 +00:00
|
|
|
MyStackWalker walker(&it);
|
2008-04-23 16:33:31 +00:00
|
|
|
if (not v->visit(&walker)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
it.next();
|
2007-12-14 18:27:56 +00:00
|
|
|
}
|
2008-04-23 16:33:31 +00:00
|
|
|
}
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
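  // Drives the walker's state machine until it either reaches a reportable
  // frame (Method, NativeMethod, or Continuation) or runs out of frames
  // (Finish).  Start and Next classify the current ip/stack pair; Trace pops
  // to the next CallTrace record once the current run of Java frames is
  // exhausted.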
  bool valid()
  {
    while (true) {
      if (false) {
        fprintf(stderr, "state: %d\n", state);
      }
      switch (state) {
      case Start:
        if (trace and trace->nativeMethod) {
          method_ = trace->nativeMethod;
          state = NativeMethod;
        } else {
          state = Next;
        }
        break;

      case Next:
        if (stack) {
          target = method_;
          method_ = methodForIp(t, ip_);
          if (method_) {
            state = Method;
          } else if (continuation) {
            method_ = continuation->method();
            state = Continuation;
          } else {
            state = Trace;
          }
        } else {
          state = Trace;
        }
        break;

      case Trace: {
        if (trace) {
          continuation = trace->continuation;
          stack = trace->stack;
          ip_ = trace->ip;
          trace = trace->next;

          state = Start;
        } else {
          state = Finish;
        }
      } break;

      case Continuation:
      case Method:
      case NativeMethod:
        return true;

      case Finish:
        return false;

      default:
        abort(t);
      }
    }
  }

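  // Advances past the frame the walker currently points at: follow the
  // continuation chain, unwind one compiled frame via nextFrame(), or simply
  // fall through for a native frame, then return to the Next state.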
  void next()
  {
    expect(t, count_ <= stackSizeInWords(t));

    switch (state) {
    case Continuation:
      continuation = continuation->next();
      break;

    case Method:
      nextFrame(t, &ip_, &stack, method_, target, count_ == 0);
      break;

    case NativeMethod:
      break;

    default:
      abort(t);
    }

    ++count_;

    state = Next;
  }

  virtual GcMethod* method()
  {
    if (false) {
      fprintf(stderr,
              "method %s.%s\n",
              method_->class_()->name()->body().begin(),
              method_->name()->body().begin());
    }
    return method_;
  }

  virtual int ip()
  {
    switch (state) {
    case Continuation:
      return reinterpret_cast<intptr_t>(continuation->address())
             - methodCompiled(t, continuation->method());

    case Method:
      return reinterpret_cast<intptr_t>(ip_) - methodCompiled(t, method_);

    case NativeMethod:
      return 0;

    default:
      abort(t);
    }
  }

  virtual unsigned count()
  {
    unsigned count = 0;

    for (MyStackWalker walker(this); walker.valid();) {
      walker.next();
      ++count;
    }

    return count;
  }

  MyThread* t;
  State state;
  void* ip_;
  void* stack;
  MyThread::CallTrace* trace;
  GcMethod* method_;
  GcMethod* target;
  GcContinuation* continuation;
  unsigned count_;
  MyProtector protector;
};

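// Offset, in words, of local variable v within a compiled frame of the given
// method.  Parameters were pushed by the caller and therefore sit beyond the
// frame header and footer; other locals live within the frame itself, so the
// two cases are offset differently relative to the frame size.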
int localOffset(MyThread* t, int v, GcMethod* method)
{
  int parameterFootprint = method->parameterFootprint();
  int frameSize = alignedFrameSize(t, method);

  int offset
      = ((v < parameterFootprint)
             ? (frameSize + parameterFootprint + t->arch->frameFooterSize()
                + t->arch->frameHeaderSize() - v - 1)
             : (frameSize + parameterFootprint - v - 1));

  assertT(t, offset >= 0);
  return offset;
}

int localOffsetFromStack(MyThread* t, int index, GcMethod* method)
{
  return localOffset(t, index, method) + t->arch->frameReturnAddressSize();
}

object* localObject(MyThread* t, void* stack, GcMethod* method, unsigned index)
{
  return static_cast<object*>(stack) + localOffsetFromStack(t, index, method);
}

int stackOffsetFromFrame(MyThread* t, GcMethod* method)
{
  return alignedFrameSize(t, method) + t->arch->frameHeaderSize();
}

void* stackForFrame(MyThread* t, void* frame, GcMethod* method)
{
  return static_cast<void**>(frame) - stackOffsetFromFrame(t, method);
}

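// A constant-pool entry for a compiled method: a Promise whose value becomes
// the address at which the target object is referenced once the generated
// code has been placed in memory.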
class PoolElement : public avian::codegen::Promise {
 public:
  PoolElement(Thread* t, object target, PoolElement* next)
      : t(t), target(target), address(0), next(next)
  {
  }

  virtual int64_t value()
  {
    assertT(t, resolved());
    return address;
  }

  virtual bool resolved()
  {
    return address != 0;
  }

  Thread* t;
  object target;
  intptr_t address;
  PoolElement* next;
};

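// Bookkeeping for one jsr/ret subroutine.  The compiler handles subroutines
// by duplicating the method's bytecode once per subroutine instance, so each
// record notes where its duplicate begins (duplicatedBaseIp) and where
// control resumes after ret (returnAddress).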
class Subroutine {
 public:
  Subroutine(unsigned index,
             unsigned returnAddress,
             unsigned methodSize,
             Subroutine* outer)
      : index(index),
        outer(outer),
        returnAddress(returnAddress),
        duplicatedBaseIp(methodSize * index),
        visited(false)
  {
  }

  // Index of this subroutine, in the (unmaterialized) list of subroutines in
  // this method.
  // Note that in the presence of nested finallys, this could theoretically end
  // up being greater than the number of jsr instructions (but this will be
  // extremely rare - I don't think we've seen this in practice).
  const unsigned index;

  // Subroutine outer to this one (if, for instance, we have nested finallys)
  Subroutine* const outer;

  // Starting ip in the original bytecode (always < original bytecode size)
  const unsigned returnAddress;

  // Starting ip for this subroutine's copy of the method bytecode
  const unsigned duplicatedBaseIp;

  bool visited;
};

class Context;

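// One record per trace point (call site or other GC point) in a compiled
// method: it keeps the target method, call-site flags, and a bitmap of which
// frame slots hold object references there, and it captures the machine
// address of the site once the assembler resolves it.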
class TraceElement : public avian::codegen::TraceHandler {
 public:
  static const unsigned VirtualCall = 1 << 0;
  static const unsigned TailCall = 1 << 1;
  static const unsigned LongCall = 1 << 2;

  TraceElement(Context* context,
               unsigned ip,
               GcMethod* target,
               unsigned flags,
               TraceElement* next,
               unsigned mapSize)
      : context(context),
        address(0),
        next(next),
        target(target),
        ip(ip),
        argumentIndex(0),
        flags(flags),
        watch(false)
  {
    memset(map, 0xFF, mapSize * BytesPerWord);
  }

  virtual void handleTrace(avian::codegen::Promise* address,
                           unsigned argumentIndex)
  {
    if (this->address == 0) {
      this->address = address;
      this->argumentIndex = argumentIndex;
    }
  }

  Context* context;
  avian::codegen::Promise* address;
  TraceElement* next;
  GcMethod* target;
  unsigned ip;
  unsigned argumentIndex;
  unsigned flags;
  bool watch;
  uintptr_t map[0];
};

class TraceElementPromise : public avian::codegen::Promise {
 public:
  TraceElementPromise(System* s, TraceElement* trace) : s(s), trace(trace)
  {
  }

  virtual int64_t value()
  {
    assertT(s, resolved());
    return trace->address->value();
  }

  virtual bool resolved()
  {
    return trace->address != 0 and trace->address->resolved();
  }

  System* s;
  TraceElement* trace;
};

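// Tags for entries in Context::eventLog, which records per-instruction
// information (context pushes/pops, ip changes, slots marked as references
// or cleared, exception handler entries, and trace points) that is later
// replayed to compute the GC root map at each trace point.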
enum Event {
  PushContextEvent,
  PopContextEvent,
  IpEvent,
  MarkEvent,
  ClearEvent,
  PushExceptionHandlerEvent,
  TraceEvent,
};

unsigned frameMapSizeInBits(MyThread* t, GcMethod* method)
{
  return localSize(t, method) + method->code()->maxStack();
}

unsigned frameMapSizeInWords(MyThread* t, GcMethod* method)
{
  return ceilingDivide(frameMapSizeInBits(t, method), BitsPerWord);
}

enum Thunk {
#define THUNK(s) s##Thunk,
#include "thunks.cpp"
#undef THUNK
};

const unsigned ThunkCount = idleIfNecessaryThunk + 1;

intptr_t getThunk(MyThread* t, Thunk thunk);

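// State shared across methods when compiling ahead of time into a boot
// image: the constant and call-site triples accumulated so far, delayed
// promises for addresses that are not yet known, plus the zone and offset
// resolver used while the image is assembled.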
class BootContext {
 public:
  class MyProtector : public Thread::Protector {
   public:
    MyProtector(Thread* t, BootContext* c) : Protector(t), c(c)
    {
    }

    virtual void visit(Heap::Visitor* v)
    {
      v->visit(&(c->constants));
      v->visit(&(c->calls));
    }

    BootContext* c;
  };

  BootContext(Thread* t,
              GcTriple* constants,
              GcTriple* calls,
              avian::codegen::DelayedPromise* addresses,
              Zone* zone,
              OffsetResolver* resolver)
      : protector(t, this),
        constants(constants),
        calls(calls),
        addresses(addresses),
        addressSentinal(addresses),
        zone(zone),
        resolver(resolver)
  {
  }

  MyProtector protector;
  GcTriple* constants;
  GcTriple* calls;
  avian::codegen::DelayedPromise* addresses;
  avian::codegen::DelayedPromise* addressSentinal;
  Zone* zone;
  OffsetResolver* resolver;
};

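// Per-method compilation state: the zone, assembler, and compiler used to
// generate code, the accumulated object pool and trace log, per-ip visit and
// GC-root tables, and the event log consumed when frame maps are calculated.
// The single-argument constructor builds a minimal context for cases where
// only an assembler is needed (e.g. thunk generation).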
class Context {
 public:
  class MyResource : public Thread::AutoResource {
   public:
    MyResource(Context* c) : AutoResource(c->thread), c(c)
    {
    }

    virtual void release()
    {
      c->dispose();
    }

    Context* c;
  };

  class MyProtector : public Thread::Protector {
   public:
    MyProtector(Context* c) : Protector(c->thread), c(c)
    {
    }

    virtual void visit(Heap::Visitor* v)
    {
      v->visit(&(c->method));

      for (PoolElement* p = c->objectPool; p; p = p->next) {
        v->visit(&(p->target));
      }

      for (TraceElement* p = c->traceLog; p; p = p->next) {
        v->visit(&(p->target));
      }
    }

    Context* c;
  };

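  // Compiler::Client implementation that tells the code generator which
  // runtime thunk implements a given operation when it is not emitted
  // inline, e.g. 64-bit division or floating-point conversions and
  // comparisons.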
  class MyClient : public Compiler::Client {
   public:
    MyClient(MyThread* t) : t(t)
    {
    }

    virtual intptr_t getThunk(avian::codegen::lir::UnaryOperation, unsigned)
    {
      abort(t);
    }

    virtual intptr_t getThunk(avian::codegen::lir::BinaryOperation op,
                              unsigned size,
                              unsigned resultSize)
    {
      if (size == 8) {
        switch (op) {
        case avian::codegen::lir::Absolute:
          assertT(t, resultSize == 8);
          return local::getThunk(t, absoluteLongThunk);

        case avian::codegen::lir::FloatNegate:
          assertT(t, resultSize == 8);
          return local::getThunk(t, negateDoubleThunk);

        case avian::codegen::lir::FloatSquareRoot:
          assertT(t, resultSize == 8);
          return local::getThunk(t, squareRootDoubleThunk);

        case avian::codegen::lir::Float2Float:
          assertT(t, resultSize == 4);
          return local::getThunk(t, doubleToFloatThunk);

        case avian::codegen::lir::Float2Int:
          if (resultSize == 8) {
            return local::getThunk(t, doubleToLongThunk);
          } else {
            assertT(t, resultSize == 4);
            return local::getThunk(t, doubleToIntThunk);
          }

        case avian::codegen::lir::Int2Float:
          if (resultSize == 8) {
            return local::getThunk(t, longToDoubleThunk);
          } else {
            assertT(t, resultSize == 4);
            return local::getThunk(t, longToFloatThunk);
          }

        default:
          abort(t);
        }
      } else {
        assertT(t, size == 4);

        switch (op) {
        case avian::codegen::lir::Absolute:
          assertT(t, resultSize == 4);
          return local::getThunk(t, absoluteIntThunk);

        case avian::codegen::lir::FloatNegate:
          assertT(t, resultSize == 4);
          return local::getThunk(t, negateFloatThunk);

        case avian::codegen::lir::FloatAbsolute:
          assertT(t, resultSize == 4);
          return local::getThunk(t, absoluteFloatThunk);

        case avian::codegen::lir::Float2Float:
          assertT(t, resultSize == 8);
          return local::getThunk(t, floatToDoubleThunk);

        case avian::codegen::lir::Float2Int:
          if (resultSize == 4) {
            return local::getThunk(t, floatToIntThunk);
          } else {
            assertT(t, resultSize == 8);
            return local::getThunk(t, floatToLongThunk);
          }

        case avian::codegen::lir::Int2Float:
          if (resultSize == 4) {
            return local::getThunk(t, intToFloatThunk);
          } else {
            assertT(t, resultSize == 8);
            return local::getThunk(t, intToDoubleThunk);
          }

        default:
          abort(t);
        }
      }
    }

    virtual intptr_t getThunk(avian::codegen::lir::TernaryOperation op,
                              unsigned size,
                              unsigned,
                              bool* threadParameter)
    {
      *threadParameter = false;

      if (size == 8) {
        switch (op) {
        case avian::codegen::lir::Divide:
          *threadParameter = true;
          return local::getThunk(t, divideLongThunk);

        case avian::codegen::lir::Remainder:
          *threadParameter = true;
          return local::getThunk(t, moduloLongThunk);

        case avian::codegen::lir::FloatAdd:
          return local::getThunk(t, addDoubleThunk);

        case avian::codegen::lir::FloatSubtract:
          return local::getThunk(t, subtractDoubleThunk);

        case avian::codegen::lir::FloatMultiply:
          return local::getThunk(t, multiplyDoubleThunk);

        case avian::codegen::lir::FloatDivide:
          return local::getThunk(t, divideDoubleThunk);

        case avian::codegen::lir::FloatRemainder:
          return local::getThunk(t, moduloDoubleThunk);

        case avian::codegen::lir::JumpIfFloatEqual:
        case avian::codegen::lir::JumpIfFloatNotEqual:
        case avian::codegen::lir::JumpIfFloatLess:
        case avian::codegen::lir::JumpIfFloatGreater:
        case avian::codegen::lir::JumpIfFloatLessOrEqual:
        case avian::codegen::lir::JumpIfFloatGreaterOrUnordered:
        case avian::codegen::lir::JumpIfFloatGreaterOrEqualOrUnordered:
          return local::getThunk(t, compareDoublesGThunk);

        case avian::codegen::lir::JumpIfFloatGreaterOrEqual:
        case avian::codegen::lir::JumpIfFloatLessOrUnordered:
        case avian::codegen::lir::JumpIfFloatLessOrEqualOrUnordered:
          return local::getThunk(t, compareDoublesLThunk);

        default:
          abort(t);
        }
      } else {
        assertT(t, size == 4);
        switch (op) {
        case avian::codegen::lir::Divide:
          *threadParameter = true;
          return local::getThunk(t, divideIntThunk);

        case avian::codegen::lir::Remainder:
          *threadParameter = true;
          return local::getThunk(t, moduloIntThunk);

        case avian::codegen::lir::FloatAdd:
          return local::getThunk(t, addFloatThunk);

        case avian::codegen::lir::FloatSubtract:
          return local::getThunk(t, subtractFloatThunk);

        case avian::codegen::lir::FloatMultiply:
          return local::getThunk(t, multiplyFloatThunk);

        case avian::codegen::lir::FloatDivide:
          return local::getThunk(t, divideFloatThunk);

        case avian::codegen::lir::FloatRemainder:
          return local::getThunk(t, moduloFloatThunk);

        case avian::codegen::lir::JumpIfFloatEqual:
        case avian::codegen::lir::JumpIfFloatNotEqual:
        case avian::codegen::lir::JumpIfFloatLess:
        case avian::codegen::lir::JumpIfFloatGreater:
        case avian::codegen::lir::JumpIfFloatLessOrEqual:
        case avian::codegen::lir::JumpIfFloatGreaterOrUnordered:
        case avian::codegen::lir::JumpIfFloatGreaterOrEqualOrUnordered:
          return local::getThunk(t, compareFloatsGThunk);

        case avian::codegen::lir::JumpIfFloatGreaterOrEqual:
        case avian::codegen::lir::JumpIfFloatLessOrUnordered:
        case avian::codegen::lir::JumpIfFloatLessOrEqualOrUnordered:
          return local::getThunk(t, compareFloatsLThunk);

        default:
          abort(t);
        }
      }
    }

    MyThread* t;
  };

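  // Context for compiling a specific method: the visit and root tables are
  // sized from the method's bytecode length, and the argument buffer is
  // large enough for any legal Java parameter list.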
  Context(MyThread* t, BootContext* bootContext, GcMethod* method)
      : thread(t),
        zone(t->m->heap, InitialZoneCapacityInBytes),
        assembler(t->arch->makeAssembler(t->m->heap, &zone)),
        client(t),
        compiler(makeCompiler(t->m->system, assembler, &zone, &client)),
        method(method),
        bootContext(bootContext),
        objectPool(0),
        subroutineCount(0),
        traceLog(0),
        visitTable(
            Slice<uint16_t>::allocAndSet(&zone, method->code()->length(), 0)),
        rootTable(Slice<uintptr_t>::allocAndSet(
            &zone,
            method->code()->length() * frameMapSizeInWords(t, method),
            ~(uintptr_t)0)),
        executableAllocator(0),
        executableStart(0),
        executableSize(0),
        objectPoolCount(0),
        traceLogCount(0),
        dirtyRoots(false),
        leaf(true),
        eventLog(t->m->system, t->m->heap, 1024),
        protector(this),
        resource(this),
        argumentBuffer(
            (ir::Value**)t->m->heap->allocate(256 * sizeof(ir::Value*)),
            256)  // below the maximal allowed parameter count for Java
  {
  }

  Context(MyThread* t)
      : thread(t),
        zone(t->m->heap, InitialZoneCapacityInBytes),
        assembler(t->arch->makeAssembler(t->m->heap, &zone)),
        client(t),
        compiler(0),
        method(0),
        bootContext(0),
        objectPool(0),
        subroutineCount(0),
        traceLog(0),
        visitTable(0, 0),
        rootTable(0, 0),
        executableAllocator(0),
        executableStart(0),
        executableSize(0),
        objectPoolCount(0),
        traceLogCount(0),
        dirtyRoots(false),
        leaf(true),
        eventLog(t->m->system, t->m->heap, 0),
        protector(this),
        resource(this),
        argumentBuffer(0, 0)
  {
  }

  ~Context()
  {
    dispose();
  }

  void dispose()
  {
    if (compiler) {
      compiler->dispose();
    }

    assembler->dispose();

    if (executableAllocator) {
      executableAllocator->free(executableStart, executableSize);
    }

    eventLog.dispose();

    zone.dispose();

    if (argumentBuffer.begin()) {
      thread->m->heap->free(argumentBuffer.begin(), 256 * sizeof(ir::Value*));
    }
  }

  void extendLogicalCode(unsigned more)
  {
    compiler->extendLogicalCode(more);
    visitTable = visitTable.cloneAndSet(&zone, visitTable.count + more, 0);
    rootTable = rootTable.cloneAndSet(
        &zone,
        rootTable.count + more * frameMapSizeInWords(thread, method),
        ~(uintptr_t)0);
  }

  MyThread* thread;
  Zone zone;
  avian::codegen::Assembler* assembler;
  MyClient client;
  avian::codegen::Compiler* compiler;
  GcMethod* method;
  BootContext* bootContext;
  PoolElement* objectPool;
  unsigned subroutineCount;
  TraceElement* traceLog;
  Slice<uint16_t> visitTable;
  Slice<uintptr_t> rootTable;
  Alloc* executableAllocator;
  void* executableStart;
  unsigned executableSize;
  unsigned objectPoolCount;
  unsigned traceLogCount;
  bool dirtyRoots;
  bool leaf;
  Vector eventLog;
  MyProtector protector;
  MyResource resource;
  Slice<ir::Value*> argumentBuffer;
};

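// Translates a Java local-variable index into the index scheme used by the
// compiler, in which parameters (pushed by the caller) are numbered in
// reverse while other locals keep their bytecode indices.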
unsigned translateLocalIndex(Context* context,
                             unsigned footprint,
                             unsigned index)
{
  unsigned parameterFootprint = context->method->parameterFootprint();

  if (index < parameterFootprint) {
    return parameterFootprint - index - footprint;
  } else {
    return index;
  }
}

ir::Value* loadLocal(Context* context,
                     unsigned footprint,
                     ir::Type type,
                     unsigned index)
{
  ir::Value* result = context->compiler->loadLocal(
      type, translateLocalIndex(context, footprint, index));

  assertT(context->thread, type == result->type);
  return result;
}

void storeLocal(Context* context,
                unsigned footprint,
                ir::Type type UNUSED,
                ir::Value* value,
                unsigned index)
{
  assertT(context->thread, type == value->type);
  context->compiler->storeLocal(value,
                                translateLocalIndex(context, footprint, index));
}

avian::util::FixedAllocator* codeAllocator(MyThread* t);

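// Maps a class-file field type code to the IR type used for values of that
// type; sub-int types are widened to i4, matching the JVM's operand stack
// model.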
ir::Type operandTypeForFieldCode(Thread* t, unsigned code)
{
  switch (code) {
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case IntField:
    return ir::Type::i4();
  case LongField:
    return ir::Type::i8();

  case ObjectField:
    return ir::Type::object();

  case FloatField:
    return ir::Type::f4();
  case DoubleField:
    return ir::Type::f8();

  case VoidField:
    return ir::Type::void_();

  default:
    abort(t);
  }
}

unsigned methodReferenceParameterFootprint(Thread* t,
                                           GcReference* reference,
                                           bool isStatic)
{
  return parameterFootprint(
      t,
      reinterpret_cast<const char*>(reference->spec()->body().begin()),
      isStatic);
}

int methodReferenceReturnCode(Thread* t, GcReference* reference)
{
  unsigned parameterCount;
  unsigned parameterFootprint;
  unsigned returnCode;
  scanMethodSpec(
      t,
      reinterpret_cast<const char*>(reference->spec()->body().begin()),
      true,
      &parameterCount,
      &parameterFootprint,
      &returnCode);

  return returnCode;
}

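// Tracks the abstract interpretation state while a method's bytecode is
// translated: the types currently on the simulated operand stack (stackMap),
// the current bytecode ip and stack pointer, and the jsr subroutine being
// expanded, if any.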
class Frame {
|
|
|
|
public:
|
2014-05-05 16:49:50 +00:00
|
|
|
Frame(Context* context, ir::Type* stackMap)
|
2014-05-01 02:11:54 +00:00
|
|
|
: context(context),
|
|
|
|
t(context->thread),
|
|
|
|
c(context->compiler),
|
|
|
|
subroutine(0),
|
|
|
|
stackMap(stackMap),
|
|
|
|
ip(0),
|
|
|
|
sp(localSize()),
|
2014-06-01 20:22:14 +00:00
|
|
|
level(0)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2014-07-11 15:47:57 +00:00
|
|
|
memset(stackMap, 0, context->method->code()->maxStack() * sizeof(ir::Type));
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
|
2014-05-05 16:49:50 +00:00
|
|
|
Frame(Frame* f, ir::Type* stackMap)
|
2014-05-01 02:11:54 +00:00
|
|
|
: context(f->context),
|
|
|
|
t(context->thread),
|
|
|
|
c(context->compiler),
|
|
|
|
subroutine(f->subroutine),
|
|
|
|
stackMap(stackMap),
|
|
|
|
ip(f->ip),
|
|
|
|
sp(f->sp),
|
2014-06-01 20:22:14 +00:00
|
|
|
level(f->level + 1)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2014-05-05 16:49:50 +00:00
|
|
|
memcpy(stackMap,
|
|
|
|
f->stackMap,
|
2014-06-28 04:00:05 +00:00
|
|
|
context->method->code()->maxStack() * sizeof(ir::Type));
|
2008-01-07 14:51:07 +00:00
|
|
|
|
|
|
|
if (level > 1) {
|
2008-07-05 20:21:13 +00:00
|
|
|
context->eventLog.append(PushContextEvent);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
~Frame()
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
dispose();
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void dispose()
|
|
|
|
{
|
2010-12-27 22:55:23 +00:00
|
|
|
if (level > 1) {
|
2014-05-01 18:44:42 +00:00
|
|
|
context->eventLog.append(PopContextEvent);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
2007-10-10 22:39:40 +00:00
|
|
|
}
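  // Make a heap object available to the compiled code.  When compiling into a
  // boot image, the reference is expressed as the heap-image base (loaded from
  // the thread) plus a promised offset; otherwise the object is interned in
  // the per-method object pool and referenced by address.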
  ir::Value* append(object o)
  {
    BootContext* bc = context->bootContext;
    if (bc) {
      avian::codegen::Promise* p = new (bc->zone)
          avian::codegen::ListenPromise(t->m->system, bc->zone);

      PROTECT(t, o);
      object pointer = makePointer(t, p);
      bc->constants = makeTriple(t, o, pointer, bc->constants);

      return c->binaryOp(
          lir::Add,
          ir::Type::object(),
          c->memory(
              c->threadRegister(), ir::Type::object(), TARGET_THREAD_HEAPIMAGE),
          c->promiseConstant(p, ir::Type::object()));
    } else {
      for (PoolElement* e = context->objectPool; e; e = e->next) {
        if (o == e->target) {
          return c->address(ir::Type::object(), e);
        }
      }

      context->objectPool = new (&context->zone)
          PoolElement(t, o, context->objectPool);

      ++context->objectPoolCount;

      return c->address(ir::Type::object(), context->objectPool);
    }
  }

  unsigned localSize()
  {
    return local::localSize(t, context->method);
  }

  unsigned stackSize()
  {
    return context->method->code()->maxStack();
  }

  unsigned frameSize()
  {
    return localSize() + stackSize();
  }

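  // Record the type of frame slot 'index'.  Object slots are noted in the
  // event log as MarkEvent and everything else as ClearEvent; for stack slots
  // (index >= localSize()) the type is also kept in stackMap.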
  void set(unsigned index, ir::Type type)
  {
    assertT(t, index < frameSize());

    if (type == ir::Type::object()) {
      context->eventLog.append(MarkEvent);
      context->eventLog.append2(index);
    } else {
      context->eventLog.append(ClearEvent);
      context->eventLog.append2(index);
    }

    int si = index - localSize();
    if (si >= 0) {
      stackMap[si] = type;
    }
  }

  ir::Type get(unsigned index)
  {
    assertT(t, index < frameSize());
    int si = index - localSize();
    assertT(t, si >= 0);
    return stackMap[si];
  }

  void popped(unsigned count)
  {
    assertT(t, sp >= count);
    assertT(t, sp - count >= localSize());
    while (count) {
      set(--sp, ir::Type::i4());
      --count;
    }
  }

  avian::codegen::Promise* addressPromise(avian::codegen::Promise* p)
  {
    BootContext* bc = context->bootContext;
    if (bc) {
      bc->addresses = new (bc->zone) avian::codegen::DelayedPromise(
          t->m->system, bc->zone, p, bc->addresses);
      return bc->addresses;
    } else {
      return p;
    }
  }

  ir::Value* addressOperand(avian::codegen::Promise* p)
  {
    return c->promiseConstant(p, ir::Type::iptr());
  }

  ir::Value* absoluteAddressOperand(avian::codegen::Promise* p)
  {
    return context->bootContext
               ? c->binaryOp(
                     lir::Add,
                     ir::Type::iptr(),
                     c->memory(c->threadRegister(),
                               ir::Type::iptr(),
                               TARGET_THREAD_CODEIMAGE),
                     c->promiseConstant(
                         new (&context->zone) avian::codegen::OffsetPromise(
                             p,
                             -reinterpret_cast<intptr_t>(
                                 codeAllocator(t)->memory.begin())),
                         ir::Type::iptr()))
               : addressOperand(p);
  }

  ir::Value* machineIpValue(unsigned logicalIp)
  {
    return c->promiseConstant(machineIp(logicalIp), ir::Type::iptr());
  }

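  // JSR subroutines are compiled by statically duplicating their bytecode, so
  // while inside one, bytecode ips are offset by the subroutine's duplicated
  // base before being handed to the compiler.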
  unsigned duplicatedIp(unsigned bytecodeIp)
  {
    if (UNLIKELY(subroutine)) {
      return bytecodeIp + subroutine->duplicatedBaseIp;
    } else {
      return bytecodeIp;
    }
  }

  Promise* machineIp(unsigned bytecodeIp)
  {
    return c->machineIp(duplicatedIp(bytecodeIp));
  }

  void visitLogicalIp(unsigned bytecodeIp)
  {
    unsigned dupIp = duplicatedIp(bytecodeIp);
    c->visitLogicalIp(dupIp);

    context->eventLog.append(IpEvent);
    context->eventLog.append2(bytecodeIp);
  }

  void startLogicalIp(unsigned bytecodeIp)
  {
    unsigned dupIp = duplicatedIp(bytecodeIp);
    c->startLogicalIp(dupIp);

    context->eventLog.append(IpEvent);
    context->eventLog.append2(bytecodeIp);

    this->ip = bytecodeIp;
  }

  void push(ir::Type type, ir::Value* o)
  {
    assertT(t, type == o->type);
    c->push(o->type, o);
    assertT(t, sp + 1 <= frameSize());
    set(sp++, type);
  }

  void pushObject()
  {
    c->pushed(ir::Type::object());

    assertT(t, sp + 1 <= frameSize());
    set(sp++, ir::Type::object());
  }

  void pushLarge(ir::Type type, ir::Value* o)
  {
    assertT(t, o->type == type);
    c->push(type, o);
    assertT(t, sp + 2 <= frameSize());
    set(sp++, type);
    set(sp++, type);
  }

  void popFootprint(unsigned count)
  {
    popped(count);
    c->popped(count);
  }

  ir::Value* pop(ir::Type type)
  {
    assertT(t, sp >= 1);
    assertT(t, sp - 1 >= localSize());
    assertT(t, get(sp - 1) == type);
    set(--sp, ir::Type::i4());
    return c->pop(type);
  }

  ir::Value* popLarge(ir::Type type)
  {
    assertT(t, sp >= 1);
    assertT(t, sp - 2 >= localSize());
    assertT(t, get(sp - 1) == type);
    assertT(t, get(sp - 2) == type);
    sp -= 2;
    return c->pop(type);
  }

  void load(ir::Type type, unsigned index)
  {
    assertT(t, index < localSize());
    push(type, loadLocal(context, 1, type, index));
  }

  void loadLarge(ir::Type type, unsigned index)
  {
    assertT(t, index < static_cast<unsigned>(localSize() - 1));
    pushLarge(type, loadLocal(context, 2, type, index));
  }

  void store(ir::Type type, unsigned index)
  {
    assertT(t,
            type == ir::Type::i4() || type == ir::Type::f4()
            || type == ir::Type::object());
    storeLocal(context, 1, type, pop(type), index);
    unsigned ti = translateLocalIndex(context, 1, index);
    assertT(t, ti < localSize());
    set(ti, type);
  }

  void storeLarge(ir::Type type, unsigned index)
  {
    assertT(t, type.rawSize() == 8);
    storeLocal(context, 2, type, popLarge(type), index);
    unsigned ti = translateLocalIndex(context, 2, index);
    assertT(t, ti + 1 < localSize());
    set(ti, type);
    set(ti + 1, type);
  }

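  // The following helpers implement the JVM stack-manipulation instructions
  // (dup, dup_x1, dup_x2, dup2, dup2_x1, dup2_x2, swap): each replays the
  // shuffle on the compiler's virtual stack and then applies the same
  // permutation to the recorded slot types.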
  void dup()
  {
    c->push(ir::Type::i4(), c->peek(1, 0));

    assertT(t, sp + 1 <= frameSize());
    assertT(t, sp - 1 >= localSize());
    set(sp, get(sp - 1));
    ++sp;
  }

  void dupX1()
  {
    ir::Value* s0 = c->pop(ir::Type::i4());
    ir::Value* s1 = c->pop(ir::Type::i4());

    c->push(ir::Type::i4(), s0);
    c->push(ir::Type::i4(), s1);
    c->push(ir::Type::i4(), s0);

    assertT(t, sp + 1 <= frameSize());
    assertT(t, sp - 2 >= localSize());

    ir::Type b2 = get(sp - 2);
    ir::Type b1 = get(sp - 1);

    set(sp - 1, b2);
    set(sp - 2, b1);
    set(sp, b1);

    ++sp;
  }

  void dupX2()
  {
    ir::Value* s0 = c->pop(ir::Type::i4());

    if (get(sp - 2).rawSize() == 8) {
      ir::Value* s1 = c->pop(ir::Type::i8());

      c->push(ir::Type::i4(), s0);
      c->push(ir::Type::i8(), s1);
      c->push(ir::Type::i4(), s0);
    } else {
      ir::Value* s1 = c->pop(ir::Type::i4());
      ir::Value* s2 = c->pop(ir::Type::i4());

      c->push(ir::Type::i4(), s0);
      c->push(ir::Type::i4(), s2);
      c->push(ir::Type::i4(), s1);
      c->push(ir::Type::i4(), s0);
    }

    assertT(t, sp + 1 <= frameSize());
    assertT(t, sp - 3 >= localSize());

    ir::Type b3 = get(sp - 3);
    ir::Type b2 = get(sp - 2);
    ir::Type b1 = get(sp - 1);

    set(sp - 2, b3);
    set(sp - 1, b2);
    set(sp - 3, b1);
    set(sp, b1);

    ++sp;
  }

  void dup2()
  {
    if (get(sp - 1).rawSize() == 8) {
      c->push(ir::Type::i8(), c->peek(2, 0));
    } else {
      ir::Value* s0 = c->pop(ir::Type::i4());
      ir::Value* s1 = c->pop(ir::Type::i4());

      c->push(ir::Type::i4(), s1);
      c->push(ir::Type::i4(), s0);
      c->push(ir::Type::i4(), s1);
      c->push(ir::Type::i4(), s0);
    }

    assertT(t, sp + 2 <= frameSize());
    assertT(t, sp - 2 >= localSize());

    ir::Type b2 = get(sp - 2);
    ir::Type b1 = get(sp - 1);

    set(sp, b2);
    set(sp + 1, b1);

    sp += 2;
  }

  void dup2X1()
  {
    if (get(sp - 1).rawSize() == 8) {
      ir::Value* s0 = c->pop(ir::Type::i8());
      ir::Value* s1 = c->pop(ir::Type::i4());

      c->push(ir::Type::i8(), s0);
      c->push(ir::Type::i4(), s1);
      c->push(ir::Type::i8(), s0);
    } else {
      ir::Value* s0 = c->pop(ir::Type::i4());
      ir::Value* s1 = c->pop(ir::Type::i4());
      ir::Value* s2 = c->pop(ir::Type::i4());

      c->push(ir::Type::i4(), s1);
      c->push(ir::Type::i4(), s0);
      c->push(ir::Type::i4(), s2);
      c->push(ir::Type::i4(), s1);
      c->push(ir::Type::i4(), s0);
    }

    assertT(t, sp + 2 <= frameSize());
    assertT(t, sp - 3 >= localSize());

    ir::Type b3 = get(sp - 3);
    ir::Type b2 = get(sp - 2);
    ir::Type b1 = get(sp - 1);

    set(sp - 1, b3);
    set(sp - 3, b2);
    set(sp, b2);
    set(sp - 2, b1);
    set(sp + 1, b1);

    sp += 2;
  }

  void dup2X2()
  {
    if (get(sp - 1).rawSize() == 8) {
      ir::Value* s0 = c->pop(ir::Type::i8());

      if (get(sp - 3).rawSize() == 8) {
        ir::Value* s1 = c->pop(ir::Type::i8());

        c->push(ir::Type::i8(), s0);
        c->push(ir::Type::i8(), s1);
        c->push(ir::Type::i8(), s0);
      } else {
        ir::Value* s1 = c->pop(ir::Type::i4());
        ir::Value* s2 = c->pop(ir::Type::i4());

        c->push(ir::Type::i8(), s0);
        c->push(ir::Type::i4(), s2);
        c->push(ir::Type::i4(), s1);
        c->push(ir::Type::i8(), s0);
      }
    } else {
      ir::Value* s0 = c->pop(ir::Type::i4());
      ir::Value* s1 = c->pop(ir::Type::i4());
      ir::Value* s2 = c->pop(ir::Type::i4());
      ir::Value* s3 = c->pop(ir::Type::i4());

      c->push(ir::Type::i4(), s1);
      c->push(ir::Type::i4(), s0);
      c->push(ir::Type::i4(), s3);
      c->push(ir::Type::i4(), s2);
      c->push(ir::Type::i4(), s1);
      c->push(ir::Type::i4(), s0);
    }

    assertT(t, sp + 2 <= frameSize());
    assertT(t, sp - 4 >= localSize());

    ir::Type b4 = get(sp - 4);
    ir::Type b3 = get(sp - 3);
    ir::Type b2 = get(sp - 2);
    ir::Type b1 = get(sp - 1);

    set(sp - 2, b4);
    set(sp - 1, b3);
    set(sp - 4, b2);
    set(sp, b2);
    set(sp - 3, b1);
    set(sp + 1, b1);

    sp += 2;
  }

  void swap()
  {
    ir::Value* s0 = c->pop(ir::Type::i4());
    ir::Value* s1 = c->pop(ir::Type::i4());

    c->push(ir::Type::i4(), s0);
    c->push(ir::Type::i4(), s1);

    assertT(t, sp - 2 >= localSize());

    ir::Type saved = get(sp - 1);

    set(sp - 1, get(sp - 2));
    set(sp - 2, saved);
  }

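  // Allocate a TraceElement for the current ip and link it into the context's
  // trace log.  Space for a frame map of mapSize words is reserved directly
  // after the element.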
  TraceElement* trace(GcMethod* target, unsigned flags)
  {
    unsigned mapSize = frameMapSizeInWords(t, context->method);

    TraceElement* e = context->traceLog = new (
        context->zone.allocate(sizeof(TraceElement) + (mapSize * BytesPerWord)))
        TraceElement(context,
                     duplicatedIp(ip),
                     target,
                     flags,
                     context->traceLog,
                     mapSize);

    ++context->traceLogCount;

    context->eventLog.append(TraceEvent);
    context->eventLog.appendAddress(e);

    return e;
  }

  void pushReturnValue(unsigned code, ir::Value* result)
  {
    switch (code) {
    case ByteField:
    case BooleanField:
    case CharField:
    case ShortField:
    case IntField:
      return push(ir::Type::i4(), result);
    case FloatField:
      return push(ir::Type::f4(), result);

    case ObjectField:
      return push(ir::Type::object(), result);

    case LongField:
      return pushLarge(ir::Type::i8(), result);
    case DoubleField:
      return pushLarge(ir::Type::f8(), result);

    default:
      abort(t);
    }
  }

  Slice<ir::Value*> peekMethodArguments(unsigned footprint)
  {
    ir::Value** ptr = context->argumentBuffer.items;

    for (unsigned i = 0; i < footprint; i++) {
      *(ptr++) = c->peek(1, footprint - i - 1);
    }

    return Slice<ir::Value*>(context->argumentBuffer.items, footprint);
  }

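  // Emit a call to a resolved method: collect its arguments from the virtual
  // stack, issue the call with the return type implied by the method's return
  // code, pop the argument footprint, and push any return value.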
  void stackCall(ir::Value* methodValue,
                 GcMethod* methodObject,
                 unsigned flags,
                 TraceElement* trace)
  {
    unsigned footprint = methodObject->parameterFootprint();
    unsigned returnCode = methodObject->returnCode();
    ir::Value* result = c->stackCall(methodValue,
                                     flags,
                                     trace,
                                     operandTypeForFieldCode(t, returnCode),
                                     peekMethodArguments(footprint));

    popFootprint(footprint);

    if (returnCode != VoidField) {
      pushReturnValue(returnCode, result);
    }
  }

  void referenceStackCall(bool isStatic,
                          ir::Value* methodValue,
                          GcReference* methodReference,
                          unsigned flags,
                          TraceElement* trace)
  {
    unsigned footprint
        = methodReferenceParameterFootprint(t, methodReference, isStatic);
    unsigned returnCode = methodReferenceReturnCode(t, methodReference);
    ir::Value* result = c->stackCall(methodValue,
                                     flags,
                                     trace,
                                     operandTypeForFieldCode(t, returnCode),
                                     peekMethodArguments(footprint));

    popFootprint(footprint);

    if (returnCode != VoidField) {
      pushReturnValue(returnCode, result);
    }
  }

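  // JSR/RET support.  Subroutines are expanded statically: entering one
  // allocates a Subroutine record, extends the logical code area to hold the
  // duplicated copy, and pushes a placeholder return address.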
  void startSubroutine(unsigned ip, unsigned returnAddress)
  {
    // Push a dummy value to the stack, representing the return address (which
    // we don't need, since we're expanding everything statically).
    // TODO: in the future, push a value that we can track through type checking
    push(ir::Type::object(), c->constant(0, ir::Type::object()));

    if (DebugInstructions) {
      fprintf(stderr, "startSubroutine %u %u\n", ip, returnAddress);
    }

    Subroutine* subroutine = new (&context->zone)
        Subroutine(context->subroutineCount++,
                   returnAddress,
                   context->method->code()->length(),
                   this->subroutine);

    context->extendLogicalCode(context->method->code()->length());

    this->subroutine = subroutine;
  }

  unsigned endSubroutine(unsigned returnAddressLocal UNUSED)
  {
    // TODO: use returnAddressLocal to decide which subroutine we're returning
    // from (in case it's ever not the most recent one entered). I'm unsure of
    // whether such a subroutine pattern would pass bytecode verification.

    unsigned returnAddress = subroutine->returnAddress;

    if (DebugInstructions) {
      fprintf(stderr, "endSubroutine %u %u\n", ip, returnAddress);
    }

    subroutine = subroutine->outer;

    return returnAddress;
  }

  Context* context;
  MyThread* t;
  avian::codegen::Compiler* c;

  // Innermost subroutine we're compiling code for
  Subroutine* subroutine;

  ir::Type* stackMap;
  unsigned ip;
  unsigned sp;
  unsigned level;
};

unsigned savedTargetIndex(MyThread* t UNUSED, GcMethod* method)
{
  return method->code()->maxLocals();
}

GcCallNode* findCallNode(MyThread* t, void* address);

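// Search the method's exception handler table for a handler covering the
// given machine ip.  The index array stores (start, end, handler) offsets as
// consecutive int triples relative to the start of the compiled code, and the
// corresponding catch types follow in the table itself.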
void* findExceptionHandler(Thread* t, GcMethod* method, void* ip)
{
  if (t->exception) {
    GcArray* table = cast<GcArray>(t, method->code()->exceptionHandlerTable());
    if (table) {
      GcIntArray* index = cast<GcIntArray>(t, table->body()[0]);

      uint8_t* compiled = reinterpret_cast<uint8_t*>(methodCompiled(t, method));

      for (unsigned i = 0; i < table->length() - 1; ++i) {
        unsigned start = index->body()[i * 3];
        unsigned end = index->body()[(i * 3) + 1];
        unsigned key = difference(ip, compiled) - 1;

        if (key >= start and key < end) {
          GcClass* catchType = cast<GcClass>(t, table->body()[i + 1]);

          if (exceptionMatch(t, catchType, t->exception)) {
            return compiled + index->body()[(i * 3) + 2];
          }
        }
      }
    }
  }

  return 0;
}

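// When unwinding past a synchronized method, release its monitor: the class
// for static methods, otherwise the lock object saved at savedTargetIndex.
// If the lock was never successfully acquired, just reset the flag instead.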
void releaseLock(MyThread* t, GcMethod* method, void* stack)
{
  if (method->flags() & ACC_SYNCHRONIZED) {
    if (t->methodLockIsClean) {
      object lock;
      if (method->flags() & ACC_STATIC) {
        lock = method->class_();
      } else {
        lock = *localObject(t,
                            stackForFrame(t, stack, method),
                            method,
                            savedTargetIndex(t, method));
      }

      release(t, lock);
    } else {
      // got an exception while trying to acquire the lock for a
      // synchronized method -- don't try to release it, since we
      // never succeeded in acquiring it.
      t->methodLockIsClean = true;
    }
  }
}

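// Walk the Java stack starting from the current (or trace-context) frame,
// looking for a handler for the in-flight exception.  On success, targetIp,
// targetFrame and targetStack describe the handler's frame; if we unwind out
// of compiled code entirely, the search continues through any captured
// continuations.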
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
void findUnwindTarget(MyThread* t,
|
|
|
|
void** targetIp,
|
|
|
|
void** targetFrame,
|
|
|
|
void** targetStack,
|
|
|
|
GcContinuation** targetContinuation)
|
2007-09-29 21:08:29 +00:00
|
|
|
{
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
void* ip;
|
|
|
|
void* stack;
|
2014-06-29 03:50:32 +00:00
|
|
|
GcContinuation* continuation;
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
|
|
|
|
if (t->traceContext) {
|
|
|
|
ip = t->traceContext->ip;
|
|
|
|
stack = t->traceContext->stack;
|
|
|
|
continuation = t->traceContext->continuation;
|
|
|
|
} else {
|
2011-02-20 03:33:26 +00:00
|
|
|
ip = getIp(t);
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
stack = t->stack;
|
2014-05-29 04:17:25 +00:00
|
|
|
continuation = t->continuation;
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
}
|
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
GcMethod* target = t->trace->targetMethod;
|
fix a couple of subtle Thread.getStackTrace bugs
The first problem was that, on x86, we failed to properly keep track
of whether to expect the return address to be on the stack or not when
unwinding through a frame. We were relying on a "stackLimit" pointer
to tell us whether we were looking at the most recently-called frame
by comparing it with the stack pointer for that frame. That was
inaccurate in the case of a thread executing at the beginning of a
method before a new frame is allocated, in which case the most recent
two frames share a stack pointer, confusing the unwinder. The
solution involves keeping track of how many frames we've looked at
while walking the stack.
The other problem was that compareIpToMethodBounds assumed every
method was followed by at least one byte of padding before the next
method started. That assumption was usually valid because we were
storing the size following method code prior to the code itself.
However, the last method of an AOT-compiled code image is not followed
by any such method header and may instead be followed directly by
native code with no intervening padding. In that case, we risk
interpreting that native code as part of the preceding method, with
potentially bizarre results.
The reason for the compareIpToMethodBounds assumption was that methods
which throw exceptions as their last instruction generate a
non-returning call, which nonetheless push a return address on the
stack which points past the end of the method, and the unwinder needs
to know that return address belongs to that method. A better solution
is to add an extra trap instruction to the end of such methods, which
is what this patch does.
2012-05-05 00:35:13 +00:00
|
|
|
bool mostRecent = true;
|
2009-05-17 00:39:08 +00:00
|
|
|
|
2008-01-01 17:08:47 +00:00
|
|
|
*targetIp = 0;
|
|
|
|
while (*targetIp == 0) {
|
2014-05-29 04:17:25 +00:00
|
|
|
GcMethod* method = methodForIp(t, ip);
|
2008-04-07 23:47:41 +00:00
|
|
|
if (method) {
|
2009-05-24 01:49:14 +00:00
|
|
|
void* handler = findExceptionHandler(t, method, ip);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2009-04-26 22:06:15 +00:00
|
|
|
if (handler) {
|
|
|
|
*targetIp = handler;
|
2009-04-26 21:55:35 +00:00
|
|
|
|
fix a couple of subtle Thread.getStackTrace bugs
The first problem was that, on x86, we failed to properly keep track
of whether to expect the return address to be on the stack or not when
unwinding through a frame. We were relying on a "stackLimit" pointer
to tell us whether we were looking at the most recently-called frame
by comparing it with the stack pointer for that frame. That was
inaccurate in the case of a thread executing at the beginning of a
method before a new frame is allocated, in which case the most recent
two frames share a stack pointer, confusing the unwinder. The
solution involves keeping track of how many frames we've looked at
while walking the stack.
The other problem was that compareIpToMethodBounds assumed every
method was followed by at least one byte of padding before the next
method started. That assumption was usually valid because we were
storing the size following method code prior to the code itself.
However, the last method of an AOT-compiled code image is not followed
by any such method header and may instead be followed directly by
native code with no intervening padding. In that case, we risk
interpreting that native code as part of the preceding method, with
potentially bizarre results.
The reason for the compareIpToMethodBounds assumption was that methods
which throw exceptions as their last instruction generate a
non-returning call, which nonetheless push a return address on the
stack which points past the end of the method, and the unwinder needs
to know that return address belongs to that method. A better solution
is to add an extra trap instruction to the end of such methods, which
is what this patch does.
2012-05-05 00:35:13 +00:00
|
|
|
nextFrame(t, &ip, &stack, method, target, mostRecent);
|
2009-04-26 21:55:35 +00:00
|
|
|
|
2009-04-26 22:06:15 +00:00
|
|
|
void** sp = static_cast<void**>(stackForFrame(t, stack, method))
|
2014-07-11 15:50:18 +00:00
|
|
|
+ t->arch->frameReturnAddressSize();
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
*targetFrame = static_cast<void**>(stack)
|
|
|
|
+ t->arch->framePointerOffset();
|
2009-04-26 22:06:15 +00:00
|
|
|
*targetStack = sp;
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
*targetContinuation = continuation;
|
2009-04-26 22:06:15 +00:00
|
|
|
|
2009-05-17 23:43:48 +00:00
|
|
|
sp[localOffset(t, localSize(t, method), method)] = t->exception;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2008-08-18 15:23:01 +00:00
|
|
|
t->exception = 0;
|
2007-12-09 22:45:43 +00:00
|
|
|
} else {
|
fix a couple of subtle Thread.getStackTrace bugs
The first problem was that, on x86, we failed to properly keep track
of whether to expect the return address to be on the stack or not when
unwinding through a frame. We were relying on a "stackLimit" pointer
to tell us whether we were looking at the most recently-called frame
by comparing it with the stack pointer for that frame. That was
inaccurate in the case of a thread executing at the beginning of a
method before a new frame is allocated, in which case the most recent
two frames share a stack pointer, confusing the unwinder. The
solution involves keeping track of how many frames we've looked at
while walking the stack.
The other problem was that compareIpToMethodBounds assumed every
method was followed by at least one byte of padding before the next
method started. That assumption was usually valid because we were
storing the size following method code prior to the code itself.
However, the last method of an AOT-compiled code image is not followed
by any such method header and may instead be followed directly by
native code with no intervening padding. In that case, we risk
interpreting that native code as part of the preceding method, with
potentially bizarre results.
The reason for the compareIpToMethodBounds assumption was that methods
which throw exceptions as their last instruction generate a
non-returning call, which nonetheless push a return address on the
stack which points past the end of the method, and the unwinder needs
to know that return address belongs to that method. A better solution
is to add an extra trap instruction to the end of such methods, which
is what this patch does.
2012-05-05 00:35:13 +00:00
|
|
|
nextFrame(t, &ip, &stack, method, target, mostRecent);
|
2009-04-26 22:06:15 +00:00
|
|
|
|
2009-05-23 22:15:06 +00:00
|
|
|
if (t->exception) {
|
|
|
|
releaseLock(t, method, stack);
|
|
|
|
}
|
2009-05-17 00:39:08 +00:00
|
|
|
|
|
|
|
target = method;
|
2009-05-03 20:57:11 +00:00
|
|
|
}
|
|
|
|
} else {
|
2011-02-25 18:04:23 +00:00
|
|
|
expect(t, ip);
|
2009-05-03 20:57:11 +00:00
|
|
|
*targetIp = ip;
|
2011-01-28 04:06:01 +00:00
|
|
|
*targetFrame = 0;
|
2009-05-03 20:57:11 +00:00
|
|
|
*targetStack = static_cast<void**>(stack)
|
2014-07-11 15:50:18 +00:00
|
|
|
+ t->arch->frameReturnAddressSize();
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
*targetContinuation = continuation;
|
2009-05-03 20:57:11 +00:00
|
|
|
|
fix Thread.getStackTrace race conditions
Implementing Thread.getStackTrace is tricky. A thread may interrupt
another thread at any time to grab a stack trace, including while the
latter is executing Java code, JNI code, helper thunks, VM code, or
while transitioning between any of these.
To create a stack trace we use several context fields associated with
the target thread, including snapshots of the instruction pointer,
stack pointer, and frame pointer. These fields must be current,
accurate, and consistent with each other in order to get a reliable
trace. Otherwise, we risk crashing the VM by trying to walk garbage
stack frames or by misinterpreting the size and/or content of
legitimate frames.
This commit addresses sensitive transition points such as entering the
helper thunks which bridge the transitions from Java to native code
(where we must save the stack and frame registers for use from native
code) and stack unwinding (where we must atomically update the thread
context fields to indicate which frame we are unwinding to). When
grabbing a trace for another thread, we determine what kind of code we
caught the thread executing in and use that information to choose the
thread context values with which to begin the trace. See
MyProcessor::getStackTrace::Visitor::visit for details.
In order to atomically update the thread context fields, we do the
following:
1. Create a temporary "transition" object to serve as a staging area
and populate it with the new field values.
2. Update a transition pointer in the thread object to point to the
object created above. As long as this pointer is non-null,
interrupting threads will use the context values in the staging
object instead of those in the thread object.
3. Update the fields in the thread object.
4. Clear the transition pointer in the thread object.
We use a memory barrier between each of these steps to ensure they are
made visible to other threads in program order. See
MyThread::doTransition for details.
2010-06-16 01:10:48 +00:00
|
|
|
while (Continuations and *targetContinuation) {
|
2014-06-29 03:50:32 +00:00
|
|
|
GcContinuation* c = *targetContinuation;
|
2009-05-25 04:27:50 +00:00
|
|
|
|
2014-06-29 03:50:32 +00:00
|
|
|
GcMethod* method = c->method();
|
2009-05-17 00:39:08 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
void* handler = findExceptionHandler(t, method, c->address());
|
2009-05-03 20:57:11 +00:00
|
|
|
|
|
|
|
if (handler) {
|
|
|
|
t->exceptionHandler = handler;
|
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
t->exceptionStackAdjustment
|
2014-07-11 15:47:57 +00:00
|
|
|
              = (stackOffsetFromFrame(t, method)
                 - ((c->framePointerOffset() / BytesPerWord)
                    - t->arch->framePointerOffset()
                    + t->arch->frameReturnAddressSize())) * BytesPerWord;

          t->exceptionOffset = localOffset(t, localSize(t, method), method)
                               * BytesPerWord;

          break;
        } else if (t->exception) {
          releaseLock(t,
                      method,
                      reinterpret_cast<uint8_t*>(c) + ContinuationBody
                          + c->returnAddressOffset()
                          - t->arch->returnAddressOffset());
        }

        *targetContinuation = c->next();
      }
    }

    mostRecent = false;
  }
}

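// Capture the current Java stack as a chain of continuation objects: each
// compiled frame between the current ip and the nearest non-Java frame is
// copied into a freshly allocated GcContinuation and linked to the previous
// one via setNext; *targetIp and *targetStack receive the point at which the
// walk stopped, and the chain is finally spliced onto t->continuation.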
GcContinuation* makeCurrentContinuation(MyThread* t,
                                        void** targetIp,
                                        void** targetStack)
{
  void* ip = getIp(t);
  void* stack = t->stack;

  GcContinuationContext* context
      = t->continuation
            ? t->continuation->context()
            : makeContinuationContext(t, 0, 0, 0, 0, t->trace->originalMethod);
  PROTECT(t, context);

  GcMethod* target = t->trace->targetMethod;
  PROTECT(t, target);

  GcContinuation* first = 0;
  PROTECT(t, first);

  GcContinuation* last = 0;
  PROTECT(t, last);

  bool mostRecent = true;

  *targetIp = 0;
  while (*targetIp == 0) {
    GcMethod* method = methodForIp(t, ip);
    if (method) {
      PROTECT(t, method);

      void** top = static_cast<void**>(stack)
                   + t->arch->frameReturnAddressSize()
                   + t->arch->frameFooterSize();
      unsigned argumentFootprint
          = t->arch->argumentFootprint(target->parameterFootprint());
      unsigned alignment = t->arch->stackAlignmentInWords();
      if (avian::codegen::TailCalls and argumentFootprint > alignment) {
        top += argumentFootprint - alignment;
      }

      void* nextIp = ip;
      nextFrame(t, &nextIp, &stack, method, target, mostRecent);

      void** bottom = static_cast<void**>(stack)
                      + t->arch->frameReturnAddressSize();
      unsigned frameSize = bottom - top;
      unsigned totalSize
          = frameSize + t->arch->frameFooterSize()
            + t->arch->argumentFootprint(method->parameterFootprint());

      GcContinuation* c = makeContinuation(
          t,
          0,
          context,
          method,
          ip,
          (frameSize + t->arch->frameFooterSize()
           + t->arch->returnAddressOffset() - t->arch->frameReturnAddressSize())
              * BytesPerWord,
          (frameSize + t->arch->frameFooterSize()
           + t->arch->framePointerOffset() - t->arch->frameReturnAddressSize())
              * BytesPerWord,
          totalSize);

      memcpy(c->body().begin(), top, totalSize * BytesPerWord);

      if (last) {
        last->setNext(t, c);
      } else {
        first = c;
      }
      last = c;

      ip = nextIp;

      target = method;
    } else {
      *targetIp = ip;
      *targetStack = static_cast<void**>(stack)
                     + t->arch->frameReturnAddressSize();
    }

    mostRecent = false;
  }

  expect(t, last);
  last->setNext(t, t->continuation);

  return first;
}

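// Unwind the current thread to the target chosen by findUnwindTarget: the
// trace's target and native method pointers are cleared, the thread's
// context fields are updated atomically via transition, and vmJump transfers
// control to the new ip/stack without returning.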
void NO_RETURN unwind(MyThread* t)
{
  void* ip;
  void* frame;
  void* stack;
  GcContinuation* continuation;
  findUnwindTarget(t, &ip, &frame, &stack, &continuation);

  t->trace->targetMethod = 0;
  t->trace->nativeMethod = 0;

  transition(t, ip, stack, continuation, t->trace);

  vmJump(ip, frame, stack, t, 0, 0);
}

class MyCheckpoint : public Thread::Checkpoint {
 public:
  MyCheckpoint(MyThread* t) : Checkpoint(t)
  {
  }

  virtual void unwind()
  {
    local::unwind(static_cast<MyThread*>(t));
  }
};

uintptr_t defaultThunk(MyThread* t);

uintptr_t nativeThunk(MyThread* t);

uintptr_t bootNativeThunk(MyThread* t);

uintptr_t virtualThunk(MyThread* t, unsigned index);

bool unresolved(MyThread* t, uintptr_t methodAddress);

uintptr_t methodAddress(Thread* t, GcMethod* method)
{
  if (method->flags() & ACC_NATIVE) {
    return bootNativeThunk(static_cast<MyThread*>(t));
  } else {
    return methodCompiled(t, method);
  }
}

void tryInitClass(MyThread* t, GcClass* class_)
{
  initClass(t, class_);
}

void compile(MyThread* t,
             FixedAllocator* allocator,
             BootContext* bootContext,
             GcMethod* method);

GcMethod* resolveMethod(Thread* t, GcPair* pair)
{
  GcReference* reference = cast<GcReference>(t, pair->second());
  PROTECT(t, reference);

  GcClass* class_ = resolveClassInObject(
      t,
      cast<GcMethod>(t, pair->first())->class_()->loader(),
      reference,
      ReferenceClass);

  return cast<GcMethod>(t,
                        findInHierarchy(t,
                                        class_,
                                        reference->name(),
                                        reference->spec(),
                                        findMethodInClass,
                                        GcNoSuchMethodError::Type));
}

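// prepareMethodForCall is the common slow path for the invoke* helpers below:
// it rejects abstract targets with AbstractMethodError, compiles the target
// on demand if its address is still unresolved, records native targets in
// the thread's trace, and returns the address the caller should jump to.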
bool methodAbstract(Thread* t UNUSED, GcMethod* method)
{
  return method->code() == 0 and (method->flags() & ACC_NATIVE) == 0;
}

int64_t prepareMethodForCall(MyThread* t, GcMethod* target)
{
  if (methodAbstract(t, target)) {
    throwNew(t,
             GcAbstractMethodError::Type,
             "%s.%s%s",
             target->class_()->name()->body().begin(),
             target->name()->body().begin(),
             target->spec()->body().begin());
  } else {
    if (unresolved(t, methodAddress(t, target))) {
      PROTECT(t, target);

      compile(t, codeAllocator(t), 0, target);
    }

    if (target->flags() & ACC_NATIVE) {
      t->trace->nativeMethod = target;
    }

    return methodAddress(t, target);
  }
}

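// Runtime helpers for invokeinterface: resolve the interface method against
// the receiver's actual class (raising NullPointerException for a null
// receiver) and fall through to prepareMethodForCall.  The
// *FromInstanceAndReference variant first resolves a symbolic
// (method, reference) pair recorded at compile time.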
int64_t findInterfaceMethodFromInstance(MyThread* t,
                                        GcMethod* method,
                                        object instance)
{
  if (instance) {
    return prepareMethodForCall(
        t, findInterfaceMethod(t, method, objectClass(t, instance)));
  } else {
    throwNew(t, GcNullPointerException::Type);
  }
}

int64_t findInterfaceMethodFromInstanceAndReference(MyThread* t,
                                                    GcPair* pair,
                                                    object instance)
{
  PROTECT(t, instance);

  GcMethod* method = resolveMethod(t, pair);

  return findInterfaceMethodFromInstance(t, method, instance);
}

void checkMethod(Thread* t, GcMethod* method, bool shouldBeStatic)
{
  if (((method->flags() & ACC_STATIC) == 0) == shouldBeStatic) {
    throwNew(t,
             GcIncompatibleClassChangeError::Type,
             "expected %s.%s%s to be %s",
             method->class_()->name()->body().begin(),
             method->name()->body().begin(),
             method->spec()->body().begin(),
             shouldBeStatic ? "static" : "non-static");
  }
}

int64_t findSpecialMethodFromReference(MyThread* t, GcPair* pair)
{
  PROTECT(t, pair);

  GcMethod* target = resolveMethod(t, pair);

  GcClass* class_ = cast<GcMethod>(t, pair->first())->class_();
  if (isSpecialMethod(t, target, class_)) {
    target = findVirtualMethod(t, target, class_->super());
  }

  checkMethod(t, target, false);

  return prepareMethodForCall(t, target);
}

int64_t findStaticMethodFromReference(MyThread* t, GcPair* pair)
{
  GcMethod* target = resolveMethod(t, pair);

  checkMethod(t, target, true);

  return prepareMethodForCall(t, target);
}

int64_t findVirtualMethodFromReference(MyThread* t,
                                       GcPair* pair,
                                       object instance)
{
  PROTECT(t, instance);

  GcMethod* target = resolveMethod(t, pair);

  target = findVirtualMethod(t, target, objectClass(t, instance));

  checkMethod(t, target, false);

  return prepareMethodForCall(t, target);
}

int64_t getMethodAddress(MyThread* t, GcMethod* target)
{
  return prepareMethodForCall(t, target);
}

int64_t getJClassFromReference(MyThread* t, GcPair* pair)
{
  return reinterpret_cast<intptr_t>(getJClass(
      t,
      resolveClass(t,
                   cast<GcMethod>(t, pair->first())->class_()->loader(),
                   cast<GcReference>(t, pair->second())->name())));
}

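// Estimate the heap space needed to record a stack trace for the current
// thread: walk the stack once to count frames, then size an array plus one
// trace element per frame.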
unsigned traceSize(Thread* t)
{
  class Counter : public Processor::StackVisitor {
   public:
    Counter() : count(0)
    {
    }

    virtual bool visit(Processor::StackWalker*)
    {
      ++count;
      return true;
    }

    unsigned count;
  } counter;

  t->m->processor->walkStack(t, &counter);

  return pad(GcArray::FixedSize)
         + (counter.count * pad(ArrayElementSizeOfArray))
         + (counter.count * pad(GcTraceElement::FixedSize));
}

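// Exception-throwing helpers used by compiled code.  Each first tries to
// reserve enough memory for a fresh exception plus its stack trace; if that
// fails, a preallocated instance from the VM roots is thrown instead so that
// low-memory conditions still unwind correctly.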
void NO_RETURN throwArithmetic(MyThread* t)
{
  if (ensure(t, GcArithmeticException::FixedSize + traceSize(t))) {
    t->setFlag(Thread::TracingFlag);
    THREAD_RESOURCE0(t, t->clearFlag(Thread::TracingFlag));

    throwNew(t, GcArithmeticException::Type);
  } else {
    // not enough memory available for a new exception and stack trace
    // -- use a preallocated instance instead
    throw_(t, roots(t)->arithmeticException());
  }
}

int64_t divideLong(MyThread* t, int64_t b, int64_t a)
{
  if (LIKELY(b)) {
    return a / b;
  } else {
    throwArithmetic(t);
  }
}

int64_t divideInt(MyThread* t, int32_t b, int32_t a)
{
  if (LIKELY(b)) {
    return a / b;
  } else {
    throwArithmetic(t);
  }
}

int64_t moduloLong(MyThread* t, int64_t b, int64_t a)
{
  if (LIKELY(b)) {
    return a % b;
  } else {
    throwArithmetic(t);
  }
}

int64_t moduloInt(MyThread* t, int32_t b, int32_t a)
{
  if (LIKELY(b)) {
    return a % b;
  } else {
    throwArithmetic(t);
  }
}

uint64_t makeBlankObjectArray(MyThread* t, GcClass* class_, int32_t length)
{
  if (length >= 0) {
    return reinterpret_cast<uint64_t>(makeObjectArray(t, class_, length));
  } else {
    throwNew(t, GcNegativeArraySizeException::Type, "%d", length);
  }
}

uint64_t makeBlankObjectArrayFromReference(MyThread* t,
                                           GcPair* pair,
                                           int32_t length)
{
  return makeBlankObjectArray(
      t,
      resolveClass(t,
                   cast<GcMethod>(t, pair->first())->class_()->loader(),
                   cast<GcReference>(t, pair->second())->name()),
      length);
}

uint64_t makeBlankArray(MyThread* t, unsigned type, int32_t length)
{
  if (length >= 0) {
    switch (type) {
    case T_BOOLEAN:
      return reinterpret_cast<uintptr_t>(makeBooleanArray(t, length));
    case T_CHAR:
      return reinterpret_cast<uintptr_t>(makeCharArray(t, length));
    case T_FLOAT:
      return reinterpret_cast<uintptr_t>(makeFloatArray(t, length));
    case T_DOUBLE:
      return reinterpret_cast<uintptr_t>(makeDoubleArray(t, length));
    case T_BYTE:
      return reinterpret_cast<uintptr_t>(makeByteArray(t, length));
    case T_SHORT:
      return reinterpret_cast<uintptr_t>(makeShortArray(t, length));
    case T_INT:
      return reinterpret_cast<uintptr_t>(makeIntArray(t, length));
    case T_LONG:
      return reinterpret_cast<uintptr_t>(makeLongArray(t, length));
    default:
      abort(t);
    }
  } else {
    throwNew(t, GcNegativeArraySizeException::Type, "%d", length);
  }
}

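// Binary search over a sorted table of (key, address) pairs laid out as
// consecutive machine words, returning default_ when the key is absent; this
// is the form of table used for lookupswitch-style dispatch.  Illustrative
// example (a1, a5, a9 stand for arbitrary code addresses):
//
//   uintptr_t table[] = {1, a1, 5, a5, 9, a9};   // sorted by key
//   lookUpAddress(5, table, 3, d) == a5
//   lookUpAddress(7, table, 3, d) == d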
uint64_t lookUpAddress(int32_t key,
                       uintptr_t* start,
                       int32_t count,
                       uintptr_t default_)
{
  int32_t bottom = 0;
  int32_t top = count;
  for (int32_t span = top - bottom; span; span = top - bottom) {
    int32_t middle = bottom + (span / 2);
    uintptr_t* p = start + (middle * 2);
    int32_t k = *p;

    if (key < k) {
      top = middle;
    } else if (key > k) {
      bottom = middle + 1;
    } else {
      return p[1];
    }
  }

  return default_;
}

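// Null-checked wrappers called from compiled code: each raises
// NullPointerException on a null target before delegating to the underlying
// field store, acquire, or release.  The *OnEntrance variant brackets the
// acquire with methodLockIsClean so the VM can tell whether a synchronized
// method's monitor was fully acquired when the thread is interrupted.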
void setMaybeNull(MyThread* t, object o, unsigned offset, object value)
{
  if (LIKELY(o)) {
    setField(t, o, offset, value);
  } else {
    throwNew(t, GcNullPointerException::Type);
  }
}

void acquireMonitorForObject(MyThread* t, object o)
{
  if (LIKELY(o)) {
    acquire(t, o);
  } else {
    throwNew(t, GcNullPointerException::Type);
  }
}

void acquireMonitorForObjectOnEntrance(MyThread* t, object o)
{
  if (LIKELY(o)) {
    t->methodLockIsClean = false;
    acquire(t, o);
    t->methodLockIsClean = true;
  } else {
    throwNew(t, GcNullPointerException::Type);
  }
}

void releaseMonitorForObject(MyThread* t, object o)
{
  if (LIKELY(o)) {
    release(t, o);
  } else {
    throwNew(t, GcNullPointerException::Type);
  }
}

object makeMultidimensionalArray2(MyThread* t,
                                  GcClass* class_,
                                  uintptr_t* countStack,
                                  int32_t dimensions)
{
  PROTECT(t, class_);

  THREAD_RUNTIME_ARRAY(t, int32_t, counts, dimensions);
  for (int i = dimensions - 1; i >= 0; --i) {
    RUNTIME_ARRAY_BODY(counts)[i] = countStack[dimensions - i - 1];
    if (UNLIKELY(RUNTIME_ARRAY_BODY(counts)[i] < 0)) {
      throwNew(t,
               GcNegativeArraySizeException::Type,
               "%d",
               RUNTIME_ARRAY_BODY(counts)[i]);
      return 0;
    }
  }

  object array = makeArray(t, RUNTIME_ARRAY_BODY(counts)[0]);
  setObjectClass(t, array, class_);
  PROTECT(t, array);

  populateMultiArray(t, array, RUNTIME_ARRAY_BODY(counts), 0, dimensions);

  return array;
}

uint64_t makeMultidimensionalArray(MyThread* t,
                                   GcClass* class_,
                                   int32_t dimensions,
                                   int32_t offset)
{
  return reinterpret_cast<uintptr_t>(makeMultidimensionalArray2(
      t, class_, static_cast<uintptr_t*>(t->stack) + offset, dimensions));
}

uint64_t makeMultidimensionalArrayFromReference(MyThread* t,
                                                GcPair* pair,
                                                int32_t dimensions,
                                                int32_t offset)
{
  return makeMultidimensionalArray(
      t,
      resolveClass(t,
                   cast<GcMethod>(t, pair->first())->class_()->loader(),
                   cast<GcReference>(t, pair->second())->name()),
      dimensions,
      offset);
}

void NO_RETURN throwArrayIndexOutOfBounds(MyThread* t)
{
  if (ensure(t, GcArrayIndexOutOfBoundsException::FixedSize + traceSize(t))) {
    t->setFlag(Thread::TracingFlag);
    THREAD_RESOURCE0(t, t->clearFlag(Thread::TracingFlag));

    throwNew(t, GcArrayIndexOutOfBoundsException::Type);
  } else {
    // not enough memory available for a new exception and stack trace
    // -- use a preallocated instance instead
    throw_(t, roots(t)->arrayIndexOutOfBoundsException());
  }
}

void NO_RETURN throwStackOverflow(MyThread* t)
{
  throwNew(t, GcStackOverflowError::Type);
}

void NO_RETURN throw_(MyThread* t, GcThrowable* o)
{
  if (LIKELY(o)) {
    vm::throw_(t, o);
  } else {
    throwNew(t, GcNullPointerException::Type);
  }
}

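// checkCast mirrors the checkcast bytecode: on failure it converts both class
// names from internal ('/'-separated) form to dotted form before raising
// ClassCastException with a readable message.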
void checkCast(MyThread* t, GcClass* class_, object o)
{
  if (UNLIKELY(o and not isAssignableFrom(t, class_, objectClass(t, o)))) {
    GcByteArray* classNameFrom = objectClass(t, o)->name();
    GcByteArray* classNameTo = class_->name();
    THREAD_RUNTIME_ARRAY(t, char, classFrom, classNameFrom->length());
    THREAD_RUNTIME_ARRAY(t, char, classTo, classNameTo->length());
    replace('/',
            '.',
            RUNTIME_ARRAY_BODY(classFrom),
            reinterpret_cast<char*>(classNameFrom->body().begin()));
    replace('/',
            '.',
            RUNTIME_ARRAY_BODY(classTo),
            reinterpret_cast<char*>(classNameTo->body().begin()));
    throwNew(t,
             GcClassCastException::Type,
             "%s cannot be cast to %s",
             RUNTIME_ARRAY_BODY(classFrom),
             RUNTIME_ARRAY_BODY(classTo));
  }
}

void checkCastFromReference(MyThread* t, GcPair* pair, object o)
{
  PROTECT(t, o);

  GcClass* c
      = resolveClass(t,
                     cast<GcMethod>(t, pair->first())->class_()->loader(),
                     cast<GcReference>(t, pair->second())->name());

  checkCast(t, c, o);
}

GcField* resolveField(Thread* t, GcPair* pair)
{
  GcReference* reference = cast<GcReference>(t, pair->second());
  PROTECT(t, reference);

  GcClass* class_ = resolveClassInObject(
      t,
      cast<GcMethod>(t, pair->first())->class_()->loader(),
      reference,
      ReferenceClass);

  return cast<GcField>(t,
                       findInHierarchy(t,
                                       class_,
                                       reference->name(),
                                       reference->spec(),
                                       findFieldInClass,
                                       GcNoSuchFieldError::Type));
}

uint64_t getFieldValue(Thread* t, object target, GcField* field)
{
  switch (field->code()) {
  case ByteField:
  case BooleanField:
    return fieldAtOffset<int8_t>(target, field->offset());

  case CharField:
  case ShortField:
    return fieldAtOffset<int16_t>(target, field->offset());

  case FloatField:
  case IntField:
    return fieldAtOffset<int32_t>(target, field->offset());

  case DoubleField:
  case LongField:
    return fieldAtOffset<int64_t>(target, field->offset());

  case ObjectField:
    return fieldAtOffset<intptr_t>(target, field->offset());

  default:
    abort(t);
  }
}

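// The *FromReference field accessors handle fields that were still symbolic
// at compile time: the (referencing method, reference) pair is resolved to a
// GcField at run time, the declaring class is initialized for static
// accesses, and the field lock is held for the width-appropriate read or
// write.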
uint64_t getStaticFieldValueFromReference(MyThread* t, GcPair* pair)
{
  GcField* field = resolveField(t, pair);
  PROTECT(t, field);

  initClass(t, field->class_());

  ACQUIRE_FIELD_FOR_READ(t, field);

  return getFieldValue(t, field->class_()->staticTable(), field);
}

uint64_t getFieldValueFromReference(MyThread* t, GcPair* pair, object instance)
{
  PROTECT(t, instance);

  GcField* field = resolveField(t, pair);
  PROTECT(t, field);

  ACQUIRE_FIELD_FOR_READ(t, field);

  return getFieldValue(t, instance, field);
}

void setStaticLongFieldValueFromReference(MyThread* t,
                                          GcPair* pair,
                                          uint64_t value)
{
  GcField* field = resolveField(t, pair);
  PROTECT(t, field);

  initClass(t, field->class_());

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  fieldAtOffset<int64_t>(field->class_()->staticTable(), field->offset())
      = value;
}

void setLongFieldValueFromReference(MyThread* t,
                                    GcPair* pair,
                                    object instance,
                                    uint64_t value)
{
  PROTECT(t, instance);

  GcField* field = resolveField(t, pair);
  PROTECT(t, field);

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  fieldAtOffset<int64_t>(instance, field->offset()) = value;
}

void setStaticObjectFieldValueFromReference(MyThread* t,
                                            GcPair* pair,
                                            object value)
{
  PROTECT(t, value);

  GcField* field = resolveField(t, pair);
  PROTECT(t, field);

  initClass(t, field->class_());

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  setField(t, field->class_()->staticTable(), field->offset(), value);
}

void setObjectFieldValueFromReference(MyThread* t,
                                      GcPair* pair,
                                      object instance,
                                      object value)
{
  PROTECT(t, instance);
  PROTECT(t, value);

  GcField* field = resolveField(t, pair);
  PROTECT(t, field);

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  setField(t, instance, field->offset(), value);
}

void setFieldValue(MyThread* t, object target, GcField* field, uint32_t value)
{
  switch (field->code()) {
  case ByteField:
  case BooleanField:
    fieldAtOffset<int8_t>(target, field->offset()) = value;
    break;

  case CharField:
  case ShortField:
    fieldAtOffset<int16_t>(target, field->offset()) = value;
    break;

  case FloatField:
  case IntField:
    fieldAtOffset<int32_t>(target, field->offset()) = value;
    break;

  default:
    abort(t);
  }
}

void setStaticFieldValueFromReference(MyThread* t, GcPair* pair, uint32_t value)
{
  GcField* field = resolveField(t, pair);
  PROTECT(t, field);

  initClass(t, field->class_());

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  setFieldValue(t, field->class_()->staticTable(), field, value);
}

void setFieldValueFromReference(MyThread* t,
                                GcPair* pair,
                                object instance,
                                uint32_t value)
{
  PROTECT(t, instance);
  GcField* field = resolveField(t, pair);
  PROTECT(t, field);

  ACQUIRE_FIELD_FOR_WRITE(t, field);

  setFieldValue(t, instance, field, value);
}

uint64_t instanceOf64(Thread* t, GcClass* class_, object o)
{
  return instanceOf(t, class_, o);
}

uint64_t instanceOfFromReference(Thread* t, GcPair* pair, object o)
{
  PROTECT(t, o);

  GcClass* c
      = resolveClass(t,
                     cast<GcMethod>(t, pair->first())->class_()->loader(),
                     cast<GcReference>(t, pair->second())->name());

  return instanceOf64(t, c, o);
}

uint64_t makeNewGeneral64(Thread* t, GcClass* class_)
{
  PROTECT(t, class_);

  initClass(t, class_);

  return reinterpret_cast<uintptr_t>(makeNewGeneral(t, class_));
}

uint64_t makeNew64(Thread* t, GcClass* class_)
{
  PROTECT(t, class_);

  initClass(t, class_);

  return reinterpret_cast<uintptr_t>(makeNew(t, class_));
}

uint64_t makeNewFromReference(Thread* t, GcPair* pair)
{
  GcClass* class_
      = resolveClass(t,
                     cast<GcMethod>(t, pair->first())->class_()->loader(),
                     cast<GcReference>(t, pair->second())->name());

  PROTECT(t, class_);

  initClass(t, class_);

  return makeNewGeneral64(t, class_);
}

uint64_t getJClass64(Thread* t, GcClass* class_)
{
  return reinterpret_cast<uintptr_t>(getJClass(t, class_));
}

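// useLongJump decides whether a call from JIT-compiled code can use the
// architecture's immediate call/jump encoding: if the target lies farther
// from the code allocator's region than maximumImmediateJump allows, the
// compiler must emit a long (register-indirect) call instead.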
void gcIfNecessary(MyThread* t)
{
  stress(t);

  if (UNLIKELY(t->getFlags() & Thread::UseBackupHeapFlag)) {
    collect(t, Heap::MinorCollection);
  }
}

void idleIfNecessary(MyThread* t)
{
  if (UNLIKELY(t->m->exclusive)) {
    ENTER(t, Thread::IdleState);
  }
}

bool useLongJump(MyThread* t, uintptr_t target)
{
  uintptr_t reach = t->arch->maximumImmediateJump();
  FixedAllocator* a = codeAllocator(t);
  uintptr_t start = reinterpret_cast<uintptr_t>(a->memory.begin());
  uintptr_t end = reinterpret_cast<uintptr_t>(a->memory.begin())
                  + a->memory.count;
  assertT(t, end - start < reach);

  return (target > end && (target - start) > reach)
         or (target < start && (end - target) > reach);
}

FILE* compileLog = 0;

void logCompile(MyThread* t,
                const void* code,
                unsigned size,
                const char* class_,
                const char* name,
                const char* spec);

unsigned simpleFrameMapTableSize(MyThread* t, GcMethod* method, GcIntArray* map)
{
  int size = frameMapSizeInBits(t, method);
  return ceilingDivide(map->length() * size, 32 + size);
}

#ifndef AVIAN_AOT_ONLY
unsigned resultSize(MyThread* t, unsigned code)
{
  switch (code) {
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case FloatField:
  case IntField:
    return 4;

  case ObjectField:
    return TargetBytesPerWord;

  case LongField:
  case DoubleField:
    return 8;

  case VoidField:
    return 0;

  default:
    abort(t);
  }
}

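// popField maps a JVM field type code to the IR value popped from the
// operand stack: sub-int types and int share i4, float and double use the
// floating-point types, and long/double come off the stack as large
// (two-slot) values.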
ir::Value* popField(MyThread* t, Frame* frame, int code)
{
  switch (code) {
  case ByteField:
  case BooleanField:
  case CharField:
  case ShortField:
  case IntField:
    return frame->pop(ir::Type::i4());
  case FloatField:
    return frame->pop(ir::Type::f4());

  case LongField:
    return frame->popLarge(ir::Type::i8());
  case DoubleField:
    return frame->popLarge(ir::Type::f8());

  case ObjectField:
    return frame->pop(ir::Type::object());

  default:
    abort(t);
  }
}

void compileSafePoint(MyThread* t, Compiler* c, Frame* frame)
{
  c->nativeCall(
      c->constant(getThunk(t, idleIfNecessaryThunk), ir::Type::iptr()),
      0,
      frame->trace(0, 0),
      ir::Type::void_(),
      args(c->threadRegister()));
}

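// compileDirectInvoke emits the call sequence for a statically-bound target.
// Depending on whether the target address is already known, whether the call
// must go through the default or native thunk, and whether this is a
// (possibly native) tail call, it either calls the address directly, calls
// through a thunk, or stores the return address in the thread and exits
// through the appropriate thunk.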
void compileDirectInvoke(MyThread* t,
                         Frame* frame,
                         GcMethod* target,
                         bool tailCall,
                         bool useThunk,
                         avian::codegen::Promise* addressPromise)
{
  avian::codegen::Compiler* c = frame->c;

  unsigned flags
      = (avian::codegen::TailCalls and tailCall ? Compiler::TailJump : 0);
  unsigned traceFlags;

  if (addressPromise == 0 and useLongJump(t, methodAddress(t, target))) {
    flags |= Compiler::LongJumpOrCall;
    traceFlags = TraceElement::LongCall;
  } else {
    traceFlags = 0;
  }

  if (useThunk or (avian::codegen::TailCalls and tailCall
                   and (target->flags() & ACC_NATIVE))) {
    if (frame->context->bootContext == 0) {
      flags |= Compiler::Aligned;
    }

    if (avian::codegen::TailCalls and tailCall) {
      traceFlags |= TraceElement::TailCall;

      TraceElement* trace = frame->trace(target, traceFlags);

      avian::codegen::Promise* returnAddressPromise
          = new (frame->context->zone.allocate(sizeof(TraceElementPromise)))
              TraceElementPromise(t->m->system, trace);

      frame->stackCall(
          c->promiseConstant(returnAddressPromise, ir::Type::iptr()),
          target,
          flags,
          trace);

      c->store(frame->absoluteAddressOperand(returnAddressPromise),
               c->memory(c->threadRegister(),
                         ir::Type::iptr(),
                         TARGET_THREAD_TAILADDRESS));

      c->exit(c->constant(
          (target->flags() & ACC_NATIVE) ? nativeThunk(t) : defaultThunk(t),
          ir::Type::iptr()));
    } else {
      return frame->stackCall(c->constant(defaultThunk(t), ir::Type::iptr()),
                              target,
                              flags,
                              frame->trace(target, traceFlags));
    }
  } else {
    ir::Value* address
        = (addressPromise
               ? c->promiseConstant(addressPromise, ir::Type::iptr())
               : c->constant(methodAddress(t, target), ir::Type::iptr()));

    frame->stackCall(
        address,
        target,
        flags,
        tailCall ? 0 : frame->trace((target->flags() & ACC_NATIVE) ? target : 0,
                                    0));
  }
}


bool compileDirectInvoke(MyThread* t,
                         Frame* frame,
                         GcMethod* target,
                         bool tailCall)
{
  // don't bother calling an empty method unless calling it might
  // cause the class to be initialized, which may have side effects
  if (emptyMethod(t, target) and (not classNeedsInit(t, target->class_()))) {
    frame->popFootprint(target->parameterFootprint());
    tailCall = false;
  } else {
    BootContext* bc = frame->context->bootContext;
    if (bc) {
      if ((target->class_() == frame->context->method->class_()
           or (not classNeedsInit(t, target->class_())))
          and (not(avian::codegen::TailCalls and tailCall
                   and (target->flags() & ACC_NATIVE)))) {
        avian::codegen::Promise* p = new (bc->zone)
            avian::codegen::ListenPromise(t->m->system, bc->zone);

        PROTECT(t, target);
        object pointer = makePointer(t, p);
        bc->calls = makeTriple(t, target, pointer, bc->calls);

        compileDirectInvoke(t, frame, target, tailCall, false, p);
      } else {
        compileDirectInvoke(t, frame, target, tailCall, true, 0);
      }
    } else if (unresolved(t, methodAddress(t, target))
               or classNeedsInit(t, target->class_())) {
      compileDirectInvoke(t, frame, target, tailCall, true, 0);
    } else {
      compileDirectInvoke(t, frame, target, tailCall, false, 0);
    }
  }

  return tailCall;
}
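
// Taken together, the compileDirectInvoke overloads pick one of three
// strategies: when a boot image is being built, most calls are recorded with
// a ListenPromise so they can be patched once the target's final address is
// known; calls to unresolved targets or to classes that still need
// initialization go through the default thunk; everything else becomes a
// direct call to the already-compiled target.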

void compileReferenceInvoke(Frame* frame,
                            ir::Value* method,
                            GcReference* reference,
                            bool isStatic,
                            bool tailCall)
{
  frame->referenceStackCall(isStatic,
                            method,
                            reference,
                            tailCall ? Compiler::TailJump : 0,
                            frame->trace(0, 0));
}

void compileDirectReferenceInvoke(MyThread* t,
                                  Frame* frame,
                                  Thunk thunk,
                                  GcReference* reference,
                                  bool isStatic,
                                  bool tailCall)
{
  avian::codegen::Compiler* c = frame->c;

  PROTECT(t, reference);

  GcPair* pair = makePair(t, frame->context->method, reference);

  compileReferenceInvoke(
      frame,
      c->nativeCall(c->constant(getThunk(t, thunk), ir::Type::iptr()),
                    0,
                    frame->trace(0, 0),
                    ir::Type::iptr(),
                    args(c->threadRegister(), frame->append(pair))),
      reference,
      isStatic,
      tailCall);
}

void compileAbstractInvoke(Frame* frame,
                           ir::Value* method,
                           GcMethod* target,
                           bool tailCall)
{
  frame->stackCall(
      method, target, tailCall ? Compiler::TailJump : 0, frame->trace(0, 0));
}

void compileDirectAbstractInvoke(MyThread* t,
                                 Frame* frame,
                                 Thunk thunk,
                                 GcMethod* target,
                                 bool tailCall)
{
  avian::codegen::Compiler* c = frame->c;

  compileAbstractInvoke(
      frame,
      c->nativeCall(c->constant(getThunk(t, thunk), ir::Type::iptr()),
                    0,
                    frame->trace(0, 0),
                    ir::Type::iptr(),
                    args(c->threadRegister(), frame->append(target))),
      target,
      tailCall);
}

void handleMonitorEvent(MyThread* t, Frame* frame, intptr_t function)
{
  avian::codegen::Compiler* c = frame->c;
  GcMethod* method = frame->context->method;

  if (method->flags() & ACC_SYNCHRONIZED) {
    ir::Value* lock;
    if (method->flags() & ACC_STATIC) {
      PROTECT(t, method);

      lock = frame->append(method->class_());
    } else {
      lock = loadLocal(
          frame->context, 1, ir::Type::object(), savedTargetIndex(t, method));
    }

    c->nativeCall(c->constant(function, ir::Type::iptr()),
                  0,
                  frame->trace(0, 0),
                  ir::Type::void_(),
                  args(c->threadRegister(), lock));
  }
}

void handleEntrance(MyThread* t, Frame* frame)
{
  GcMethod* method = frame->context->method;

  if ((method->flags() & (ACC_SYNCHRONIZED | ACC_STATIC)) == ACC_SYNCHRONIZED) {
    // save 'this' pointer in case it is overwritten.
    unsigned index = savedTargetIndex(t, method);
    storeLocal(frame->context,
               1,
               ir::Type::object(),
               loadLocal(frame->context, 1, ir::Type::object(), 0),
               index);
    frame->set(index, ir::Type::object());
  }

  handleMonitorEvent(
      t, frame, getThunk(t, acquireMonitorForObjectOnEntranceThunk));
}

void handleExit(MyThread* t, Frame* frame)
{
  handleMonitorEvent(t, frame, getThunk(t, releaseMonitorForObjectThunk));
}
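
// handleEntrance and handleExit bracket the body of a synchronized method:
// the former saves the receiver of a non-static synchronized method and
// acquires the appropriate monitor (the receiver, or the class for static
// methods), and the latter releases that monitor before the method returns.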

bool inTryBlock(MyThread* t UNUSED, GcCode* code, unsigned ip)
{
  GcExceptionHandlerTable* table
      = cast<GcExceptionHandlerTable>(t, code->exceptionHandlerTable());
  if (table) {
    unsigned length = table->length();
    for (unsigned i = 0; i < length; ++i) {
      uint64_t eh = table->body()[i];
      if (ip >= exceptionHandlerStart(eh) and ip < exceptionHandlerEnd(eh)) {
        return true;
      }
    }
  }
  return false;
}
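
// Code generation consults inTryBlock before emitting operations that can
// trap or throw: when the current ip is covered by an exception handler,
// the locals are saved and a trace element is recorded so the handler
// observes a consistent frame.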

bool needsReturnBarrier(MyThread* t UNUSED, GcMethod* method)
{
  return (method->flags() & ConstructorFlag)
         and (method->class_()->vmFlags() & HasFinalMemberFlag);
}

bool returnsNext(MyThread* t, GcCode* code, unsigned ip)
{
  switch (code->body()[ip]) {
  case return_:
  case areturn:
  case ireturn:
  case freturn:
  case lreturn:
  case dreturn:
    return true;

  case goto_: {
    uint32_t offset = codeReadInt16(t, code, ++ip);
    uint32_t newIp = (ip - 3) + offset;
    assertT(t, newIp < code->length());

    return returnsNext(t, code, newIp);
  }

  case goto_w: {
    uint32_t offset = codeReadInt32(t, code, ++ip);
    uint32_t newIp = (ip - 5) + offset;
    assertT(t, newIp < code->length());

    return returnsNext(t, code, newIp);
  }

  default:
    return false;
  }
}

bool isTailCall(MyThread* t,
                GcCode* code,
                unsigned ip,
                GcMethod* caller,
                int calleeReturnCode,
                GcByteArray* calleeClassName,
                GcByteArray* calleeMethodName,
                GcByteArray* calleeMethodSpec)
{
  return avian::codegen::TailCalls
         and ((caller->flags() & ACC_SYNCHRONIZED) == 0)
         and (not inTryBlock(t, code, ip - 1))
         and (not needsReturnBarrier(t, caller))
         and (caller->returnCode() == VoidField
              or caller->returnCode() == calleeReturnCode)
         and returnsNext(t, code, ip)
         and t->m->classpath->canTailCall(t,
                                          caller,
                                          calleeClassName,
                                          calleeMethodName,
                                          calleeMethodSpec);
}

bool isTailCall(MyThread* t,
                GcCode* code,
                unsigned ip,
                GcMethod* caller,
                GcMethod* callee)
{
  return isTailCall(t,
                    code,
                    ip,
                    caller,
                    callee->returnCode(),
                    callee->class_()->name(),
                    callee->name(),
                    callee->spec());
}

bool isReferenceTailCall(MyThread* t,
                         GcCode* code,
                         unsigned ip,
                         GcMethod* caller,
                         GcReference* calleeReference)
{
  return isTailCall(t,
                    code,
                    ip,
                    caller,
                    methodReferenceReturnCode(t, calleeReference),
                    calleeReference->class_(),
                    calleeReference->name(),
                    calleeReference->spec());
}
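
// A call site qualifies as a tail call only when TailCalls is enabled for
// this build, the caller is not synchronized, the site lies outside every
// try block, no return barrier is required, the callee's return type is
// compatible with the caller's (or the caller returns void), the next
// instruction is a return, and the classpath's canTailCall hook agrees.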

lir::TernaryOperation toCompilerJumpOp(MyThread* t, unsigned instruction)
{
  switch (instruction) {
  case ifeq:
  case if_icmpeq:
  case if_acmpeq:
  case ifnull:
    return lir::JumpIfEqual;
  case ifne:
  case if_icmpne:
  case if_acmpne:
  case ifnonnull:
    return lir::JumpIfNotEqual;
  case ifgt:
  case if_icmpgt:
    return lir::JumpIfGreater;
  case ifge:
  case if_icmpge:
    return lir::JumpIfGreaterOrEqual;
  case iflt:
  case if_icmplt:
    return lir::JumpIfLess;
  case ifle:
  case if_icmple:
    return lir::JumpIfLessOrEqual;
  default:
    abort(t);
  }
}
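
// toCompilerJumpOp collapses the related integer and reference comparison
// bytecodes (ifeq/if_icmpeq/if_acmpeq/ifnull and friends) onto the six LIR
// conditional-jump operations consumed by condJump.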

bool integerBranch(MyThread* t,
                   Frame* frame,
                   GcCode* code,
                   unsigned& ip,
                   ir::Value* a,
                   ir::Value* b,
                   unsigned* newIpp)
{
  if (ip + 3 > code->length()) {
    return false;
  }

  avian::codegen::Compiler* c = frame->c;
  unsigned instruction = code->body()[ip++];
  uint32_t offset = codeReadInt16(t, code, ip);
  uint32_t newIp = (ip - 3) + offset;
  assertT(t, newIp < code->length());

  ir::Value* target = frame->machineIpValue(newIp);

  switch (instruction) {
  case ifeq:
  case ifne:
  case ifgt:
  case ifge:
  case iflt:
  case ifle:
    c->condJump(toCompilerJumpOp(t, instruction), a, b, target);
    break;

  default:
    ip -= 3;
    return false;
  }

  *newIpp = newIp;
  return true;
}
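
// integerBranch tries to fuse an integer comparison with the conditional
// branch that immediately follows it: on success it emits a single condJump
// and reports the branch target through newIpp; otherwise it rewinds ip and
// returns false so the caller materializes the comparison result instead.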

lir::TernaryOperation toCompilerFloatJumpOp(MyThread* t,
                                            unsigned instruction,
                                            bool lessIfUnordered)
{
  switch (instruction) {
  case ifeq:
    return lir::JumpIfFloatEqual;
  case ifne:
    return lir::JumpIfFloatNotEqual;
  case ifgt:
    if (lessIfUnordered) {
      return lir::JumpIfFloatGreater;
    } else {
      return lir::JumpIfFloatGreaterOrUnordered;
    }
  case ifge:
    if (lessIfUnordered) {
      return lir::JumpIfFloatGreaterOrEqual;
    } else {
      return lir::JumpIfFloatGreaterOrEqualOrUnordered;
    }
  case iflt:
    if (lessIfUnordered) {
      return lir::JumpIfFloatLessOrUnordered;
    } else {
      return lir::JumpIfFloatLess;
    }
  case ifle:
    if (lessIfUnordered) {
      return lir::JumpIfFloatLessOrEqualOrUnordered;
    } else {
      return lir::JumpIfFloatLessOrEqual;
    }
  default:
    abort(t);
  }
}
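
// The lessIfUnordered flag distinguishes fcmpl/dcmpl (where an unordered
// comparison, i.e. a NaN operand, counts as "less") from fcmpg/dcmpg (where
// it counts as "greater"), so the fused branch chosen by
// toCompilerFloatJumpOp preserves Java's NaN semantics.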

bool floatBranch(MyThread* t,
                 Frame* frame,
                 GcCode* code,
                 unsigned& ip,
                 bool lessIfUnordered,
                 ir::Value* a,
                 ir::Value* b,
                 unsigned* newIpp)
{
  if (ip + 3 > code->length()) {
    return false;
  }

  avian::codegen::Compiler* c = frame->c;
  unsigned instruction = code->body()[ip++];
  uint32_t offset = codeReadInt16(t, code, ip);
  uint32_t newIp = (ip - 3) + offset;
  assertT(t, newIp < code->length());

  ir::Value* target = frame->machineIpValue(newIp);

  switch (instruction) {
  case ifeq:
  case ifne:
  case ifgt:
  case ifge:
  case iflt:
  case ifle:
    c->condJump(
        toCompilerFloatJumpOp(t, instruction, lessIfUnordered), a, b, target);
    break;

  default:
    ip -= 3;
    return false;
  }

  *newIpp = newIp;
  return true;
}

ir::Value* popLongAddress(Frame* frame)
{
  return TargetBytesPerWord == 8
             ? frame->popLarge(ir::Type::i8())
             : frame->c->load(ir::ExtendMode::Signed,
                              frame->popLarge(ir::Type::i8()),
                              ir::Type::iptr());
}
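
// The sun/misc/Unsafe intrinsics below receive raw addresses as Java longs;
// popLongAddress pops that long and, on 32-bit targets, converts it to
// pointer width before it is used as a memory operand.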
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
bool intrinsic(MyThread* t UNUSED, Frame* frame, GcMethod* target)
|
2009-10-18 01:26:14 +00:00
|
|
|
{
|
2014-06-29 04:57:07 +00:00
|
|
|
#define MATCH(name, constant) \
|
|
|
|
(name->length() == sizeof(constant) \
|
|
|
|
and ::strcmp(reinterpret_cast<char*>(name->body().begin()), constant) == 0)
|
2009-10-18 01:26:14 +00:00
|
|
|
|
2014-06-21 04:16:33 +00:00
|
|
|
GcByteArray* className = target->class_()->name();
|
2009-10-18 01:26:14 +00:00
|
|
|
if (UNLIKELY(MATCH(className, "java/lang/Math"))) {
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Compiler* c = frame->c;
|
2014-05-29 04:17:25 +00:00
|
|
|
if (MATCH(target->name(), "sqrt") and MATCH(target->spec(), "(D)D")) {
|
2014-05-05 21:21:48 +00:00
|
|
|
frame->pushLarge(
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::f8(),
|
|
|
|
c->unaryOp(lir::FloatSquareRoot, frame->popLarge(ir::Type::f8())));
|
2009-10-18 01:26:14 +00:00
|
|
|
return true;
|
2014-05-29 04:17:25 +00:00
|
|
|
} else if (MATCH(target->name(), "abs")) {
|
|
|
|
if (MATCH(target->spec(), "(I)I")) {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
|
|
|
c->unaryOp(lir::Absolute, frame->pop(ir::Type::i4())));
|
2009-10-18 01:26:14 +00:00
|
|
|
return true;
|
2014-05-29 04:17:25 +00:00
|
|
|
} else if (MATCH(target->spec(), "(J)J")) {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pushLarge(
|
|
|
|
ir::Type::i8(),
|
|
|
|
c->unaryOp(lir::Absolute, frame->popLarge(ir::Type::i8())));
|
2009-10-18 01:26:14 +00:00
|
|
|
return true;
|
2014-05-29 04:17:25 +00:00
|
|
|
} else if (MATCH(target->spec(), "(F)F")) {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::f4(),
|
|
|
|
c->unaryOp(lir::FloatAbsolute, frame->pop(ir::Type::f4())));
|
2009-10-18 01:26:14 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
2012-03-06 20:07:59 +00:00
|
|
|
} else if (UNLIKELY(MATCH(className, "sun/misc/Unsafe"))) {
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Compiler* c = frame->c;
|
2014-05-29 04:17:25 +00:00
|
|
|
if (MATCH(target->name(), "getByte") and MATCH(target->spec(), "(J)B")) {
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
|
|
|
frame->push(ir::Type::i4(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(address, ir::Type::i1()),
|
|
|
|
ir::Type::i4()));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if (MATCH(target->name(), "putByte")
|
|
|
|
and MATCH(target->spec(), "(JB)V")) {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* value = frame->pop(ir::Type::i4());
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
|
|
|
c->store(value, c->memory(address, ir::Type::i1()));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if ((MATCH(target->name(), "getShort")
|
|
|
|
and MATCH(target->spec(), "(J)S"))
|
|
|
|
or (MATCH(target->name(), "getChar")
|
|
|
|
and MATCH(target->spec(), "(J)C"))) {
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
|
|
|
frame->push(ir::Type::i4(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(address, ir::Type::i2()),
|
|
|
|
ir::Type::i4()));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if ((MATCH(target->name(), "putShort")
|
|
|
|
and MATCH(target->spec(), "(JS)V"))
|
|
|
|
or (MATCH(target->name(), "putChar")
|
|
|
|
and MATCH(target->spec(), "(JC)V"))) {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* value = frame->pop(ir::Type::i4());
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
|
|
|
c->store(value, c->memory(address, ir::Type::i2()));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if ((MATCH(target->name(), "getInt")
|
|
|
|
and MATCH(target->spec(), "(J)I"))
|
|
|
|
or (MATCH(target->name(), "getFloat")
|
|
|
|
and MATCH(target->spec(), "(J)F"))) {
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
2014-05-29 04:17:25 +00:00
|
|
|
ir::Type type = MATCH(target->name(), "getInt") ? ir::Type::i4()
|
|
|
|
: ir::Type::f4();
|
2014-07-17 00:07:56 +00:00
|
|
|
frame->push(
|
|
|
|
type,
|
|
|
|
c->load(ir::ExtendMode::Signed, c->memory(address, type), type));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if ((MATCH(target->name(), "putInt")
|
|
|
|
and MATCH(target->spec(), "(JI)V"))
|
|
|
|
or (MATCH(target->name(), "putFloat")
|
|
|
|
and MATCH(target->spec(), "(JF)V"))) {
|
2014-05-29 04:17:25 +00:00
|
|
|
ir::Type type = MATCH(target->name(), "putInt") ? ir::Type::i4()
|
|
|
|
: ir::Type::f4();
|
2014-05-05 21:21:48 +00:00
|
|
|
ir::Value* value = frame->pop(type);
|
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
2014-05-04 02:16:19 +00:00
|
|
|
c->store(value, c->memory(address, type));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if ((MATCH(target->name(), "getLong")
|
|
|
|
and MATCH(target->spec(), "(J)J"))
|
|
|
|
or (MATCH(target->name(), "getDouble")
|
|
|
|
and MATCH(target->spec(), "(J)D"))) {
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
2014-05-29 04:17:25 +00:00
|
|
|
ir::Type type = MATCH(target->name(), "getLong") ? ir::Type::i8()
|
|
|
|
: ir::Type::f8();
|
2014-07-17 00:07:56 +00:00
|
|
|
frame->pushLarge(
|
|
|
|
type,
|
|
|
|
c->load(ir::ExtendMode::Signed, c->memory(address, type), type));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if ((MATCH(target->name(), "putLong")
|
|
|
|
and MATCH(target->spec(), "(JJ)V"))
|
|
|
|
or (MATCH(target->name(), "putDouble")
|
|
|
|
and MATCH(target->spec(), "(JD)V"))) {
|
2014-05-29 04:17:25 +00:00
|
|
|
ir::Type type = MATCH(target->name(), "putLong") ? ir::Type::i8()
|
|
|
|
: ir::Type::f8();
|
2014-05-05 21:21:48 +00:00
|
|
|
ir::Value* value = frame->popLarge(type);
|
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
2014-05-04 02:16:19 +00:00
|
|
|
c->store(value, c->memory(address, type));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if (MATCH(target->name(), "getAddress")
|
|
|
|
and MATCH(target->spec(), "(J)J")) {
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
|
|
|
frame->pushLarge(ir::Type::i8(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(address, ir::Type::iptr()),
|
|
|
|
ir::Type::i8()));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if (MATCH(target->name(), "putAddress")
|
|
|
|
and MATCH(target->spec(), "(JJ)V")) {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* value = frame->popLarge(ir::Type::i8());
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* address = popLongAddress(frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pop(ir::Type::object());
|
|
|
|
c->store(value, c->memory(address, ir::Type::iptr()));
|
2012-03-06 20:07:59 +00:00
|
|
|
return true;
|
|
|
|
}
|
2009-10-18 01:26:14 +00:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
unsigned targetFieldOffset(Context* context, GcField* field)
{
  if (context->bootContext) {
    return context->bootContext->resolver->fieldOffset(context->thread, field);
  } else {
    return field->offset();
  }
}
|
|
|
|
|
2012-10-13 15:46:12 +00:00
|
|
|
class Stack {
|
|
|
|
public:
|
2014-07-11 15:50:18 +00:00
|
|
|
class MyResource : public Thread::AutoResource {
|
2012-10-13 15:46:12 +00:00
|
|
|
public:
|
2014-07-11 15:50:18 +00:00
|
|
|
MyResource(Stack* s) : AutoResource(s->thread), s(s)
|
|
|
|
{
|
|
|
|
}
|
2012-10-13 15:46:12 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
virtual void release()
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
s->zone.dispose();
|
|
|
|
}
|
|
|
|
|
|
|
|
Stack* s;
|
|
|
|
};
|
|
|
|
|
2014-05-05 04:02:47 +00:00
|
|
|
Stack(MyThread* t) : thread(t), zone(t->m->heap, 0), resource(this)
|
2014-07-11 15:50:18 +00:00
|
|
|
{
|
|
|
|
}
|
2012-10-13 15:46:12 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
~Stack()
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
zone.dispose();
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void pushValue(uintptr_t v)
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
*static_cast<uintptr_t*>(push(BytesPerWord)) = v;
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
uintptr_t peekValue(unsigned offset)
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
return *static_cast<uintptr_t*>(peek((offset + 1) * BytesPerWord));
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
uintptr_t popValue()
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
uintptr_t v = peekValue(0);
|
|
|
|
pop(BytesPerWord);
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void* push(unsigned size)
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
return zone.allocate(size);
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void* peek(unsigned size)
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
return zone.peek(size);
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void pop(unsigned size)
|
|
|
|
{
|
2012-10-13 15:46:12 +00:00
|
|
|
zone.pop(size);
|
|
|
|
}
|
|
|
|
|
|
|
|
MyThread* thread;
|
|
|
|
Zone zone;
|
|
|
|
MyResource resource;
|
|
|
|
};
|
|
|
|
|
|
|
|
class SwitchState {
|
|
|
|
public:
|
|
|
|
SwitchState(Compiler::State* state,
|
|
|
|
unsigned count,
|
|
|
|
unsigned defaultIp,
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* key,
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Promise* start,
|
2012-10-13 15:46:12 +00:00
|
|
|
int bottom,
|
2014-05-01 18:44:42 +00:00
|
|
|
int top)
|
|
|
|
: state(state),
|
|
|
|
count(count),
|
|
|
|
defaultIp(defaultIp),
|
|
|
|
key(key),
|
|
|
|
start(start),
|
|
|
|
bottom(bottom),
|
|
|
|
top(top),
|
|
|
|
index(0)
|
2014-07-11 15:50:18 +00:00
|
|
|
{
|
|
|
|
}
|
2012-10-13 15:46:12 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
Frame* frame()
|
|
|
|
{
|
|
|
|
return reinterpret_cast<Frame*>(reinterpret_cast<uint8_t*>(this)
|
|
|
|
- pad(count * 4) - pad(sizeof(Frame)));
|
2012-10-13 15:46:12 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
uint32_t* ipTable()
|
|
|
|
{
|
|
|
|
return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this)
|
|
|
|
- pad(count * 4));
|
2012-10-13 15:46:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Compiler::State* state;
|
|
|
|
unsigned count;
|
|
|
|
unsigned defaultIp;
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* key;
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Promise* start;
|
2012-10-13 15:46:12 +00:00
|
|
|
int bottom;
|
|
|
|
int top;
|
|
|
|
unsigned index;
|
|
|
|
};
|
|
|
|
|
2013-12-19 05:03:42 +00:00
|
|
|
lir::TernaryOperation toCompilerBinaryOp(MyThread* t, unsigned instruction)
|
|
|
|
{
|
|
|
|
switch (instruction) {
|
|
|
|
case iadd:
|
|
|
|
case ladd:
|
|
|
|
return lir::Add;
|
|
|
|
case ior:
|
|
|
|
case lor:
|
|
|
|
return lir::Or;
|
|
|
|
case ishl:
|
|
|
|
case lshl:
|
|
|
|
return lir::ShiftLeft;
|
|
|
|
case ishr:
|
|
|
|
case lshr:
|
|
|
|
return lir::ShiftRight;
|
|
|
|
case iushr:
|
|
|
|
case lushr:
|
|
|
|
return lir::UnsignedShiftRight;
|
|
|
|
case fadd:
|
|
|
|
case dadd:
|
|
|
|
return lir::FloatAdd;
|
|
|
|
case fsub:
|
|
|
|
case dsub:
|
|
|
|
return lir::FloatSubtract;
|
|
|
|
case fmul:
|
|
|
|
case dmul:
|
|
|
|
return lir::FloatMultiply;
|
|
|
|
case fdiv:
|
|
|
|
case ddiv:
|
|
|
|
return lir::FloatDivide;
|
|
|
|
case frem:
|
|
|
|
case vm::drem:
|
|
|
|
return lir::FloatRemainder;
|
|
|
|
case iand:
|
|
|
|
case land:
|
|
|
|
return lir::And;
|
|
|
|
case isub:
|
|
|
|
case lsub:
|
|
|
|
return lir::Subtract;
|
|
|
|
case ixor:
|
|
|
|
case lxor:
|
|
|
|
return lir::Xor;
|
|
|
|
case imul:
|
|
|
|
case lmul:
|
|
|
|
return lir::Multiply;
|
|
|
|
default:
|
|
|
|
abort(t);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
uintptr_t aioobThunk(MyThread* t);

uintptr_t stackOverflowThunk(MyThread* t);

void checkField(Thread* t, GcField* field, bool shouldBeStatic)
{
  if (((field->flags() & ACC_STATIC) == 0) == shouldBeStatic) {
    throwNew(t,
             GcIncompatibleClassChangeError::Type,
             "expected %s.%s to be %s",
             field->class_()->name()->body().begin(),
             field->name()->body().begin(),
             shouldBeStatic ? "static" : "non-static");
  }
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void compile(MyThread* t,
|
|
|
|
Frame* initialFrame,
|
|
|
|
unsigned initialIp,
|
|
|
|
int exceptionHandlerStart = -1)
|
2012-10-13 15:46:12 +00:00
|
|
|
{
|
2014-07-11 15:50:18 +00:00
|
|
|
enum { Return, Unbranch, Unsubroutine, Untable0, Untable1, Unswitch };
|
2012-10-13 15:46:12 +00:00
|
|
|
|
|
|
|
Frame* frame = initialFrame;
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Compiler* c = frame->c;
|
2007-12-31 22:40:56 +00:00
|
|
|
Context* context = frame->context;
|
2014-06-28 04:00:05 +00:00
|
|
|
unsigned stackSize = context->method->code()->maxStack();
|
2012-10-13 15:46:12 +00:00
|
|
|
Stack stack(t);
|
|
|
|
unsigned ip = initialIp;
|
2014-01-03 21:04:57 +00:00
|
|
|
unsigned newIp;
|
2012-10-13 15:46:12 +00:00
|
|
|
stack.pushValue(Return);
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
start:
|
|
|
|
ir::Type* stackMap
|
|
|
|
= static_cast<ir::Type*>(stack.push(stackSize * sizeof(ir::Type)));
|
2012-10-13 15:46:12 +00:00
|
|
|
frame = new (stack.push(sizeof(Frame))) Frame(frame, stackMap);
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
loop:
|
|
|
|
GcCode* code = context->method->code();
|
2007-12-09 22:45:43 +00:00
|
|
|
PROTECT(t, code);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
while (ip < code->length()) {
|
2014-05-04 01:09:55 +00:00
|
|
|
if (context->visitTable[frame->duplicatedIp(ip)]++) {
|
2007-12-09 22:45:43 +00:00
|
|
|
// we've already visited this part of the code
|
2008-04-20 05:23:08 +00:00
|
|
|
frame->visitLogicalIp(ip);
|
2012-10-13 15:46:12 +00:00
|
|
|
goto next;
|
2007-09-30 04:07:22 +00:00
|
|
|
}
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
frame->startLogicalIp(ip);
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2008-11-08 23:21:30 +00:00
|
|
|
if (exceptionHandlerStart >= 0) {
|
2008-09-25 00:48:32 +00:00
|
|
|
c->initLocalsFromLogicalIp(exceptionHandlerStart);
|
|
|
|
|
|
|
|
exceptionHandlerStart = -1;
|
2008-04-19 07:03:59 +00:00
|
|
|
|
|
|
|
frame->pushObject();
|
2014-05-01 03:54:52 +00:00
|
|
|
|
2014-07-17 00:07:56 +00:00
|
|
|
c->nativeCall(
|
|
|
|
c->constant(getThunk(t, gcIfNecessaryThunk), ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
ir::Type::void_(),
|
|
|
|
args(c->threadRegister()));
|
2008-04-09 19:08:13 +00:00
|
|
|
}
|
2014-05-04 01:32:40 +00:00
|
|
|
|
|
|
|
if (DebugInstructions) {
|
|
|
|
unsigned startingIp = ip;
|
|
|
|
fprintf(stderr, " stack: [");
|
|
|
|
for (size_t i = frame->localSize(); i < frame->sp; i++) {
|
2014-05-05 16:49:50 +00:00
|
|
|
ir::Type ty = frame->get(i);
|
2014-06-01 20:22:14 +00:00
|
|
|
if (ty == ir::Type::i4()) {
|
2014-05-04 01:32:40 +00:00
|
|
|
fprintf(stderr, "I");
|
2014-06-01 20:22:14 +00:00
|
|
|
} else if (ty == ir::Type::i8()) {
|
2014-05-04 01:32:40 +00:00
|
|
|
fprintf(stderr, "L");
|
2014-06-01 20:22:14 +00:00
|
|
|
} else if (ty == ir::Type::f4()) {
|
2014-05-05 16:49:50 +00:00
|
|
|
fprintf(stderr, "F");
|
2014-06-01 20:22:14 +00:00
|
|
|
} else if (ty == ir::Type::f8()) {
|
2014-05-05 16:49:50 +00:00
|
|
|
fprintf(stderr, "D");
|
2014-06-01 20:22:14 +00:00
|
|
|
} else if (ty == ir::Type::object()) {
|
2014-05-04 01:32:40 +00:00
|
|
|
fprintf(stderr, "O");
|
2014-05-05 16:49:50 +00:00
|
|
|
} else {
|
2014-05-04 01:32:40 +00:00
|
|
|
fprintf(stderr, "?");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fprintf(stderr, "]\n");
|
|
|
|
fprintf(stderr, "% 5d: ", startingIp);
|
2014-06-29 04:57:07 +00:00
|
|
|
avian::jvm::debug::printInstruction(code->body().begin(), startingIp);
|
2014-05-04 01:32:40 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
unsigned instruction = code->body()[ip++];
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case aaload:
|
|
|
|
case baload:
|
|
|
|
case caload:
|
|
|
|
case daload:
|
|
|
|
case faload:
|
|
|
|
case iaload:
|
|
|
|
case laload:
|
|
|
|
case saload: {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* index = frame->pop(ir::Type::i4());
|
|
|
|
ir::Value* array = frame->pop(ir::Type::object());
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2008-11-25 17:34:48 +00:00
|
|
|
if (inTryBlock(t, code, ip - 1)) {
|
|
|
|
c->saveLocals();
|
2010-09-17 01:43:27 +00:00
|
|
|
frame->trace(0, 0);
|
2008-11-25 17:34:48 +00:00
|
|
|
}
|
|
|
|
|
2008-01-08 17:10:24 +00:00
|
|
|
if (CheckArrayBounds) {
|
2011-09-01 03:18:00 +00:00
|
|
|
c->checkBounds(array, TargetArrayLength, index, aioobThunk(t));
|
2008-01-08 17:10:24 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case aaload:
|
2014-05-05 21:21:48 +00:00
|
|
|
frame->push(
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::object(),
|
|
|
|
c->load(
|
2014-07-12 17:54:19 +00:00
|
|
|
ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(array, ir::Type::object(), TargetArrayBody, index),
|
|
|
|
ir::Type::object()));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case faload:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(
|
|
|
|
ir::Type::f4(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(array, ir::Type::f4(), TargetArrayBody, index),
|
|
|
|
ir::Type::f4()));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-11-30 15:08:45 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case iaload:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(
|
|
|
|
ir::Type::i4(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(array, ir::Type::i4(), TargetArrayBody, index),
|
|
|
|
ir::Type::i4()));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case baload:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(
|
|
|
|
ir::Type::i4(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(array, ir::Type::i1(), TargetArrayBody, index),
|
|
|
|
ir::Type::i4()));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case caload:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(
|
|
|
|
ir::Type::i4(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Unsigned,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(array, ir::Type::i2(), TargetArrayBody, index),
|
|
|
|
ir::Type::i4()));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case daload:
|
2014-05-05 21:21:48 +00:00
|
|
|
frame->pushLarge(
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::f8(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(array, ir::Type::f8(), TargetArrayBody, index),
|
|
|
|
ir::Type::f8()));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case laload:
|
2014-05-05 21:21:48 +00:00
|
|
|
frame->pushLarge(
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::i8(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(array, ir::Type::i8(), TargetArrayBody, index),
|
|
|
|
ir::Type::i8()));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case saload:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(
|
|
|
|
ir::Type::i4(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(array, ir::Type::i2(), TargetArrayBody, index),
|
|
|
|
ir::Type::i4()));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
} break;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aastore:
|
|
|
|
case bastore:
|
|
|
|
case castore:
|
|
|
|
case dastore:
|
|
|
|
case fastore:
|
|
|
|
case iastore:
|
|
|
|
case lastore:
|
|
|
|
case sastore: {
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* value;
|
2014-05-05 21:21:48 +00:00
|
|
|
if (instruction == lastore) {
|
2014-06-01 20:22:14 +00:00
|
|
|
value = frame->popLarge(ir::Type::i8());
|
2014-05-05 21:21:48 +00:00
|
|
|
} else if (instruction == dastore) {
|
2014-06-01 20:22:14 +00:00
|
|
|
value = frame->popLarge(ir::Type::f8());
|
2007-12-09 22:45:43 +00:00
|
|
|
} else if (instruction == aastore) {
|
2014-06-01 20:22:14 +00:00
|
|
|
value = frame->pop(ir::Type::object());
|
2014-05-05 21:21:48 +00:00
|
|
|
} else if (instruction == fastore) {
|
2014-06-01 20:22:14 +00:00
|
|
|
value = frame->pop(ir::Type::f4());
|
2007-12-09 22:45:43 +00:00
|
|
|
} else {
|
2014-06-01 20:22:14 +00:00
|
|
|
value = frame->pop(ir::Type::i4());
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* index = frame->pop(ir::Type::i4());
|
|
|
|
ir::Value* array = frame->pop(ir::Type::object());
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2008-11-25 17:34:48 +00:00
|
|
|
if (inTryBlock(t, code, ip - 1)) {
|
|
|
|
c->saveLocals();
|
2010-09-17 01:43:27 +00:00
|
|
|
frame->trace(0, 0);
|
2008-11-25 17:34:48 +00:00
|
|
|
}
|
|
|
|
|
2008-01-08 17:10:24 +00:00
|
|
|
if (CheckArrayBounds) {
|
2011-09-01 03:18:00 +00:00
|
|
|
c->checkBounds(array, TargetArrayLength, index, aioobThunk(t));
|
2008-01-08 17:10:24 +00:00
|
|
|
}
|
2007-09-29 20:24:14 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
switch (instruction) {
|
|
|
|
case aastore: {
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(
|
|
|
|
c->constant(getThunk(t, setMaybeNullThunk), ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
ir::Type::void_(),
|
|
|
|
args(c->threadRegister(),
|
|
|
|
array,
|
|
|
|
c->binaryOp(lir::Add,
|
|
|
|
ir::Type::i4(),
|
|
|
|
c->constant(TargetArrayBody, ir::Type::i4()),
|
|
|
|
c->binaryOp(lir::ShiftLeft,
|
|
|
|
ir::Type::i4(),
|
|
|
|
c->constant(log(TargetBytesPerWord),
|
|
|
|
ir::Type::i4()),
|
|
|
|
index)),
|
|
|
|
value));
|
2008-06-10 14:49:13 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case fastore:
|
2014-06-01 20:22:14 +00:00
|
|
|
c->store(value,
|
|
|
|
c->memory(array, ir::Type::f4(), TargetArrayBody, index));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case iastore:
|
2014-06-01 20:22:14 +00:00
|
|
|
c->store(value,
|
|
|
|
c->memory(array, ir::Type::i4(), TargetArrayBody, index));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case bastore:
|
2014-06-01 20:22:14 +00:00
|
|
|
c->store(value,
|
|
|
|
c->memory(array, ir::Type::i1(), TargetArrayBody, index));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case castore:
|
|
|
|
case sastore:
|
2014-06-01 20:22:14 +00:00
|
|
|
c->store(value,
|
|
|
|
c->memory(array, ir::Type::i2(), TargetArrayBody, index));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case dastore:
|
2014-06-01 20:22:14 +00:00
|
|
|
c->store(value,
|
|
|
|
c->memory(array, ir::Type::f8(), TargetArrayBody, index));
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2008-06-10 14:49:13 +00:00
|
|
|
case lastore:
|
2014-06-01 20:22:14 +00:00
|
|
|
c->store(value,
|
|
|
|
c->memory(array, ir::Type::i8(), TargetArrayBody, index));
|
2008-06-10 14:49:13 +00:00
|
|
|
break;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
} break;
|
2007-09-30 15:52:21 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aconst_null:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::object(), c->constant(0, ir::Type::object()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload:
|
2014-06-29 04:57:07 +00:00
|
|
|
frame->load(ir::Type::object(), code->body()[ip++]);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload_0:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->load(ir::Type::object(), 0);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 23:13:55 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload_1:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->load(ir::Type::object(), 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload_2:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->load(ir::Type::object(), 2);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload_3:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->load(ir::Type::object(), 3);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case anewarray: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
object reference
|
|
|
|
= singletonObject(t, context->method->code()->pool(), index - 1);
|
2011-03-15 23:52:02 +00:00
|
|
|
|
|
|
|
PROTECT(t, reference);
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcClass* class_
|
|
|
|
= resolveClassInPool(t, context->method, index - 1, false);
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* length = frame->pop(ir::Type::i4());
|
2007-10-08 23:13:55 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
object argument;
|
|
|
|
Thunk thunk;
|
|
|
|
if (LIKELY(class_)) {
|
2014-07-02 21:11:27 +00:00
|
|
|
argument = class_;
|
2011-03-15 23:52:02 +00:00
|
|
|
thunk = makeBlankObjectArrayThunk;
|
|
|
|
} else {
|
2014-07-02 21:11:27 +00:00
|
|
|
argument = makePair(t, context->method, reference);
|
2011-03-15 23:52:02 +00:00
|
|
|
thunk = makeBlankObjectArrayFromReferenceThunk;
|
|
|
|
}
|
|
|
|
|
2014-07-12 15:41:52 +00:00
|
|
|
frame->push(
|
|
|
|
ir::Type::object(),
|
|
|
|
c->nativeCall(
|
|
|
|
c->constant(getThunk(t, thunk), ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
ir::Type::object(),
|
|
|
|
args(c->threadRegister(), frame->append(argument), length)));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2007-12-12 22:19:13 +00:00
|
|
|
case areturn: {
|
2009-04-26 01:51:33 +00:00
|
|
|
handleExit(t, frame);
|
2014-06-01 20:22:14 +00:00
|
|
|
c->return_(frame->pop(ir::Type::object()));
|
2014-07-11 15:50:18 +00:00
|
|
|
}
|
|
|
|
goto next;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2007-12-12 22:19:13 +00:00
|
|
|
case arraylength: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
2014-07-12 17:54:19 +00:00
|
|
|
c->load(ir::ExtendMode::Signed,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(frame->pop(ir::Type::object()),
|
|
|
|
ir::Type::iptr(),
|
2014-05-05 21:21:48 +00:00
|
|
|
TargetArrayLength),
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::i4()));
|
2007-12-12 22:19:13 +00:00
|
|
|
} break;
|
2007-10-04 03:19:39 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore:
|
2014-06-29 04:57:07 +00:00
|
|
|
frame->store(ir::Type::object(), code->body()[ip++]);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore_0:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->store(ir::Type::object(), 0);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-17 17:22:09 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore_1:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->store(ir::Type::object(), 1);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore_2:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->store(ir::Type::object(), 2);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore_3:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->store(ir::Type::object(), 3);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-12 22:19:13 +00:00
|
|
|
case athrow: {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* target = frame->pop(ir::Type::object());
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(c->constant(getThunk(t, throw_Thunk), ir::Type::iptr()),
|
|
|
|
Compiler::NoReturn,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
ir::Type::void_(),
|
|
|
|
args(c->threadRegister(), target));
|
      // throw_ never returns, but the call above still pushes a return
      // address that points just past the end of the method; emit a trap
      // instruction so the stack unwinder (compareIpToMethodBounds) can
      // attribute that address to this method.
|
|
|
|
2014-03-14 15:54:05 +00:00
|
|
|
c->nullaryOp(lir::Trap);
|
2014-07-11 15:50:18 +00:00
|
|
|
}
|
|
|
|
goto next;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case bipush:
|
2014-07-11 15:47:57 +00:00
|
|
|
frame->push(
|
|
|
|
ir::Type::i4(),
|
|
|
|
c->constant(static_cast<int8_t>(code->body()[ip++]), ir::Type::i4()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case checkcast: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
object reference
|
|
|
|
= singletonObject(t, context->method->code()->pool(), index - 1);
|
2011-03-15 23:52:02 +00:00
|
|
|
|
|
|
|
PROTECT(t, reference);
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcClass* class_
|
|
|
|
= resolveClassInPool(t, context->method, index - 1, false);
|
2011-03-15 23:52:02 +00:00
|
|
|
|
|
|
|
object argument;
|
|
|
|
Thunk thunk;
|
|
|
|
if (LIKELY(class_)) {
|
2014-07-02 21:11:27 +00:00
|
|
|
argument = class_;
|
2011-03-15 23:52:02 +00:00
|
|
|
thunk = checkCastThunk;
|
|
|
|
} else {
|
2014-07-02 21:11:27 +00:00
|
|
|
argument = makePair(t, context->method, reference);
|
2011-03-15 23:52:02 +00:00
|
|
|
thunk = checkCastFromReferenceThunk;
|
|
|
|
}
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* instance = c->peek(1, 0);
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(
|
|
|
|
c->constant(getThunk(t, thunk), ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
ir::Type::void_(),
|
|
|
|
args(c->threadRegister(), frame->append(argument), instance));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case d2f: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::f4(),
|
|
|
|
c->f2f(ir::Type::f4(), frame->popLarge(ir::Type::f8())));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case d2i: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
|
|
|
c->f2i(ir::Type::i4(), frame->popLarge(ir::Type::f8())));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case d2l: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pushLarge(ir::Type::i8(),
|
|
|
|
c->f2i(ir::Type::i8(), frame->popLarge(ir::Type::f8())));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2013-12-19 05:03:42 +00:00
|
|
|
case dadd:
|
|
|
|
case dsub:
|
|
|
|
case dmul:
|
|
|
|
case ddiv:
|
|
|
|
case vm::drem: {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* a = frame->popLarge(ir::Type::f8());
|
|
|
|
ir::Value* b = frame->popLarge(ir::Type::f8());
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2014-05-05 21:21:48 +00:00
|
|
|
frame->pushLarge(
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::f8(),
|
|
|
|
c->binaryOp(
|
|
|
|
toCompilerBinaryOp(t, instruction), ir::Type::f8(), a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dcmpg: {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* a = frame->popLarge(ir::Type::f8());
|
|
|
|
ir::Value* b = frame->popLarge(ir::Type::f8());
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2014-05-04 02:34:46 +00:00
|
|
|
if (floatBranch(t, frame, code, ip, false, a, b, &newIp)) {
|
2012-10-13 15:46:12 +00:00
|
|
|
goto branch;
|
|
|
|
} else {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(c->constant(getThunk(t, compareDoublesGThunk),
|
|
|
|
ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
ir::Type::i4(),
|
|
|
|
args(nullptr, a, nullptr, b)));
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 21:34:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dcmpl: {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* a = frame->popLarge(ir::Type::f8());
|
|
|
|
ir::Value* b = frame->popLarge(ir::Type::f8());
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2014-05-04 02:34:46 +00:00
|
|
|
if (floatBranch(t, frame, code, ip, true, a, b, &newIp)) {
|
2012-10-13 15:46:12 +00:00
|
|
|
goto branch;
|
|
|
|
} else {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(c->constant(getThunk(t, compareDoublesLThunk),
|
|
|
|
ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
ir::Type::i4(),
|
|
|
|
args(nullptr, a, nullptr, b)));
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dconst_0:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pushLarge(ir::Type::f8(),
|
|
|
|
c->constant(doubleToBits(0.0), ir::Type::f8()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2014-05-05 21:21:48 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dconst_1:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pushLarge(ir::Type::f8(),
|
|
|
|
c->constant(doubleToBits(1.0), ir::Type::f8()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dneg: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pushLarge(
|
|
|
|
ir::Type::f8(),
|
|
|
|
c->unaryOp(lir::FloatNegate, frame->popLarge(ir::Type::f8())));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
case vm::dup:
|
2007-12-09 22:45:43 +00:00
|
|
|
frame->dup();
|
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup_x1:
|
|
|
|
frame->dupX1();
|
|
|
|
break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup_x2:
|
|
|
|
frame->dupX2();
|
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
case vm::dup2:
|
2007-12-09 22:45:43 +00:00
|
|
|
frame->dup2();
|
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup2_x1:
|
|
|
|
frame->dup2X1();
|
|
|
|
break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case dup2_x2:
|
|
|
|
frame->dup2X2();
|
|
|
|
break;
|
2007-10-09 17:15:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case f2d: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pushLarge(ir::Type::f8(),
|
|
|
|
c->f2f(ir::Type::f8(), frame->pop(ir::Type::f4())));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case f2i: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
|
|
|
c->f2i(ir::Type::i4(), frame->pop(ir::Type::f4())));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-13 21:48:40 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case f2l: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->pushLarge(ir::Type::i8(),
|
|
|
|
c->f2i(ir::Type::i8(), frame->pop(ir::Type::f4())));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2013-12-19 05:03:42 +00:00
|
|
|
case fadd:
|
|
|
|
case fsub:
|
|
|
|
case fmul:
|
|
|
|
case fdiv:
|
|
|
|
case frem: {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* a = frame->pop(ir::Type::f4());
|
|
|
|
ir::Value* b = frame->pop(ir::Type::f4());
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2014-05-05 21:21:48 +00:00
|
|
|
frame->push(
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::f4(),
|
|
|
|
c->binaryOp(
|
|
|
|
toCompilerBinaryOp(t, instruction), ir::Type::f4(), a, b));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fcmpg: {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* a = frame->pop(ir::Type::f4());
|
|
|
|
ir::Value* b = frame->pop(ir::Type::f4());
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2014-05-04 02:34:46 +00:00
|
|
|
if (floatBranch(t, frame, code, ip, false, a, b, &newIp)) {
|
2012-10-13 15:46:12 +00:00
|
|
|
goto branch;
|
|
|
|
} else {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(c->constant(getThunk(t, compareFloatsGThunk),
|
|
|
|
ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
ir::Type::i4(),
|
|
|
|
args(a, b)));
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fcmpl: {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* a = frame->pop(ir::Type::f4());
|
|
|
|
ir::Value* b = frame->pop(ir::Type::f4());
|
2007-12-23 00:00:35 +00:00
|
|
|
|
2014-05-04 02:34:46 +00:00
|
|
|
if (floatBranch(t, frame, code, ip, true, a, b, &newIp)) {
|
2012-10-13 15:46:12 +00:00
|
|
|
goto branch;
|
|
|
|
} else {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(c->constant(getThunk(t, compareFloatsLThunk),
|
|
|
|
ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
ir::Type::i4(),
|
|
|
|
args(a, b)));
|
2009-08-06 16:01:57 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-16 17:21:26 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fconst_0:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::f4(),
|
|
|
|
c->constant(floatToBits(0.0), ir::Type::f4()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fconst_1:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::f4(),
|
|
|
|
c->constant(floatToBits(1.0), ir::Type::f4()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fconst_2:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::f4(),
|
|
|
|
c->constant(floatToBits(2.0), ir::Type::f4()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case fneg: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::f4(),
|
|
|
|
c->unaryOp(lir::FloatNegate, frame->pop(ir::Type::f4())));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
    case getfield:
    case getstatic: {
      uint16_t index = codeReadInt16(t, code, ip);

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcField* field = resolveField(t, context->method, index - 1, false);

      if (LIKELY(field)) {
        if ((field->flags() & ACC_VOLATILE) and TargetBytesPerWord == 4
            and (field->code() == DoubleField or field->code() == LongField)) {
          PROTECT(t, field);

          c->nativeCall(c->constant(getThunk(t, acquireMonitorForObjectThunk),
                                    ir::Type::iptr()),
                        0,
                        frame->trace(0, 0),
                        ir::Type::void_(),
                        args(c->threadRegister(), frame->append(field)));
        }

        ir::Value* table;

        if (instruction == getstatic) {
          checkField(t, field, true);

          PROTECT(t, field);

          if (classNeedsInit(t, field->class_())) {
            c->nativeCall(
                c->constant(getThunk(t, tryInitClassThunk), ir::Type::iptr()),
                0,
                frame->trace(0, 0),
                ir::Type::void_(),
                args(c->threadRegister(), frame->append(field->class_())));
          }

          table = frame->append(field->class_()->staticTable());
        } else {
          checkField(t, field, false);

          table = frame->pop(ir::Type::object());

          if (inTryBlock(t, code, ip - 3)) {
            c->saveLocals();
            frame->trace(0, 0);
          }
        }

        switch (field->code()) {
        case ByteField:
        case BooleanField:
          frame->push(ir::Type::i4(),
                      c->load(ir::ExtendMode::Signed,
                              c->memory(table,
                                        ir::Type::i1(),
                                        targetFieldOffset(context, field)),
                              ir::Type::i4()));
          break;

        case CharField:
          frame->push(ir::Type::i4(),
                      c->load(ir::ExtendMode::Unsigned,
                              c->memory(table,
                                        ir::Type::i2(),
                                        targetFieldOffset(context, field)),
                              ir::Type::i4()));
          break;

        case ShortField:
          frame->push(ir::Type::i4(),
                      c->load(ir::ExtendMode::Signed,
                              c->memory(table,
                                        ir::Type::i2(),
                                        targetFieldOffset(context, field)),
                              ir::Type::i4()));
          break;

        case FloatField:
          frame->push(ir::Type::f4(),
                      c->load(ir::ExtendMode::Signed,
                              c->memory(table,
                                        ir::Type::f4(),
                                        targetFieldOffset(context, field)),
                              ir::Type::f4()));
          break;

        case IntField:
          frame->push(ir::Type::i4(),
                      c->load(ir::ExtendMode::Signed,
                              c->memory(table,
                                        ir::Type::i4(),
                                        targetFieldOffset(context, field)),
                              ir::Type::i4()));
          break;

        case DoubleField:
          frame->pushLarge(ir::Type::f8(),
                           c->load(ir::ExtendMode::Signed,
                                   c->memory(table,
                                             ir::Type::f8(),
                                             targetFieldOffset(context, field)),
                                   ir::Type::f8()));
          break;

        case LongField:
          frame->pushLarge(ir::Type::i8(),
                           c->load(ir::ExtendMode::Signed,
                                   c->memory(table,
                                             ir::Type::i8(),
                                             targetFieldOffset(context, field)),
                                   ir::Type::i8()));
          break;

        case ObjectField:
          frame->push(ir::Type::object(),
                      c->load(ir::ExtendMode::Signed,
                              c->memory(table,
                                        ir::Type::object(),
                                        targetFieldOffset(context, field)),
                              ir::Type::object()));
          break;

        default:
          abort(t);
        }

        if (field->flags() & ACC_VOLATILE) {
          if (TargetBytesPerWord == 4 and (field->code() == DoubleField
                                           or field->code() == LongField)) {
            c->nativeCall(c->constant(getThunk(t, releaseMonitorForObjectThunk),
                                      ir::Type::iptr()),
                          0,
                          frame->trace(0, 0),
                          ir::Type::void_(),
                          args(c->threadRegister(), frame->append(field)));
          } else {
            c->nullaryOp(lir::LoadBarrier);
          }
        }
      } else {
        GcReference* ref = cast<GcReference>(t, reference);
        PROTECT(t, ref);
        int fieldCode = vm::fieldCode(t, ref->spec()->body()[0]);

        GcPair* pair = makePair(t, context->method, reference);

        ir::Type rType = operandTypeForFieldCode(t, fieldCode);

        ir::Value* result;
        if (instruction == getstatic) {
          result = c->nativeCall(
              c->constant(getThunk(t, getStaticFieldValueFromReferenceThunk),
                          ir::Type::iptr()),
              0,
              frame->trace(0, 0),
              rType,
              args(c->threadRegister(), frame->append(pair)));
        } else {
          ir::Value* instance = frame->pop(ir::Type::object());

          result = c->nativeCall(
              c->constant(getThunk(t, getFieldValueFromReferenceThunk),
                          ir::Type::iptr()),
              0,
              frame->trace(0, 0),
              rType,
              args(c->threadRegister(), frame->append(pair), instance));
        }

        frame->pushReturnValue(fieldCode, result);
      }
    } break;
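
    // A safe point is compiled on backward branches so that threads in
    // long-running loops can be paused by the VM at a known point.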
    case goto_: {
      uint32_t offset = codeReadInt16(t, code, ip);
      uint32_t newIp = (ip - 3) + offset;
      assertT(t, newIp < code->length());

      if (newIp <= ip) {
        compileSafePoint(t, c, frame);
      }

      c->jmp(frame->machineIpValue(newIp));
      ip = newIp;
    } break;
    case goto_w: {
      uint32_t offset = codeReadInt32(t, code, ip);
      uint32_t newIp = (ip - 5) + offset;
      assertT(t, newIp < code->length());

      if (newIp <= ip) {
        compileSafePoint(t, c, frame);
      }

      c->jmp(frame->machineIpValue(newIp));
      ip = newIp;
    } break;
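
    // The int narrowing conversions truncate to the smaller width and then
    // extend back to 32 bits: i2b and i2s sign-extend, i2c zero-extends.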
    case i2b: {
      frame->push(ir::Type::i4(),
                  c->truncateThenExtend(ir::ExtendMode::Signed,
                                        ir::Type::i4(),
                                        ir::Type::i1(),
                                        frame->pop(ir::Type::i4())));
    } break;

    case i2c: {
      frame->push(ir::Type::i4(),
                  c->truncateThenExtend(ir::ExtendMode::Unsigned,
                                        ir::Type::i4(),
                                        ir::Type::i2(),
                                        frame->pop(ir::Type::i4())));
    } break;

    case i2d: {
      frame->pushLarge(ir::Type::f8(),
                       c->i2f(ir::Type::f8(), frame->pop(ir::Type::i4())));
    } break;

    case i2f: {
      frame->push(ir::Type::f4(),
                  c->i2f(ir::Type::f4(), frame->pop(ir::Type::i4())));
    } break;

    case i2l:
      frame->pushLarge(ir::Type::i8(),
                       c->truncateThenExtend(ir::ExtendMode::Signed,
                                             ir::Type::i8(),
                                             ir::Type::i4(),
                                             frame->pop(ir::Type::i4())));
      break;

    case i2s: {
      frame->push(ir::Type::i4(),
                  c->truncateThenExtend(ir::ExtendMode::Signed,
                                        ir::Type::i4(),
                                        ir::Type::i2(),
                                        frame->pop(ir::Type::i4())));
    } break;
    case iadd:
    case iand:
    case ior:
    case ishl:
    case ishr:
    case iushr:
    case isub:
    case ixor:
    case imul: {
      ir::Value* a = frame->pop(ir::Type::i4());
      ir::Value* b = frame->pop(ir::Type::i4());
      frame->push(
          ir::Type::i4(),
          c->binaryOp(
              toCompilerBinaryOp(t, instruction), ir::Type::i4(), a, b));
    } break;
    case iconst_m1:
      frame->push(ir::Type::i4(), c->constant(-1, ir::Type::i4()));
      break;

    case iconst_0:
      frame->push(ir::Type::i4(), c->constant(0, ir::Type::i4()));
      break;

    case iconst_1:
      frame->push(ir::Type::i4(), c->constant(1, ir::Type::i4()));
      break;

    case iconst_2:
      frame->push(ir::Type::i4(), c->constant(2, ir::Type::i4()));
      break;

    case iconst_3:
      frame->push(ir::Type::i4(), c->constant(3, ir::Type::i4()));
      break;

    case iconst_4:
      frame->push(ir::Type::i4(), c->constant(4, ir::Type::i4()));
      break;

    case iconst_5:
      frame->push(ir::Type::i4(), c->constant(5, ir::Type::i4()));
      break;
    case idiv: {
      ir::Value* a = frame->pop(ir::Type::i4());
      ir::Value* b = frame->pop(ir::Type::i4());

      if (inTryBlock(t, code, ip - 1)) {
        c->saveLocals();
        frame->trace(0, 0);
      }

      frame->push(ir::Type::i4(),
                  c->binaryOp(lir::Divide, ir::Type::i4(), a, b));
    } break;
    case if_acmpeq:
    case if_acmpne: {
      uint32_t offset = codeReadInt16(t, code, ip);
      newIp = (ip - 3) + offset;
      assertT(t, newIp < code->length());

      if (newIp <= ip) {
        compileSafePoint(t, c, frame);
      }

      ir::Value* a = frame->pop(ir::Type::object());
      ir::Value* b = frame->pop(ir::Type::object());
      ir::Value* target = frame->machineIpValue(newIp);

      c->condJump(toCompilerJumpOp(t, instruction), a, b, target);
    }
      goto branch;
    case if_icmpeq:
    case if_icmpne:
    case if_icmpgt:
    case if_icmpge:
    case if_icmplt:
    case if_icmple: {
      uint32_t offset = codeReadInt16(t, code, ip);
      newIp = (ip - 3) + offset;
      assertT(t, newIp < code->length());

      if (newIp <= ip) {
        compileSafePoint(t, c, frame);
      }

      ir::Value* a = frame->pop(ir::Type::i4());
      ir::Value* b = frame->pop(ir::Type::i4());
      ir::Value* target = frame->machineIpValue(newIp);

      c->condJump(toCompilerJumpOp(t, instruction), a, b, target);
    }
      goto branch;
    case ifeq:
    case ifne:
    case ifgt:
    case ifge:
    case iflt:
    case ifle: {
      uint32_t offset = codeReadInt16(t, code, ip);
      newIp = (ip - 3) + offset;
      assertT(t, newIp < code->length());

      ir::Value* target = frame->machineIpValue(newIp);

      if (newIp <= ip) {
        compileSafePoint(t, c, frame);
      }

      ir::Value* a = c->constant(0, ir::Type::i4());
      ir::Value* b = frame->pop(ir::Type::i4());

      c->condJump(toCompilerJumpOp(t, instruction), a, b, target);
    }
      goto branch;
    case ifnull:
    case ifnonnull: {
      uint32_t offset = codeReadInt16(t, code, ip);
      newIp = (ip - 3) + offset;
      assertT(t, newIp < code->length());

      if (newIp <= ip) {
        compileSafePoint(t, c, frame);
      }

      ir::Value* a = c->constant(0, ir::Type::object());
      ir::Value* b = frame->pop(ir::Type::object());
      ir::Value* target = frame->machineIpValue(newIp);

      c->condJump(toCompilerJumpOp(t, instruction), a, b, target);
    }
      goto branch;
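
    // iinc takes a local variable index and a signed 8-bit constant and
    // compiles to load-local, add, store-local.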
    case iinc: {
      uint8_t index = code->body()[ip++];
      int8_t count = code->body()[ip++];

      storeLocal(context,
                 1,
                 ir::Type::i4(),
                 c->binaryOp(lir::Add,
                             ir::Type::i4(),
                             c->constant(count, ir::Type::i4()),
                             loadLocal(context, 1, ir::Type::i4(), index)),
                 index);
    } break;
    case iload:
      frame->load(ir::Type::i4(), code->body()[ip++]);
      break;
    case fload:
      frame->load(ir::Type::f4(), code->body()[ip++]);
      break;

    case iload_0:
      frame->load(ir::Type::i4(), 0);
      break;
    case fload_0:
      frame->load(ir::Type::f4(), 0);
      break;

    case iload_1:
      frame->load(ir::Type::i4(), 1);
      break;
    case fload_1:
      frame->load(ir::Type::f4(), 1);
      break;

    case iload_2:
      frame->load(ir::Type::i4(), 2);
      break;
    case fload_2:
      frame->load(ir::Type::f4(), 2);
      break;

    case iload_3:
      frame->load(ir::Type::i4(), 3);
      break;
    case fload_3:
      frame->load(ir::Type::f4(), 3);
      break;

    case ineg: {
      frame->push(ir::Type::i4(),
                  c->unaryOp(lir::Negate, frame->pop(ir::Type::i4())));
    } break;
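
    // instanceof: a resolved class is tested via the instanceOf64 thunk; an
    // unresolved one goes through a reference thunk that resolves it at run
    // time.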
    case instanceof: {
      uint16_t index = codeReadInt16(t, code, ip);

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcClass* class_
          = resolveClassInPool(t, context->method, index - 1, false);

      ir::Value* instance = frame->pop(ir::Type::object());

      object argument;
      Thunk thunk;
      if (LIKELY(class_)) {
        argument = class_;
        thunk = instanceOf64Thunk;
      } else {
        argument = makePair(t, context->method, reference);
        thunk = instanceOfFromReferenceThunk;
      }

      frame->push(
          ir::Type::i4(),
          c->nativeCall(
              c->constant(getThunk(t, thunk), ir::Type::iptr()),
              0,
              frame->trace(0, 0),
              ir::Type::i4(),
              args(c->threadRegister(), frame->append(argument), instance)));
    } break;
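
    // Interface calls are not bound at compile time: a thunk locates the
    // concrete target from the receiver on each call, and the resulting
    // address feeds a stackCall.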
    case invokeinterface: {
      context->leaf = false;

      uint16_t index = codeReadInt16(t, code, ip);
      ip += 2;

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcMethod* target = resolveMethod(t, context->method, index - 1, false);

      object argument;
      Thunk thunk;
      unsigned parameterFootprint;
      int returnCode;
      bool tailCall;
      if (LIKELY(target)) {
        checkMethod(t, target, false);

        argument = target;
        thunk = findInterfaceMethodFromInstanceThunk;
        parameterFootprint = target->parameterFootprint();
        returnCode = target->returnCode();
        tailCall = isTailCall(t, code, ip, context->method, target);
      } else {
        GcReference* ref = cast<GcReference>(t, reference);
        PROTECT(t, ref);
        argument = makePair(t, context->method, reference);
        thunk = findInterfaceMethodFromInstanceAndReferenceThunk;
        parameterFootprint = methodReferenceParameterFootprint(t, ref, false);
        returnCode = methodReferenceReturnCode(t, ref);
        tailCall = isReferenceTailCall(t, code, ip, context->method, ref);
      }

      unsigned rSize = resultSize(t, returnCode);

      ir::Value* result = c->stackCall(
          c->nativeCall(c->constant(getThunk(t, thunk), ir::Type::iptr()),
                        0,
                        frame->trace(0, 0),
                        ir::Type::iptr(),
                        args(c->threadRegister(),
                             frame->append(argument),
                             c->peek(1, parameterFootprint - 1))),
          tailCall ? Compiler::TailJump : 0,
          frame->trace(0, 0),
          operandTypeForFieldCode(t, returnCode),
          frame->peekMethodArguments(parameterFootprint));

      frame->popFootprint(parameterFootprint);

      if (rSize) {
        frame->pushReturnValue(returnCode, result);
      }
    } break;
    case invokespecial: {
      context->leaf = false;

      uint16_t index = codeReadInt16(t, code, ip);

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcMethod* target = resolveMethod(t, context->method, index - 1, false);

      if (LIKELY(target)) {
        GcClass* class_ = context->method->class_();
        if (isSpecialMethod(t, target, class_)) {
          target = findVirtualMethod(t, target, class_->super());
        }

        checkMethod(t, target, false);

        bool tailCall = isTailCall(t, code, ip, context->method, target);

        if (UNLIKELY(methodAbstract(t, target))) {
          compileDirectAbstractInvoke(
              t, frame, getMethodAddressThunk, target, tailCall);
        } else {
          compileDirectInvoke(t, frame, target, tailCall);
        }
      } else {
        GcReference* ref = cast<GcReference>(t, reference);
        PROTECT(t, ref);
        compileDirectReferenceInvoke(
            t,
            frame,
            findSpecialMethodFromReferenceThunk,
            ref,
            false,
            isReferenceTailCall(t, code, ip, context->method, ref));
      }
    } break;
    case invokestatic: {
      context->leaf = false;

      uint16_t index = codeReadInt16(t, code, ip);

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcMethod* target = resolveMethod(t, context->method, index - 1, false);

      if (LIKELY(target)) {
        checkMethod(t, target, true);

        if (not intrinsic(t, frame, target)) {
          bool tailCall = isTailCall(t, code, ip, context->method, target);
          compileDirectInvoke(t, frame, target, tailCall);
        }
      } else {
        GcReference* ref = cast<GcReference>(t, reference);
        PROTECT(t, ref);
        compileDirectReferenceInvoke(
            t,
            frame,
            findStaticMethodFromReferenceThunk,
            ref,
            true,
            isReferenceTailCall(t, code, ip, context->method, ref));
      }
    } break;
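
    // Virtual calls dispatch through the vtable: the receiver's class word
    // is masked with TargetPointerMask and the slot at TargetClassVtable
    // plus the method's offset (in words) is called.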
    case invokevirtual: {
      context->leaf = false;

      uint16_t index = codeReadInt16(t, code, ip);

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcMethod* target = resolveMethod(t, context->method, index - 1, false);

      if (LIKELY(target)) {
        checkMethod(t, target, false);

        if (not intrinsic(t, frame, target)) {
          bool tailCall = isTailCall(t, code, ip, context->method, target);

          if (LIKELY(methodVirtual(t, target))) {
            unsigned parameterFootprint = target->parameterFootprint();

            unsigned offset = TargetClassVtable
                              + (target->offset() * TargetBytesPerWord);

            ir::Value* instance = c->peek(1, parameterFootprint - 1);

            frame->stackCall(
                c->memory(c->binaryOp(
                              lir::And,
                              ir::Type::iptr(),
                              c->constant(TargetPointerMask, ir::Type::iptr()),
                              c->memory(instance, ir::Type::object())),
                          ir::Type::object(),
                          offset),
                target,
                tailCall ? Compiler::TailJump : 0,
                frame->trace(0, 0));
          } else {
            // OpenJDK generates invokevirtual calls to private methods
            // (e.g. readObject and writeObject for serialization), so
            // we must handle such cases here.

            compileDirectInvoke(t, frame, target, tailCall);
          }
        }
      } else {
        GcReference* ref = cast<GcReference>(t, reference);
        PROTECT(t, reference);
        PROTECT(t, ref);

        GcPair* pair = makePair(t, context->method, reference);

        compileReferenceInvoke(
            frame,
            c->nativeCall(
                c->constant(getThunk(t, findVirtualMethodFromReferenceThunk),
                            ir::Type::iptr()),
                0,
                frame->trace(0, 0),
                ir::Type::iptr(),
                args(c->threadRegister(),
                     frame->append(pair),
                     c->peek(1,
                             methodReferenceParameterFootprint(t, ref, false)
                             - 1))),
            ref,
            false,
            isReferenceTailCall(t, code, ip, context->method, ref));
      }
    } break;
    case irem: {
      ir::Value* a = frame->pop(ir::Type::i4());
      ir::Value* b = frame->pop(ir::Type::i4());

      if (inTryBlock(t, code, ip - 1)) {
        c->saveLocals();
        frame->trace(0, 0);
      }

      frame->push(ir::Type::i4(),
                  c->binaryOp(lir::Remainder, ir::Type::i4(), a, b));
    } break;
    case ireturn: {
      handleExit(t, frame);
      c->return_(frame->pop(ir::Type::i4()));
    }
      goto next;

    case freturn: {
      handleExit(t, frame);
      c->return_(frame->pop(ir::Type::f4()));
    }
      goto next;
    case istore:
      frame->store(ir::Type::i4(), code->body()[ip++]);
      break;
    case fstore:
      frame->store(ir::Type::f4(), code->body()[ip++]);
      break;

    case istore_0:
      frame->store(ir::Type::i4(), 0);
      break;
    case fstore_0:
      frame->store(ir::Type::f4(), 0);
      break;

    case istore_1:
      frame->store(ir::Type::i4(), 1);
      break;
    case fstore_1:
      frame->store(ir::Type::f4(), 1);
      break;

    case istore_2:
      frame->store(ir::Type::i4(), 2);
      break;
    case fstore_2:
      frame->store(ir::Type::f4(), 2);
      break;

    case istore_3:
      frame->store(ir::Type::i4(), 3);
      break;
    case fstore_3:
      frame->store(ir::Type::f4(), 3);
      break;
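
    // jsr/jsr_w are the long-deprecated bytecode subroutine calls once
    // emitted for finally blocks; startSubroutine records the call site
    // before jumping to the subroutine entry.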
    case jsr:
    case jsr_w: {
      uint32_t thisIp;

      if (instruction == jsr) {
        uint32_t offset = codeReadInt16(t, code, ip);
        thisIp = ip - 3;
        newIp = thisIp + offset;
      } else {
        uint32_t offset = codeReadInt32(t, code, ip);
        thisIp = ip - 5;
        newIp = thisIp + offset;
      }

      assertT(t, newIp < code->length());

      frame->startSubroutine(newIp, ip);

      c->jmp(frame->machineIpValue(newIp));

      ip = newIp;
    } break;
    case l2d: {
      frame->pushLarge(ir::Type::f8(),
                       c->i2f(ir::Type::f8(), frame->popLarge(ir::Type::i8())));
    } break;

    case l2f: {
      frame->push(ir::Type::f4(),
                  c->i2f(ir::Type::f4(), frame->popLarge(ir::Type::i8())));
    } break;

    case l2i:
      frame->push(ir::Type::i4(),
                  c->truncate(ir::Type::i4(), frame->popLarge(ir::Type::i8())));
      break;
    case ladd:
    case land:
    case lor:
    case lsub:
    case lxor:
    case lmul: {
      ir::Value* a = frame->popLarge(ir::Type::i8());
      ir::Value* b = frame->popLarge(ir::Type::i8());
      frame->pushLarge(
          ir::Type::i8(),
          c->binaryOp(
              toCompilerBinaryOp(t, instruction), ir::Type::i8(), a, b));
    } break;
    case lcmp: {
      ir::Value* a = frame->popLarge(ir::Type::i8());
      ir::Value* b = frame->popLarge(ir::Type::i8());

      if (integerBranch(t, frame, code, ip, a, b, &newIp)) {
        goto branch;
      } else {
        frame->push(ir::Type::i4(),
                    c->nativeCall(c->constant(getThunk(t, compareLongsThunk),
                                              ir::Type::iptr()),
                                  0,
                                  0,
                                  ir::Type::i4(),
                                  args(nullptr, a, nullptr, b)));
      }
    } break;
    case lconst_0:
      frame->pushLarge(ir::Type::i8(), c->constant(0, ir::Type::i8()));
      break;

    case lconst_1:
      frame->pushLarge(ir::Type::i8(), c->constant(1, ir::Type::i8()));
      break;
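
    // ldc/ldc_w: object pool entries (strings, classes) are pushed as object
    // constants, with unresolved class entries handled through getJClass
    // thunks; numeric entries are pushed as i4 or f4 depending on the pool
    // tag bit.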
    case ldc:
    case ldc_w: {
      uint16_t index;

      if (instruction == ldc) {
        index = code->body()[ip++];
      } else {
        index = codeReadInt16(t, code, ip);
      }

      GcSingleton* pool = code->pool();

      if (singletonIsObject(t, pool, index - 1)) {
        object v = singletonObject(t, pool, index - 1);

        loadMemoryBarrier();

        if (objectClass(t, v) == type(t, GcReference::Type)) {
          GcReference* reference = cast<GcReference>(t, v);
          PROTECT(t, reference);

          v = resolveClassInPool(t, context->method, index - 1, false);

          if (UNLIKELY(v == 0)) {
            frame->push(
                ir::Type::object(),
                c->nativeCall(
                    c->constant(getThunk(t, getJClassFromReferenceThunk),
                                ir::Type::iptr()),
                    0,
                    frame->trace(0, 0),
                    ir::Type::object(),
                    args(c->threadRegister(),
                         frame->append(
                             makePair(t, context->method, reference)))));
          }
        }

        if (v) {
          if (objectClass(t, v) == type(t, GcClass::Type)) {
            frame->push(
                ir::Type::object(),
                c->nativeCall(c->constant(getThunk(t, getJClass64Thunk),
                                          ir::Type::iptr()),
                              0,
                              frame->trace(0, 0),
                              ir::Type::object(),
                              args(c->threadRegister(), frame->append(v))));
          } else {
            frame->push(ir::Type::object(), frame->append(v));
          }
        }
      } else {
        ir::Type type = singletonBit(t, pool, poolSize(t, pool), index - 1)
                            ? ir::Type::f4()
                            : ir::Type::i4();
        frame->push(type,
                    c->constant(singletonValue(t, pool, index - 1), type));
      }
    } break;
    case ldc2_w: {
      uint16_t index = codeReadInt16(t, code, ip);

      GcSingleton* pool = code->pool();

      uint64_t v;
      memcpy(&v, &singletonValue(t, pool, index - 1), 8);
      ir::Type type = singletonBit(t, pool, poolSize(t, pool), index - 1)
                          ? ir::Type::f8()
                          : ir::Type::i8();
      frame->pushLarge(type, c->constant(v, type));
    } break;
    case ldiv_: {
      ir::Value* a = frame->popLarge(ir::Type::i8());
      ir::Value* b = frame->popLarge(ir::Type::i8());

      if (inTryBlock(t, code, ip - 1)) {
        c->saveLocals();
        frame->trace(0, 0);
      }

      frame->pushLarge(ir::Type::i8(),
                       c->binaryOp(lir::Divide, ir::Type::i8(), a, b));
    } break;
    case lload:
      frame->loadLarge(ir::Type::i8(), code->body()[ip++]);
      break;
    case dload:
      frame->loadLarge(ir::Type::f8(), code->body()[ip++]);
      break;

    case lload_0:
      frame->loadLarge(ir::Type::i8(), 0);
      break;
    case dload_0:
      frame->loadLarge(ir::Type::f8(), 0);
      break;

    case lload_1:
      frame->loadLarge(ir::Type::i8(), 1);
      break;
    case dload_1:
      frame->loadLarge(ir::Type::f8(), 1);
      break;

    case lload_2:
      frame->loadLarge(ir::Type::i8(), 2);
      break;
    case dload_2:
      frame->loadLarge(ir::Type::f8(), 2);
      break;

    case lload_3:
      frame->loadLarge(ir::Type::i8(), 3);
      break;
    case dload_3:
      frame->loadLarge(ir::Type::f8(), 3);
      break;
    case lneg:
      frame->pushLarge(
          ir::Type::i8(),
          c->unaryOp(lir::Negate, frame->popLarge(ir::Type::i8())));
      break;
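
    // lookupswitch: operands start at the next 4-byte boundary after the
    // opcode.  The key/target pairs are emitted into the constant pool and
    // the lookUpAddress thunk selects the target at run time, rebased
    // against the code image when compiling ahead of time (bootContext).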
    case lookupswitch: {
      int32_t base = ip - 1;

      ip = (ip + 3) & ~3;  // pad to four byte boundary

      ir::Value* key = frame->pop(ir::Type::i4());

      uint32_t defaultIp = base + codeReadInt32(t, code, ip);
      assertT(t, defaultIp < code->length());

      int32_t pairCount = codeReadInt32(t, code, ip);

      if (pairCount) {
        ir::Value* default_ = frame->addressOperand(
            frame->addressPromise(frame->machineIp(defaultIp)));

        avian::codegen::Promise* start = 0;
        uint32_t* ipTable
            = static_cast<uint32_t*>(stack.push(sizeof(uint32_t) * pairCount));
        for (int32_t i = 0; i < pairCount; ++i) {
          unsigned index = ip + (i * 8);
          int32_t key = codeReadInt32(t, code, index);
          uint32_t newIp = base + codeReadInt32(t, code, index);
          assertT(t, newIp < code->length());

          ipTable[i] = newIp;

          avian::codegen::Promise* p = c->poolAppend(key);
          if (i == 0) {
            start = p;
          }
          c->poolAppendPromise(frame->addressPromise(frame->machineIp(newIp)));
        }
        assertT(t, start);

        ir::Value* address = c->nativeCall(
            c->constant(getThunk(t, lookUpAddressThunk), ir::Type::iptr()),
            0,
            0,
            ir::Type::iptr(),
            args(key,
                 frame->absoluteAddressOperand(start),
                 c->constant(pairCount, ir::Type::i4()),
                 default_));

        c->jmp(context->bootContext
                   ? c->binaryOp(lir::Add,
                                 ir::Type::iptr(),
                                 c->memory(c->threadRegister(),
                                           ir::Type::iptr(),
                                           TARGET_THREAD_CODEIMAGE),
                                 address)
                   : address);

        new (stack.push(sizeof(SwitchState)))
            SwitchState(c->saveState(), pairCount, defaultIp, 0, 0, 0, 0);

        goto switchloop;
      } else {
        // a switch statement with no cases, apparently
        c->jmp(frame->machineIpValue(defaultIp));
        ip = defaultIp;
      }
    } break;
    case lrem: {
      ir::Value* a = frame->popLarge(ir::Type::i8());
      ir::Value* b = frame->popLarge(ir::Type::i8());

      if (inTryBlock(t, code, ip - 1)) {
        c->saveLocals();
        frame->trace(0, 0);
      }

      frame->pushLarge(ir::Type::i8(),
                       c->binaryOp(lir::Remainder, ir::Type::i8(), a, b));
    } break;
    case lreturn: {
      handleExit(t, frame);
      c->return_(frame->popLarge(ir::Type::i8()));
    }
      goto next;

    case dreturn: {
      handleExit(t, frame);
      c->return_(frame->popLarge(ir::Type::f8()));
    }
      goto next;
    case lshl:
    case lshr:
    case lushr: {
      ir::Value* a = frame->pop(ir::Type::i4());
      ir::Value* b = frame->popLarge(ir::Type::i8());
      frame->pushLarge(
          ir::Type::i8(),
          c->binaryOp(
              toCompilerBinaryOp(t, instruction), ir::Type::i8(), a, b));
    } break;
    case lstore:
      frame->storeLarge(ir::Type::i8(), code->body()[ip++]);
      break;
    case dstore:
      frame->storeLarge(ir::Type::f8(), code->body()[ip++]);
      break;

    case lstore_0:
      frame->storeLarge(ir::Type::i8(), 0);
      break;
    case dstore_0:
      frame->storeLarge(ir::Type::f8(), 0);
      break;

    case lstore_1:
      frame->storeLarge(ir::Type::i8(), 1);
      break;
    case dstore_1:
      frame->storeLarge(ir::Type::f8(), 1);
      break;

    case lstore_2:
      frame->storeLarge(ir::Type::i8(), 2);
      break;
    case dstore_2:
      frame->storeLarge(ir::Type::f8(), 2);
      break;

    case lstore_3:
      frame->storeLarge(ir::Type::i8(), 3);
      break;
    case dstore_3:
      frame->storeLarge(ir::Type::f8(), 3);
      break;
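
    // monitorenter/monitorexit compile to calls into the monitor
    // acquire/release thunks.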
    case monitorenter: {
      ir::Value* target = frame->pop(ir::Type::object());
      c->nativeCall(c->constant(getThunk(t, acquireMonitorForObjectThunk),
                                ir::Type::iptr()),
                    0,
                    frame->trace(0, 0),
                    ir::Type::void_(),
                    args(c->threadRegister(), target));
    } break;

    case monitorexit: {
      ir::Value* target = frame->pop(ir::Type::object());
      c->nativeCall(c->constant(getThunk(t, releaseMonitorForObjectThunk),
                                ir::Type::iptr()),
                    0,
                    frame->trace(0, 0),
                    ir::Type::void_(),
                    args(c->threadRegister(), target));
    } break;
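
    // multianewarray leaves the dimension counts on the operand stack; the
    // frame offset passed to the thunk tells it where those counts live in
    // the current frame.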
    case multianewarray: {
      uint16_t index = codeReadInt16(t, code, ip);
      uint8_t dimensions = code->body()[ip++];

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcClass* class_
          = resolveClassInPool(t, context->method, index - 1, false);

      object argument;
      Thunk thunk;
      if (LIKELY(class_)) {
        argument = class_;
        thunk = makeMultidimensionalArrayThunk;
      } else {
        argument = makePair(t, context->method, reference);
        thunk = makeMultidimensionalArrayFromReferenceThunk;
      }

      unsigned offset
          = localOffset(t,
                        localSize(t, context->method) + c->topOfStack(),
                        context->method) + t->arch->frameReturnAddressSize();

      ir::Value* result
          = c->nativeCall(c->constant(getThunk(t, thunk), ir::Type::iptr()),
                          0,
                          frame->trace(0, 0),
                          ir::Type::object(),
                          args(c->threadRegister(),
                               frame->append(argument),
                               c->constant(dimensions, ir::Type::i4()),
                               c->constant(offset, ir::Type::i4())));

      frame->popFootprint(dimensions);
      frame->push(ir::Type::object(), result);
    } break;
    case new_: {
      uint16_t index = codeReadInt16(t, code, ip);

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcClass* class_
          = resolveClassInPool(t, context->method, index - 1, false);

      object argument;
      Thunk thunk;
      if (LIKELY(class_)) {
        argument = class_;
        if (class_->vmFlags() & (WeakReferenceFlag | HasFinalizerFlag)) {
          thunk = makeNewGeneral64Thunk;
        } else {
          thunk = makeNew64Thunk;
        }
      } else {
        argument = makePair(t, context->method, reference);
        thunk = makeNewFromReferenceThunk;
      }

      frame->push(
          ir::Type::object(),
          c->nativeCall(c->constant(getThunk(t, thunk), ir::Type::iptr()),
                        0,
                        frame->trace(0, 0),
                        ir::Type::object(),
                        args(c->threadRegister(), frame->append(argument))));
    } break;
    case newarray: {
      uint8_t type = code->body()[ip++];

      ir::Value* length = frame->pop(ir::Type::i4());

      frame->push(ir::Type::object(),
                  c->nativeCall(c->constant(getThunk(t, makeBlankArrayThunk),
                                            ir::Type::iptr()),
                                0,
                                frame->trace(0, 0),
                                ir::Type::object(),
                                args(c->threadRegister(),
                                     c->constant(type, ir::Type::i4()),
                                     length)));
    } break;

    case nop:
      break;

    case pop_:
      frame->popFootprint(1);
      break;

    case pop2:
      frame->popFootprint(2);
      break;
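
    // putfield/putstatic mirror the get cases: 64-bit volatile stores on
    // 32-bit targets are bracketed by a monitor, while other volatile stores
    // are preceded by a StoreStore barrier.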
    case putfield:
    case putstatic: {
      uint16_t index = codeReadInt16(t, code, ip);

      object reference
          = singletonObject(t, context->method->code()->pool(), index - 1);

      PROTECT(t, reference);

      GcField* field = resolveField(t, context->method, index - 1, false);

      if (LIKELY(field)) {
        int fieldCode = field->code();

        object staticTable = 0;

        if (instruction == putstatic) {
          checkField(t, field, true);

          if (classNeedsInit(t, field->class_())) {
            PROTECT(t, field);

            c->nativeCall(
                c->constant(getThunk(t, tryInitClassThunk), ir::Type::iptr()),
                0,
                frame->trace(0, 0),
                ir::Type::void_(),
                args(c->threadRegister(), frame->append(field->class_())));
          }

          staticTable = field->class_()->staticTable();
        } else {
          checkField(t, field, false);

          if (inTryBlock(t, code, ip - 3)) {
            c->saveLocals();
            frame->trace(0, 0);
          }
        }

        if (field->flags() & ACC_VOLATILE) {
          if (TargetBytesPerWord == 4
              and (fieldCode == DoubleField or fieldCode == LongField)) {
            PROTECT(t, field);

            c->nativeCall(c->constant(getThunk(t, acquireMonitorForObjectThunk),
                                      ir::Type::iptr()),
                          0,
                          frame->trace(0, 0),
                          ir::Type::void_(),
                          args(c->threadRegister(), frame->append(field)));
          } else {
            c->nullaryOp(lir::StoreStoreBarrier);
          }
        }

        ir::Value* value = popField(t, frame, fieldCode);

        ir::Value* table;

        if (instruction == putstatic) {
          PROTECT(t, field);

          table = frame->append(staticTable);
        } else {
          table = frame->pop(ir::Type::object());
        }

        switch (fieldCode) {
        case ByteField:
        case BooleanField:
          c->store(
              value,
              c->memory(
                  table, ir::Type::i1(), targetFieldOffset(context, field)));
          break;

        case CharField:
        case ShortField:
          c->store(
              value,
|
|
|
c->memory(
|
|
|
|
table, ir::Type::i2(), targetFieldOffset(context, field)));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2014-05-02 15:01:57 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case FloatField:
|
2014-05-01 03:54:52 +00:00
|
|
|
c->store(
|
|
|
|
value,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(
|
|
|
|
table, ir::Type::f4(), targetFieldOffset(context, field)));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case IntField:
|
2014-05-01 03:54:52 +00:00
|
|
|
c->store(
|
|
|
|
value,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(
|
|
|
|
table, ir::Type::i4(), targetFieldOffset(context, field)));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case DoubleField:
|
2014-05-01 03:54:52 +00:00
|
|
|
c->store(
|
|
|
|
value,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(
|
|
|
|
table, ir::Type::f8(), targetFieldOffset(context, field)));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2009-09-20 21:43:32 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case LongField:
|
2014-05-01 03:54:52 +00:00
|
|
|
c->store(
|
|
|
|
value,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->memory(
|
|
|
|
table, ir::Type::i8(), targetFieldOffset(context, field)));
|
2011-03-15 23:52:02 +00:00
|
|
|
break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2011-03-15 23:52:02 +00:00
|
|
|
case ObjectField:
|
|
|
|
if (instruction == putfield) {
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(
|
2014-06-01 20:22:14 +00:00
|
|
|
c->constant(getThunk(t, setMaybeNullThunk), ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
ir::Type::void_(),
|
2014-07-12 15:41:52 +00:00
|
|
|
args(c->threadRegister(),
|
2014-07-17 00:07:56 +00:00
|
|
|
table,
|
|
|
|
c->constant(targetFieldOffset(context, field),
|
|
|
|
ir::Type::i4()),
|
|
|
|
value));
|
2011-03-15 23:52:02 +00:00
|
|
|
} else {
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(
|
2014-06-28 20:41:27 +00:00
|
|
|
c->constant(getThunk(t, setObjectThunk), ir::Type::iptr()),
|
2014-06-01 20:22:14 +00:00
|
|
|
0,
|
|
|
|
0,
|
|
|
|
ir::Type::void_(),
|
2014-07-12 15:41:52 +00:00
|
|
|
args(c->threadRegister(),
|
2014-07-17 00:07:56 +00:00
|
|
|
table,
|
|
|
|
c->constant(targetFieldOffset(context, field),
|
|
|
|
ir::Type::i4()),
|
|
|
|
value));
|
2011-03-15 23:52:02 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
if (field->flags() & ACC_VOLATILE) {
|
2011-08-30 01:00:17 +00:00
|
|
|
if (TargetBytesPerWord == 4
|
2014-07-11 15:50:18 +00:00
|
|
|
and (fieldCode == DoubleField or fieldCode == LongField)) {
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(c->constant(getThunk(t, releaseMonitorForObjectThunk),
|
2014-07-17 00:07:56 +00:00
|
|
|
ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
ir::Type::void_(),
|
|
|
|
args(c->threadRegister(), frame->append(field)));
|
2011-03-15 23:52:02 +00:00
|
|
|
} else {
|
2013-12-18 23:51:20 +00:00
|
|
|
c->nullaryOp(lir::StoreLoadBarrier);
|
2011-03-15 23:52:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2014-06-29 04:57:07 +00:00
|
|
|
GcReference* ref = cast<GcReference>(t, reference);
|
|
|
|
PROTECT(t, ref);
|
2014-07-11 15:47:57 +00:00
|
|
|
int fieldCode = vm::fieldCode(t, ref->spec()->body()[0]);
|
2011-03-15 23:52:02 +00:00
|
|
|
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* value = popField(t, frame, fieldCode);
|
2014-05-01 03:54:52 +00:00
|
|
|
ir::Type rType = operandTypeForFieldCode(t, fieldCode);
|
2011-03-15 23:52:02 +00:00
|
|
|
|
2014-07-02 21:11:27 +00:00
|
|
|
GcPair* pair = makePair(t, context->method, reference);
|
2011-03-15 23:52:02 +00:00
|
|
|
|
|
|
|
switch (fieldCode) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
case FloatField:
|
|
|
|
case IntField: {
|
|
|
|
if (instruction == putstatic) {
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(
|
2014-05-01 03:54:52 +00:00
|
|
|
c->constant(getThunk(t, setStaticFieldValueFromReferenceThunk),
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::iptr()),
|
2014-05-01 03:54:52 +00:00
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
rType,
|
2014-07-17 00:07:56 +00:00
|
|
|
args(c->threadRegister(), frame->append(pair), value));
|
2011-03-15 23:52:02 +00:00
|
|
|
} else {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* instance = frame->pop(ir::Type::object());
|
2011-03-15 23:52:02 +00:00
|
|
|
|
2014-07-17 00:07:56 +00:00
|
|
|
c->nativeCall(
|
|
|
|
c->constant(getThunk(t, setFieldValueFromReferenceThunk),
|
|
|
|
ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
rType,
|
|
|
|
args(
|
|
|
|
c->threadRegister(), frame->append(pair), instance, value));
|
2011-03-15 23:52:02 +00:00
|
|
|
}
|
|
|
|
} break;
|
|
|
|
|
|
|
|
case DoubleField:
|
|
|
|
case LongField: {
|
|
|
|
if (instruction == putstatic) {
|
2014-07-17 00:07:56 +00:00
|
|
|
c->nativeCall(
|
|
|
|
c->constant(
|
|
|
|
getThunk(t, setStaticLongFieldValueFromReferenceThunk),
|
|
|
|
ir::Type::iptr()),
|
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
rType,
|
|
|
|
args(c->threadRegister(), frame->append(pair), nullptr, value));
|
2011-03-15 23:52:02 +00:00
|
|
|
} else {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* instance = frame->pop(ir::Type::object());
|
2011-03-15 23:52:02 +00:00
|
|
|
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(
|
2014-05-01 03:54:52 +00:00
|
|
|
c->constant(getThunk(t, setLongFieldValueFromReferenceThunk),
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::iptr()),
|
2014-05-01 03:54:52 +00:00
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
rType,
|
2014-07-12 15:41:52 +00:00
|
|
|
args(c->threadRegister(),
|
2014-07-17 00:07:56 +00:00
|
|
|
frame->append(pair),
|
|
|
|
instance,
|
|
|
|
nullptr,
|
|
|
|
value));
|
2011-03-15 23:52:02 +00:00
|
|
|
}
|
|
|
|
} break;
|
|
|
|
|
|
|
|
case ObjectField: {
|
|
|
|
if (instruction == putstatic) {
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(
|
2014-05-01 03:54:52 +00:00
|
|
|
c->constant(
|
|
|
|
getThunk(t, setStaticObjectFieldValueFromReferenceThunk),
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::iptr()),
|
2014-05-01 03:54:52 +00:00
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
rType,
|
2014-07-17 00:07:56 +00:00
|
|
|
args(c->threadRegister(), frame->append(pair), value));
|
2011-03-15 23:52:02 +00:00
|
|
|
} else {
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* instance = frame->pop(ir::Type::object());
|
2011-03-15 23:52:02 +00:00
|
|
|
|
2014-07-12 15:41:52 +00:00
|
|
|
c->nativeCall(
|
2014-05-01 03:54:52 +00:00
|
|
|
c->constant(getThunk(t, setObjectFieldValueFromReferenceThunk),
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::iptr()),
|
2014-05-01 03:54:52 +00:00
|
|
|
0,
|
|
|
|
frame->trace(0, 0),
|
|
|
|
rType,
|
2014-07-17 00:07:56 +00:00
|
|
|
args(
|
|
|
|
c->threadRegister(), frame->append(pair), instance, value));
|
2011-03-15 23:52:02 +00:00
|
|
|
}
|
|
|
|
} break;
|
2009-03-03 03:18:15 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2009-03-04 01:02:11 +00:00
|
|
|
}
|
2009-03-03 03:18:15 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
case ret: {
|
2014-06-29 04:57:07 +00:00
|
|
|
unsigned index = code->body()[ip];
|
2014-05-04 01:09:55 +00:00
|
|
|
|
|
|
|
unsigned returnAddress = frame->endSubroutine(index);
|
|
|
|
c->jmp(frame->machineIpValue(returnAddress));
|
|
|
|
ip = returnAddress;
|
|
|
|
} break;
|
2007-12-26 23:59:55 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case return_:
|
2009-04-26 01:51:33 +00:00
|
|
|
if (needsReturnBarrier(t, context->method)) {
|
2013-12-18 23:51:20 +00:00
|
|
|
c->nullaryOp(lir::StoreStoreBarrier);
|
2009-04-19 22:36:11 +00:00
|
|
|
}
|
2009-04-26 01:51:33 +00:00
|
|
|
|
|
|
|
handleExit(t, frame);
|
2014-05-01 02:01:14 +00:00
|
|
|
c->return_();
|
2012-10-13 15:46:12 +00:00
|
|
|
goto next;
|
2007-10-10 17:26:28 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case sipush:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->push(ir::Type::i4(),
|
2014-05-05 21:21:48 +00:00
|
|
|
c->constant(static_cast<int16_t>(codeReadInt16(t, code, ip)),
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::i4()));
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case swap:
|
|
|
|
frame->swap();
|
|
|
|
break;
|
2007-10-17 01:21:35 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
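// tableswitch: read the padded default/low/high operands, record an
// address promise for every case target, and emit the lower bounds
// check here.  The upper bounds check and the indirect jump through
// the generated table are emitted later, driven by the Untable0 and
// Untable1 entries pushed onto the work stack.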
case tableswitch: {
|
|
|
|
int32_t base = ip - 1;
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
ip = (ip + 3) & ~3; // pad to four byte boundary
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
|
2014-06-29 04:57:07 +00:00
|
|
|
assertT(t, defaultIp < code->length());
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
int32_t bottom = codeReadInt32(t, code, ip);
|
|
|
|
int32_t top = codeReadInt32(t, code, ip);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Promise* start = 0;
|
2012-10-13 15:46:12 +00:00
|
|
|
unsigned count = top - bottom + 1;
|
2014-07-11 15:50:18 +00:00
|
|
|
uint32_t* ipTable
|
|
|
|
= static_cast<uint32_t*>(stack.push(sizeof(uint32_t) * count));
|
2007-12-16 00:24:15 +00:00
|
|
|
for (int32_t i = 0; i < top - bottom + 1; ++i) {
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned index = ip + (i * 4);
|
|
|
|
uint32_t newIp = base + codeReadInt32(t, code, index);
|
2014-06-29 04:57:07 +00:00
|
|
|
assertT(t, newIp < code->length());
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2012-10-13 19:09:24 +00:00
|
|
|
ipTable[i] = newIp;
|
2007-12-16 00:24:15 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
avian::codegen::Promise* p = c->poolAppendPromise(
|
|
|
|
frame->addressPromise(frame->machineIp(newIp)));
|
2007-12-09 22:45:43 +00:00
|
|
|
if (i == 0) {
|
2011-09-20 22:30:30 +00:00
|
|
|
start = p;
|
2007-09-26 23:23:03 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, start);
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Value* key = frame->pop(ir::Type::i4());
|
2014-05-01 03:54:52 +00:00
|
|
|
|
|
|
|
c->condJump(lir::JumpIfLess,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->constant(bottom, ir::Type::i4()),
|
2014-05-01 03:54:52 +00:00
|
|
|
key,
|
2014-05-04 01:09:55 +00:00
|
|
|
frame->machineIpValue(defaultIp));
|
2008-09-20 23:42:46 +00:00
|
|
|
|
2014-06-01 20:22:14 +00:00
|
|
|
c->save(ir::Type::i4(), key);
|
2008-11-07 00:39:38 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
new (stack.push(sizeof(SwitchState))) SwitchState(
|
|
|
|
c->saveState(), count, defaultIp, key, start, bottom, top);
|
2007-12-16 00:24:15 +00:00
|
|
|
|
2012-10-13 15:46:12 +00:00
|
|
|
stack.pushValue(Untable0);
|
2008-01-07 14:51:07 +00:00
|
|
|
ip = defaultIp;
|
2014-07-11 15:50:18 +00:00
|
|
|
}
|
|
|
|
goto start;
|
2007-12-09 22:45:43 +00:00
|
|
|
|
|
|
|
case wide: {
|
2014-06-29 04:57:07 +00:00
|
|
|
switch (code->body()[ip++]) {
|
2007-12-09 22:45:43 +00:00
|
|
|
case aload: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->load(ir::Type::object(), codeReadInt16(t, code, ip));
|
2007-10-04 22:41:19 +00:00
|
|
|
} break;
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case astore: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->store(ir::Type::object(), codeReadInt16(t, code, ip));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iinc: {
|
|
|
|
uint16_t index = codeReadInt16(t, code, ip);
|
2010-09-23 14:50:09 +00:00
|
|
|
int16_t count = codeReadInt16(t, code, ip);
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2014-05-01 03:54:52 +00:00
|
|
|
storeLocal(context,
|
|
|
|
1,
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::i4(),
|
2014-05-01 03:54:52 +00:00
|
|
|
c->binaryOp(lir::Add,
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::i4(),
|
|
|
|
c->constant(count, ir::Type::i4()),
|
|
|
|
loadLocal(context, 1, ir::Type::i4(), index)),
|
2014-05-01 03:54:52 +00:00
|
|
|
index);
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case iload: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->load(ir::Type::i4(), codeReadInt16(t, code, ip));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case istore: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->store(ir::Type::i4(), codeReadInt16(t, code, ip));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lload: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->loadLarge(ir::Type::i8(), codeReadInt16(t, code, ip));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case lstore: {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame->storeLarge(ir::Type::i8(), codeReadInt16(t, code, ip));
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-10-12 14:26:36 +00:00
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
case ret: {
|
|
|
|
unsigned index = codeReadInt16(t, code, ip);
|
2014-05-04 01:09:55 +00:00
|
|
|
|
|
|
|
unsigned returnAddress = frame->endSubroutine(index);
|
|
|
|
c->jmp(frame->machineIpValue(returnAddress));
|
|
|
|
ip = returnAddress;
|
|
|
|
} break;
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
} break;
|
2007-12-26 19:19:45 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
}
|
2012-10-13 15:46:12 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
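// "next" is reached when the current straight-line block has been
// fully compiled.  The explicit work stack then tells us how to
// continue: return, resume at a recorded branch target (Unbranch),
// finish emitting switch machinery (Untable0/Untable1/Unswitch), or
// wind up a subroutine (Unsubroutine).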
next:
|
2012-10-13 15:46:12 +00:00
|
|
|
frame->dispose();
|
|
|
|
frame = 0;
|
|
|
|
stack.pop(sizeof(Frame));
|
2014-05-05 16:49:50 +00:00
|
|
|
stack.pop(stackSize * sizeof(ir::Type));
|
2012-10-13 15:46:12 +00:00
|
|
|
switch (stack.popValue()) {
|
|
|
|
case Return:
|
|
|
|
return;
|
|
|
|
|
|
|
|
case Unbranch:
|
2014-05-04 01:09:55 +00:00
|
|
|
if (DebugInstructions) {
|
|
|
|
fprintf(stderr, "Unbranch\n");
|
|
|
|
}
|
2012-10-13 15:46:12 +00:00
|
|
|
ip = stack.popValue();
|
|
|
|
c->restoreState(reinterpret_cast<Compiler::State*>(stack.popValue()));
|
|
|
|
frame = static_cast<Frame*>(stack.peek(sizeof(Frame)));
|
|
|
|
goto loop;
|
|
|
|
|
|
|
|
case Untable0: {
|
2014-05-04 01:09:55 +00:00
|
|
|
if (DebugInstructions) {
|
|
|
|
fprintf(stderr, "Untable0\n");
|
|
|
|
}
|
2014-07-11 15:50:18 +00:00
|
|
|
SwitchState* s
    = static_cast<SwitchState*>(stack.peek(sizeof(SwitchState)));
|
2012-10-13 15:46:12 +00:00
|
|
|
|
|
|
|
frame = s->frame();
|
|
|
|
|
|
|
|
c->restoreState(s->state);
|
|
|
|
|
2014-05-01 03:54:52 +00:00
|
|
|
c->condJump(lir::JumpIfGreater,
|
2014-06-01 20:22:14 +00:00
|
|
|
c->constant(s->top, ir::Type::i4()),
|
2014-05-01 03:54:52 +00:00
|
|
|
s->key,
|
2014-05-04 01:09:55 +00:00
|
|
|
frame->machineIpValue(s->defaultIp));
|
2014-05-01 02:20:19 +00:00
|
|
|
|
2014-06-01 20:22:14 +00:00
|
|
|
c->save(ir::Type::i4(), s->key);
|
2012-10-13 15:46:12 +00:00
|
|
|
ip = s->defaultIp;
|
|
|
|
stack.pushValue(Untable1);
|
2014-07-11 15:50:18 +00:00
|
|
|
}
|
|
|
|
goto start;
|
2012-10-13 15:46:12 +00:00
|
|
|
|
|
|
|
case Untable1: {
|
2014-05-04 01:09:55 +00:00
|
|
|
if (DebugInstructions) {
|
|
|
|
fprintf(stderr, "Untable1\n");
|
|
|
|
}
|
2014-07-11 15:50:18 +00:00
|
|
|
SwitchState* s
    = static_cast<SwitchState*>(stack.peek(sizeof(SwitchState)));
|
2012-10-13 15:46:12 +00:00
|
|
|
|
|
|
|
frame = s->frame();
|
|
|
|
|
|
|
|
c->restoreState(s->state);
|
|
|
|
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* normalizedKey
|
2014-06-01 20:22:14 +00:00
|
|
|
= (s->bottom
|
|
|
|
? c->binaryOp(lir::Subtract,
|
|
|
|
ir::Type::i4(),
|
|
|
|
c->constant(s->bottom, ir::Type::i4()),
|
|
|
|
s->key)
|
|
|
|
: s->key);
|
2014-05-01 03:54:52 +00:00
|
|
|
|
2014-05-01 18:44:42 +00:00
|
|
|
ir::Value* entry = c->memory(frame->absoluteAddressOperand(s->start),
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::iptr(),
|
2014-05-01 18:44:42 +00:00
|
|
|
0,
|
|
|
|
normalizedKey);
|
2014-05-01 03:54:52 +00:00
|
|
|
|
2014-07-12 17:54:19 +00:00
|
|
|
c->jmp(c->load(ir::ExtendMode::Signed,
|
2014-05-01 03:54:52 +00:00
|
|
|
context->bootContext
|
|
|
|
? c->binaryOp(lir::Add,
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::iptr(),
|
2014-05-01 04:27:19 +00:00
|
|
|
c->memory(c->threadRegister(),
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::iptr(),
|
2014-05-01 03:54:52 +00:00
|
|
|
TARGET_THREAD_CODEIMAGE),
|
|
|
|
entry)
|
|
|
|
: entry,
|
2014-06-01 20:22:14 +00:00
|
|
|
ir::Type::iptr()));
|
2012-10-13 15:46:12 +00:00
|
|
|
|
|
|
|
s->state = c->saveState();
|
2014-07-11 15:50:18 +00:00
|
|
|
}
|
|
|
|
goto switchloop;
|
2012-10-13 15:46:12 +00:00
|
|
|
|
|
|
|
case Unswitch: {
|
2014-05-04 01:09:55 +00:00
|
|
|
if (DebugInstructions) {
|
|
|
|
fprintf(stderr, "Unswitch\n");
|
|
|
|
}
|
2014-07-11 15:50:18 +00:00
|
|
|
SwitchState* s
    = static_cast<SwitchState*>(stack.peek(sizeof(SwitchState)));
|
2014-05-01 06:13:39 +00:00
|
|
|
|
2012-10-13 15:46:12 +00:00
|
|
|
frame = s->frame();
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
c->restoreState(
|
|
|
|
static_cast<SwitchState*>(stack.peek(sizeof(SwitchState)))->state);
|
|
|
|
}
|
|
|
|
goto switchloop;
|
2012-10-13 15:46:12 +00:00
|
|
|
|
|
|
|
case Unsubroutine: {
|
2014-05-04 01:09:55 +00:00
|
|
|
if (DebugInstructions) {
|
|
|
|
fprintf(stderr, "Unsubroutine\n");
|
|
|
|
}
|
2012-10-13 15:46:12 +00:00
|
|
|
ip = stack.popValue();
|
|
|
|
unsigned start = stack.popValue();
|
|
|
|
frame = reinterpret_cast<Frame*>(stack.peek(sizeof(Frame)));
|
|
|
|
frame->endSubroutine(start);
|
2014-07-11 15:50:18 +00:00
|
|
|
}
|
|
|
|
goto loop;
|
2012-10-13 15:46:12 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
abort(t);
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
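// switchloop: compile each recorded case target in turn; when the
// table is exhausted, pop the switch state and continue from the
// default target.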
switchloop : {
|
|
|
|
SwitchState* s
    = static_cast<SwitchState*>(stack.peek(sizeof(SwitchState)));
|
2012-10-13 15:46:12 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
if (s->index < s->count) {
|
|
|
|
ip = s->ipTable()[s->index++];
|
|
|
|
stack.pushValue(Unswitch);
|
|
|
|
goto start;
|
|
|
|
} else {
|
|
|
|
ip = s->defaultIp;
|
|
|
|
unsigned count = s->count * 4;
|
|
|
|
stack.pop(sizeof(SwitchState));
|
|
|
|
stack.pop(count);
|
|
|
|
frame = reinterpret_cast<Frame*>(stack.peek(sizeof(Frame)));
|
|
|
|
goto loop;
|
2012-10-13 15:46:12 +00:00
|
|
|
}
|
2014-07-11 15:50:18 +00:00
|
|
|
}
|
2012-10-13 15:46:12 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
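// branch: save the compiler state and the fall-through ip on the work
// stack so compilation can resume there later, then continue at the
// branch target.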
branch:
|
2012-10-13 15:46:12 +00:00
|
|
|
stack.pushValue(reinterpret_cast<uintptr_t>(c->saveState()));
|
|
|
|
stack.pushValue(ip);
|
|
|
|
stack.pushValue(Unbranch);
|
|
|
|
ip = newIp;
|
|
|
|
goto start;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-04 22:41:19 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
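// Return the first bytecode index in [start, end) that was visited
// during compilation, or -1 if none was.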
int resolveIpForwards(Context* context, int start, int end)
|
2011-03-26 00:55:25 +00:00
|
|
|
{
|
2012-03-11 11:00:08 +00:00
|
|
|
if (start < 0) {
|
|
|
|
start = 0;
|
|
|
|
}
|
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
while (start < end and context->visitTable[start] == 0) {
|
2014-07-11 15:50:18 +00:00
|
|
|
++start;
|
2011-04-09 00:50:22 +00:00
|
|
|
}
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (start >= end) {
|
|
|
|
return -1;
|
2011-03-27 01:55:23 +00:00
|
|
|
} else {
|
2011-04-09 00:50:22 +00:00
|
|
|
return start;
|
|
|
|
}
|
|
|
|
}
|
2011-03-26 00:55:25 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
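// Scan backwards from start for a bytecode index that was visited
// during compilation, stopping at end; return -1 if none is found.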
int resolveIpBackwards(Context* context, int start, int end)
|
2011-04-09 00:50:22 +00:00
|
|
|
{
|
2014-06-28 04:00:05 +00:00
|
|
|
if (start >= static_cast<int>(context->method->code()->length()
|
2014-05-04 01:09:55 +00:00
|
|
|
* (context->subroutineCount + 1))) {
|
2014-06-28 04:00:05 +00:00
|
|
|
start = context->method->code()->length();
|
2013-04-23 03:25:15 +00:00
|
|
|
} else {
|
|
|
|
while (start >= end and context->visitTable[start] == 0) {
|
2014-07-11 15:50:18 +00:00
|
|
|
--start;
|
2013-04-23 03:25:15 +00:00
|
|
|
}
|
2011-04-09 00:50:22 +00:00
|
|
|
}
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (start < end) {
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
return start;
|
2011-03-26 00:55:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
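// The truncate* helpers below copy the first `length` elements of an
// array into a new, smaller one.  They are used to shrink the
// exception-handler and line-number tables after unreachable entries
// have been dropped.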
GcIntArray* truncateIntArray(Thread* t, GcIntArray* array, unsigned length)
|
2011-04-09 00:50:22 +00:00
|
|
|
{
|
2014-06-29 04:57:07 +00:00
|
|
|
expect(t, array->length() > length);
|
2011-04-09 00:50:22 +00:00
|
|
|
|
|
|
|
PROTECT(t, array);
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
GcIntArray* newArray = makeIntArray(t, length);
|
2012-03-17 18:57:59 +00:00
|
|
|
if (length) {
|
2014-07-11 15:47:57 +00:00
|
|
|
memcpy(newArray->body().begin(), array->body().begin(), length * 4);
|
2012-03-17 18:57:59 +00:00
|
|
|
}
|
2011-04-09 00:50:22 +00:00
|
|
|
|
|
|
|
return newArray;
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcArray* truncateArray(Thread* t, GcArray* array, unsigned length)
|
2011-04-09 00:50:22 +00:00
|
|
|
{
|
2014-06-29 04:57:07 +00:00
|
|
|
expect(t, array->length() > length);
|
2011-04-09 00:50:22 +00:00
|
|
|
|
|
|
|
PROTECT(t, array);
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
GcArray* newArray = makeArray(t, length);
|
2012-03-17 18:57:59 +00:00
|
|
|
if (length) {
|
2014-07-11 15:47:57 +00:00
|
|
|
for (size_t i = 0; i < length; i++) {
|
2014-06-25 20:38:13 +00:00
|
|
|
newArray->setBodyElement(t, i, array->body()[i]);
|
|
|
|
}
|
2012-03-17 18:57:59 +00:00
|
|
|
}
|
2011-04-09 00:50:22 +00:00
|
|
|
|
|
|
|
return newArray;
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcLineNumberTable* truncateLineNumberTable(Thread* t,
|
|
|
|
GcLineNumberTable* table,
|
|
|
|
unsigned length)
|
2011-04-09 00:50:22 +00:00
|
|
|
{
|
2014-06-29 04:57:07 +00:00
|
|
|
expect(t, table->length() > length);
|
2011-04-09 00:50:22 +00:00
|
|
|
|
|
|
|
PROTECT(t, table);
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
GcLineNumberTable* newTable = makeLineNumberTable(t, length);
|
2012-03-17 18:57:59 +00:00
|
|
|
if (length) {
|
2014-06-29 04:57:07 +00:00
|
|
|
memcpy(newTable->body().begin(),
|
|
|
|
table->body().begin(),
|
2012-03-17 18:57:59 +00:00
|
|
|
length * sizeof(uint64_t));
|
|
|
|
}
|
2011-04-09 00:50:22 +00:00
|
|
|
|
|
|
|
return newTable;
|
|
|
|
}
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
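// Rewrite the method's exception handler table in terms of machine
// code offsets, duplicating each handler once per subroutine copy of
// the bytecode.  Handlers whose guarded range was never visited are
// dropped, and the index and table are truncated to the entries
// actually produced.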
GcArray* translateExceptionHandlerTable(MyThread* t,
|
2014-07-11 15:47:57 +00:00
|
|
|
Context* context,
|
|
|
|
intptr_t start,
|
|
|
|
intptr_t end)
|
2008-01-07 14:51:07 +00:00
|
|
|
{
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Compiler* c = context->compiler;
|
2011-03-27 01:55:23 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcExceptionHandlerTable* oldTable = cast<GcExceptionHandlerTable>(
|
|
|
|
t, context->method->code()->exceptionHandlerTable());
|
2009-08-10 13:56:16 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
if (oldTable) {
|
|
|
|
PROTECT(t, oldTable);
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
unsigned length = oldTable->length();
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
GcIntArray* newIndex
|
|
|
|
= makeIntArray(t, length * (context->subroutineCount + 1) * 3);
|
2008-04-11 19:03:40 +00:00
|
|
|
PROTECT(t, newIndex);
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcArray* newTable
|
|
|
|
= makeArray(t, length * (context->subroutineCount + 1) + 1);
|
2008-04-24 22:06:36 +00:00
|
|
|
PROTECT(t, newTable);
|
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
unsigned ni = 0;
|
2014-05-04 01:09:55 +00:00
|
|
|
for (unsigned subI = 0; subI <= context->subroutineCount; ++subI) {
|
2014-07-11 15:47:57 +00:00
|
|
|
unsigned duplicatedBaseIp = subI * context->method->code()->length();
|
2014-05-04 01:09:55 +00:00
|
|
|
|
|
|
|
for (unsigned oi = 0; oi < length; ++oi) {
|
2014-06-29 04:57:07 +00:00
|
|
|
uint64_t oldHandler = oldTable->body()[oi];
|
2014-05-04 01:09:55 +00:00
|
|
|
|
|
|
|
int handlerStart = resolveIpForwards(
|
|
|
|
context,
|
|
|
|
duplicatedBaseIp + exceptionHandlerStart(oldHandler),
|
|
|
|
duplicatedBaseIp + exceptionHandlerEnd(oldHandler));
|
|
|
|
|
|
|
|
if (LIKELY(handlerStart >= 0)) {
|
2014-07-11 15:47:57 +00:00
|
|
|
assertT(t,
|
|
|
|
handlerStart
|
|
|
|
< static_cast<int>(context->method->code()->length()
|
|
|
|
* (context->subroutineCount + 1)));
|
2014-05-04 01:09:55 +00:00
|
|
|
|
|
|
|
int handlerEnd = resolveIpBackwards(
|
|
|
|
context,
|
|
|
|
duplicatedBaseIp + exceptionHandlerEnd(oldHandler),
|
|
|
|
duplicatedBaseIp + exceptionHandlerStart(oldHandler));
|
|
|
|
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, handlerEnd >= 0);
|
2014-07-11 15:47:57 +00:00
|
|
|
assertT(
|
|
|
|
t,
|
|
|
|
handlerEnd <= static_cast<int>(context->method->code()->length()
|
|
|
|
* (context->subroutineCount + 1)));
|
2014-05-04 01:09:55 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
newIndex->body()[ni * 3] = c->machineIp(handlerStart)->value()
|
|
|
|
- start;
|
2014-05-04 01:09:55 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
newIndex->body()[(ni * 3) + 1]
|
|
|
|
= (handlerEnd
|
|
|
|
== static_cast<int>(context->method->code()->length())
|
2014-05-04 01:09:55 +00:00
|
|
|
? end
|
|
|
|
: c->machineIp(handlerEnd)->value()) - start;
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
newIndex->body()[(ni * 3) + 2]
|
2014-05-04 01:09:55 +00:00
|
|
|
= c->machineIp(exceptionHandlerIp(oldHandler))->value() - start;
|
|
|
|
|
|
|
|
object type;
|
|
|
|
if (exceptionHandlerCatchType(oldHandler)) {
|
2014-07-02 21:11:27 +00:00
|
|
|
type = resolveClassInPool(
|
|
|
|
t, context->method, exceptionHandlerCatchType(oldHandler) - 1);
|
2014-05-04 01:09:55 +00:00
|
|
|
} else {
|
|
|
|
type = 0;
|
|
|
|
}
|
2011-04-09 00:50:22 +00:00
|
|
|
|
2014-06-25 20:38:13 +00:00
|
|
|
newTable->setBodyElement(t, ni + 1, type);
|
2011-04-09 00:50:22 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
++ni;
|
2011-04-09 00:50:22 +00:00
|
|
|
}
|
2008-04-24 22:06:36 +00:00
|
|
|
}
|
2011-04-09 00:50:22 +00:00
|
|
|
}
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (UNLIKELY(ni < length)) {
|
|
|
|
newIndex = truncateIntArray(t, newIndex, ni * 3);
|
|
|
|
newTable = truncateArray(t, newTable, ni + 1);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2014-07-02 21:11:27 +00:00
|
|
|
newTable->setBodyElement(t, 0, newIndex);
|
2011-04-09 00:50:22 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
return newTable;
|
|
|
|
} else {
|
|
|
|
return 0;
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
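// Rewrite the line number table in terms of machine code offsets,
// dropping entries whose bytecode was never visited.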
GcLineNumberTable* translateLineNumberTable(MyThread* t,
|
|
|
|
Context* context,
|
|
|
|
intptr_t start)
|
2008-01-07 14:51:07 +00:00
|
|
|
{
|
2014-06-29 04:57:07 +00:00
|
|
|
GcLineNumberTable* oldTable = context->method->code()->lineNumberTable();
|
2008-01-07 14:51:07 +00:00
|
|
|
if (oldTable) {
|
|
|
|
PROTECT(t, oldTable);
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
unsigned length = oldTable->length();
|
|
|
|
GcLineNumberTable* newTable = makeLineNumberTable(t, length);
|
2011-04-09 00:50:22 +00:00
|
|
|
unsigned ni = 0;
|
|
|
|
for (unsigned oi = 0; oi < length; ++oi) {
|
2014-06-29 04:57:07 +00:00
|
|
|
uint64_t oldLine = oldTable->body()[oi];
|
2011-04-09 00:50:22 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
int ip = resolveIpForwards(
|
|
|
|
context,
|
|
|
|
lineNumberIp(oldLine),
|
|
|
|
oi + 1 < length ? lineNumberIp(oldTable->body()[oi + 1]) - 1
|
|
|
|
: lineNumberIp(oldLine) + 1);
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (LIKELY(ip >= 0)) {
|
2014-07-11 15:47:57 +00:00
|
|
|
newTable->body()[ni++]
|
|
|
|
= lineNumber(context->compiler->machineIp(ip)->value() - start,
|
|
|
|
lineNumberLine(oldLine));
|
2011-04-09 00:50:22 +00:00
|
|
|
}
|
|
|
|
}
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
if (UNLIKELY(ni < length)) {
|
2014-05-04 01:09:55 +00:00
|
|
|
newTable = truncateLineNumberTable(t, newTable, ni);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
return newTable;
|
|
|
|
} else {
|
|
|
|
return 0;
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void printSet(uintptr_t* m, unsigned limit)
|
2008-01-07 16:01:35 +00:00
|
|
|
{
|
2008-11-25 23:01:30 +00:00
|
|
|
if (limit) {
|
2012-12-12 23:04:26 +00:00
|
|
|
for (unsigned i = 0; i < 32; ++i) {
|
|
|
|
if ((*m >> i) & 1) {
|
2008-11-25 23:01:30 +00:00
|
|
|
fprintf(stderr, "1");
|
|
|
|
} else {
|
|
|
|
fprintf(stderr, "_");
|
|
|
|
}
|
2008-01-07 16:01:35 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
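// Compute the GC root map on entry to an exception handler by ANDing
// together the root maps recorded at every GC point inside the
// corresponding try block.  This is conservative but safe; see the
// accompanying commit message for the rationale.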
void calculateTryCatchRoots(Context* context,
|
|
|
|
uintptr_t* roots,
|
|
|
|
unsigned mapSize,
|
|
|
|
unsigned start,
|
|
|
|
unsigned end)
|
fix stack frame mapping code for exception handlers
Previously, the stack frame mapping code (responsible for statically
calculating the map of GC roots for a method's stack frame during JIT
compilation) would assume that the map of GC roots on entry to an
exception handler is the same as on entry to the "try" block which the
handler is attached to. Technically, this is true, but the algorithm
we use does not consider whether a local variable is still "live"
(i.e. will be read later) when calculating the map - only whether we
can expect to find a reference there via normal (non-exceptional)
control flow. This can backfire if, within a "try" block, the stack
location which held an object reference on entry to the block gets
overwritten with a non-reference (i.e. a primitive). If an exception
is later thrown from such a block, we might end up trying to treat
that non-reference as a reference during GC, which will crash the VM.
The ideal way to fix this is to calculate the true interval for which
each value is live and use that to produce the stack frame maps. This
would provide the added benefit of ensuring that the garbage collector
does not visit references which, although still present on the stack,
will not be used again.
However, this commit uses the less invasive strategy of ANDing
together the root maps at each GC point within a "try" block and using
the result as the map on entry to the corresponding exception
handler(s). This should give us safe, if not optimal, results. Later
on, we can refine it as described above.
2010-02-05 01:03:32 +00:00
|
|
|
{
|
|
|
|
memset(roots, 0xFF, mapSize * BytesPerWord);
|
|
|
|
|
|
|
|
if (DebugFrameMaps) {
|
2014-05-04 01:09:55 +00:00
|
|
|
fprintf(stderr, "calculate try/catch roots from %d to %d\n", start, end);
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (TraceElement* te = context->traceLog; te; te = te->next) {
|
|
|
|
if (te->ip >= start and te->ip < end) {
|
|
|
|
uintptr_t* traceRoots = 0;
|
2014-05-04 01:09:55 +00:00
|
|
|
traceRoots = te->map;
|
|
|
|
te->watch = true;
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
|
|
|
|
if (traceRoots) {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, " use roots at ip %3d: ", te->ip);
|
2012-12-12 23:04:26 +00:00
|
|
|
printSet(traceRoots, mapSize);
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned wi = 0; wi < mapSize; ++wi) {
|
|
|
|
roots[wi] &= traceRoots[wi];
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, " skip roots at ip %3d\n", te->ip);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "result roots : ");
|
2012-12-12 23:04:26 +00:00
|
|
|
printSet(roots, mapSize);
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
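// Walk the event log recorded while generating IR and compute, for
// each trace point, which stack and local slots hold object
// references.  Returns the event index just past the last event
// consumed; if resultRoots is non-null it receives the root map at
// the point where the walk stopped.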
unsigned calculateFrameMaps(MyThread* t,
|
|
|
|
Context* context,
|
|
|
|
uintptr_t* originalRoots,
|
|
|
|
unsigned eventIndex,
|
|
|
|
uintptr_t* resultRoots)
|
2008-01-07 14:51:07 +00:00
|
|
|
{
|
2008-01-20 23:03:28 +00:00
|
|
|
// for each instruction with more than one predecessor, and for each
|
|
|
|
// stack position, determine if there exists a path to that
|
|
|
|
// instruction such that there is not an object pointer left at that
|
|
|
|
// stack position (i.e. it is uninitialized or contains primitive
|
|
|
|
// data).
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
unsigned mapSize = frameMapSizeInWords(t, context->method);
|
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uintptr_t, roots, mapSize);
|
2008-01-20 23:03:28 +00:00
|
|
|
if (originalRoots) {
|
2009-08-27 00:26:44 +00:00
|
|
|
memcpy(RUNTIME_ARRAY_BODY(roots), originalRoots, mapSize * BytesPerWord);
|
2008-01-20 23:03:28 +00:00
|
|
|
} else {
|
2009-08-27 00:26:44 +00:00
|
|
|
memset(RUNTIME_ARRAY_BODY(roots), 0, mapSize * BytesPerWord);
|
2008-01-20 23:03:28 +00:00
|
|
|
}
|
2008-01-07 14:51:07 +00:00
|
|
|
|
|
|
|
int32_t ip = -1;
|
|
|
|
|
2008-01-08 17:10:24 +00:00
|
|
|
// invariant: for each stack position, roots contains a zero at that
|
|
|
|
// position if there exists some path to the current instruction
|
|
|
|
// such that there is definitely not an object pointer at that
|
|
|
|
// position. Otherwise, roots contains a one at that position,
|
|
|
|
// meaning either all known paths result in an object pointer at
|
|
|
|
// that position, or the contents of that position are as yet
|
|
|
|
// unknown.
|
|
|
|
|
2008-03-05 21:44:17 +00:00
|
|
|
unsigned length = context->eventLog.length();
|
|
|
|
while (eventIndex < length) {
|
2008-01-20 23:03:28 +00:00
|
|
|
Event e = static_cast<Event>(context->eventLog.get(eventIndex++));
|
2008-01-07 14:51:07 +00:00
|
|
|
switch (e) {
|
2008-07-05 20:21:13 +00:00
|
|
|
case PushContextEvent: {
|
2014-05-04 01:09:55 +00:00
|
|
|
eventIndex = calculateFrameMaps(t,
|
|
|
|
context,
|
|
|
|
RUNTIME_ARRAY_BODY(roots),
|
|
|
|
eventIndex, /*subroutinePath,*/
|
|
|
|
resultRoots);
|
2008-01-07 14:51:07 +00:00
|
|
|
} break;
|
|
|
|
|
2008-07-05 20:21:13 +00:00
|
|
|
case PopContextEvent:
|
2011-02-16 21:29:57 +00:00
|
|
|
goto exit;
|
2008-01-07 14:51:07 +00:00
|
|
|
|
|
|
|
case IpEvent: {
|
2008-01-20 23:03:28 +00:00
|
|
|
ip = context->eventLog.get2(eventIndex);
|
2008-03-05 21:44:17 +00:00
|
|
|
eventIndex += 2;
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2008-01-07 21:32:41 +00:00
|
|
|
if (DebugFrameMaps) {
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, " roots at ip %3d: ", ip);
|
2012-12-12 23:04:26 +00:00
|
|
|
printSet(RUNTIME_ARRAY_BODY(roots), mapSize);
|
2008-01-07 21:32:41 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(context->thread, ip * mapSize <= context->rootTable.count);
|
2014-05-04 01:09:55 +00:00
|
|
|
uintptr_t* tableRoots = context->rootTable.begin() + (ip * mapSize);
|
2008-01-20 23:03:28 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
if (context->visitTable[ip] > 1) {
|
|
|
|
for (unsigned wi = 0; wi < mapSize; ++wi) {
|
2009-08-27 00:26:44 +00:00
|
|
|
uintptr_t newRoots = tableRoots[wi] & RUNTIME_ARRAY_BODY(roots)[wi];
|
2008-03-05 21:44:17 +00:00
|
|
|
|
|
|
|
if ((eventIndex == length
|
2008-07-05 20:21:13 +00:00
|
|
|
or context->eventLog.get(eventIndex) == PopContextEvent)
|
2014-07-11 15:50:18 +00:00
|
|
|
and newRoots != tableRoots[wi]) {
|
2008-03-05 21:44:17 +00:00
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "dirty roots!\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
context->dirtyRoots = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tableRoots[wi] = newRoots;
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(roots)[wi] &= tableRoots[wi];
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2008-01-07 16:01:35 +00:00
|
|
|
if (DebugFrameMaps) {
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, " table roots at ip %3d: ", ip);
|
2012-12-12 23:04:26 +00:00
|
|
|
printSet(tableRoots, mapSize);
|
2008-01-07 16:01:35 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
2008-01-20 23:03:28 +00:00
|
|
|
} else {
|
2009-08-27 00:26:44 +00:00
|
|
|
memcpy(tableRoots, RUNTIME_ARRAY_BODY(roots), mapSize * BytesPerWord);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
} break;
|
|
|
|
|
|
|
|
case MarkEvent: {
|
2008-01-20 23:03:28 +00:00
|
|
|
unsigned i = context->eventLog.get2(eventIndex);
|
|
|
|
eventIndex += 2;
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
markBit(RUNTIME_ARRAY_BODY(roots), i);
|
2008-01-07 14:51:07 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case ClearEvent: {
|
2008-01-20 23:03:28 +00:00
|
|
|
unsigned i = context->eventLog.get2(eventIndex);
|
|
|
|
eventIndex += 2;
|
2008-03-05 21:44:17 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
clearBit(RUNTIME_ARRAY_BODY(roots), i);
|
2008-01-07 14:51:07 +00:00
|
|
|
} break;
|
|
|
|
|
2009-07-13 23:49:15 +00:00
|
|
|
case PushExceptionHandlerEvent: {
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
unsigned start = context->eventLog.get2(eventIndex);
|
|
|
|
eventIndex += 2;
|
|
|
|
unsigned end = context->eventLog.get2(eventIndex);
|
2009-06-16 19:41:31 +00:00
|
|
|
eventIndex += 2;
|
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
calculateTryCatchRoots(
|
|
|
|
context, RUNTIME_ARRAY_BODY(roots), mapSize, start, end);
|
2009-07-13 23:49:15 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
eventIndex = calculateFrameMaps(
|
|
|
|
t, context, RUNTIME_ARRAY_BODY(roots), eventIndex, 0);
|
2009-06-16 19:41:31 +00:00
|
|
|
} break;
|
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
case TraceEvent: {
|
2014-07-11 15:50:18 +00:00
|
|
|
TraceElement* te;
|
|
|
|
context->eventLog.get(eventIndex, &te, BytesPerWord);
|
2008-11-09 23:56:37 +00:00
|
|
|
if (DebugFrameMaps) {
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
fprintf(stderr, " trace roots at ip %3d: ", ip);
|
2012-12-12 23:04:26 +00:00
|
|
|
printSet(RUNTIME_ARRAY_BODY(roots), mapSize);
|
2008-11-09 23:56:37 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
2014-05-29 04:17:25 +00:00
|
|
|
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
uintptr_t* map;
|
|
|
|
bool watch;
|
2014-05-04 01:09:55 +00:00
|
|
|
map = te->map;
|
|
|
|
watch = te->watch;
|
fix stack frame mapping code for exception handlers
2010-02-05 01:03:32 +00:00
|
|
|
|
|
|
|
for (unsigned wi = 0; wi < mapSize; ++wi) {
|
|
|
|
uintptr_t v = RUNTIME_ARRAY_BODY(roots)[wi];
|
|
|
|
|
|
|
|
if (watch and map[wi] != v) {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "dirty roots due to trace watch!\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
context->dirtyRoots = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
map[wi] = v;
|
2009-06-26 21:36:04 +00:00
|
|
|
}
|
2008-01-07 16:01:35 +00:00
|
|
|
|
2008-01-20 23:03:28 +00:00
|
|
|
eventIndex += BytesPerWord;
|
2008-01-07 14:51:07 +00:00
|
|
|
} break;
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
}
|
2008-01-07 16:01:35 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
exit:
|
2011-02-16 21:29:57 +00:00
|
|
|
if (resultRoots and ip != -1) {
|
|
|
|
if (DebugFrameMaps) {
|
|
|
|
fprintf(stderr, "result roots at ip %3d: ", ip);
|
2012-12-12 23:04:26 +00:00
|
|
|
printSet(RUNTIME_ARRAY_BODY(roots), mapSize);
|
2011-02-16 21:29:57 +00:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(resultRoots, RUNTIME_ARRAY_BODY(roots), mapSize * BytesPerWord);
|
|
|
|
}
|
|
|
|
|
2008-01-20 23:03:28 +00:00
|
|
|
return eventIndex;
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
int compareTraceElementPointers(const void* va, const void* vb)
{
  TraceElement* a = *static_cast<TraceElement* const*>(va);
  TraceElement* b = *static_cast<TraceElement* const*>(vb);
  if (a->address->value() > b->address->value()) {
    return 1;
  } else if (a->address->value() < b->address->value()) {
    return -1;
  } else {
    return 0;
  }
}
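
// Added note (not in the original source): this comparator is intended for
// qsort over an array of TraceElement*, ordering the elements by ascending
// machine-code address so that the frame-map table built later can be
// binary-searched by code offset.  A minimal usage sketch, assuming
// `elements` and `count` are in scope:
//
//   qsort(elements, count, sizeof(TraceElement*), compareTraceElementPointers);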
|
|
|
|
|
uint8_t* finish(MyThread* t,
                FixedAllocator* allocator,
                avian::codegen::Assembler* a,
                const char* name,
                unsigned length)
{
  uint8_t* start
      = static_cast<uint8_t*>(allocator->allocate(length, TargetBytesPerWord));

  a->setDestination(start);
  a->write();

  logCompile(t, start, length, 0, name, 0);

  return start;
}
|
2007-10-12 14:26:36 +00:00
|
|
|
|
void setBit(int32_t* dst, unsigned index)
{
  dst[index / 32] |= static_cast<int32_t>(1) << (index % 32);
}

void clearBit(int32_t* dst, unsigned index)
{
  dst[index / 32] &= ~(static_cast<int32_t>(1) << (index % 32));
}
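
// Added illustrative sketch (not in the original source): these helpers treat
// an int32_t array as a flat bit set, one bit per frame-map slot, 32 slots per
// word.  For example:
//
//   int32_t map[2] = {0, 0};
//   setBit(map, 0);    // map[0] == 0x00000001
//   setBit(map, 33);   // map[1] == 0x00000002  (bit 1 of the second word)
//   clearBit(map, 33); // map[1] == 0x00000000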
|
|
|
|
|
void copyFrameMap(int32_t* dst,
                  uintptr_t* src,
                  unsigned mapSizeInBits,
                  unsigned offset,
                  TraceElement* p)
{
  if (DebugFrameMaps) {
    fprintf(stderr, " orig roots at ip %3d: ", p->ip);
    printSet(src, ceilingDivide(mapSizeInBits, BitsPerWord));
    fprintf(stderr, "\n");

    fprintf(stderr, " final roots at ip %3d: ", p->ip);
  }

  for (unsigned j = 0; j < p->argumentIndex; ++j) {
    if (getBit(src, j)) {
      if (DebugFrameMaps) {
        fprintf(stderr, "1");
      }
      setBit(dst, offset + j);
    } else {
      if (DebugFrameMaps) {
        fprintf(stderr, "_");
      }
      clearBit(dst, offset + j);
    }
  }

  if (DebugFrameMaps) {
    fprintf(stderr, "\n");
  }
}
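
// Added note: copyFrameMap re-packs one trace element's GC-root bitmap
// (stored as machine words in `src`) into the shared int32_t table at bit
// position `offset`, copying only the first p->argumentIndex bits.  The
// caller below passes offset = i * mapSize, so element i's map occupies
// bits [i * mapSize, (i + 1) * mapSize) of the table body.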
|
|
|
|
|
|
|
|
class FrameMapTableHeader {
 public:
  FrameMapTableHeader(unsigned indexCount) : indexCount(indexCount)
  {
  }

  unsigned indexCount;
};

class FrameMapTableIndexElement {
 public:
  FrameMapTableIndexElement(int offset, unsigned base, unsigned path)
      : offset(offset), base(base), path(path)
  {
  }

  int offset;
  unsigned base;
  unsigned path;
};

class FrameMapTablePath {
 public:
  FrameMapTablePath(unsigned stackIndex, unsigned elementCount, unsigned next)
      : stackIndex(stackIndex), elementCount(elementCount), next(next)
  {
  }

  unsigned stackIndex;
  unsigned elementCount;
  unsigned next;
  int32_t elements[0];
};
|
|
|
|
|
GcIntArray* makeSimpleFrameMapTable(MyThread* t,
                                    Context* context,
                                    uint8_t* start,
                                    TraceElement** elements,
                                    unsigned elementCount)
{
  unsigned mapSize = frameMapSizeInBits(t, context->method);
  GcIntArray* table = makeIntArray(
      t, elementCount + ceilingDivide(elementCount * mapSize, 32));

  assertT(t,
          table->length()
          == elementCount + simpleFrameMapTableSize(t, context->method, table));

  for (unsigned i = 0; i < elementCount; ++i) {
    TraceElement* p = elements[i];

    table->body()[i] = static_cast<intptr_t>(p->address->value())
                       - reinterpret_cast<intptr_t>(start);

    assertT(
        t,
        elementCount + ceilingDivide((i + 1) * mapSize, 32) <= table->length());

    if (mapSize) {
      copyFrameMap(
          &table->body()[elementCount], p->map, mapSize, i * mapSize, p);
    }
  }

  return table;
}
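
// Added note on the "simple" table layout produced above, as a sketch: the
// first elementCount ints hold each trace point's offset from the start of
// the compiled code (ascending, since the elements were sorted by address
// beforehand), and the remaining ceilingDivide(elementCount * mapSize, 32)
// ints hold the packed per-element GC-root bitmaps.  For example, with
// elementCount == 3 and mapSize == 40 bits, the table is 3 + 4 ints long:
// three code offsets followed by 120 bits of map data in four words.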
|
2009-10-20 14:20:49 +00:00
|
|
|
|
2014-09-22 17:10:35 +00:00
|
|
|
void insertCallNode(MyThread* t, GcCallNode* node);
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void finish(MyThread* t, FixedAllocator* allocator, Context* context)
|
2008-02-11 17:21:41 +00:00
|
|
|
{
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Compiler* c = context->compiler;
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2009-10-24 23:18:56 +00:00
|
|
|
if (false) {
|
2014-07-11 15:47:57 +00:00
|
|
|
logCompile(
|
|
|
|
t,
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
reinterpret_cast<const char*>(
|
|
|
|
context->method->class_()->name()->body().begin()),
|
|
|
|
reinterpret_cast<const char*>(context->method->name()->body().begin()),
|
|
|
|
reinterpret_cast<const char*>(context->method->spec()->body().begin()));
|
2009-10-24 23:18:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// for debugging:
|
2014-07-11 15:47:57 +00:00
|
|
|
if (false
|
|
|
|
and ::strcmp(reinterpret_cast<const char*>(
|
|
|
|
context->method->class_()->name()->body().begin()),
|
|
|
|
"java/lang/System") == 0
|
|
|
|
and ::strcmp(reinterpret_cast<const char*>(
|
|
|
|
context->method->name()->body().begin()),
|
|
|
|
"<clinit>") == 0) {
|
2009-10-24 23:18:56 +00:00
|
|
|
trap();
|
|
|
|
}
|
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
// todo: this is a CPU-intensive operation, so consider doing it
|
|
|
|
// earlier before we've acquired the global class lock to improve
|
|
|
|
// parallelism (the downside being that it may end up being a waste
|
|
|
|
// of cycles if another thread compiles the same method in parallel,
|
|
|
|
// which might be mitigated by fine-grained, per-method locking):
|
2011-02-28 06:03:13 +00:00
|
|
|
c->compile(context->leaf ? 0 : stackOverflowThunk(t),
|
2012-06-20 19:14:16 +00:00
|
|
|
TARGET_THREAD_STACKLIMIT);
|
2011-02-28 06:03:13 +00:00
|
|
|
|
|
|
|
// we must acquire the class lock here at the latest
|
2014-02-25 22:46:35 +00:00
|
|
|
|
|
|
|
unsigned codeSize = c->resolve(allocator->memory.begin() + allocator->offset);
|
2010-09-17 01:43:27 +00:00
|
|
|
|
2011-09-24 05:25:52 +00:00
|
|
|
unsigned total = pad(codeSize, TargetBytesPerWord)
|
2014-07-11 15:50:18 +00:00
|
|
|
+ pad(c->poolSize(), TargetBytesPerWord);
|
2010-12-27 22:55:23 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
target_uintptr_t* code = static_cast<target_uintptr_t*>(
|
|
|
|
allocator->allocate(total, TargetBytesPerWord));
|
fix a couple of subtle Thread.getStackTrace bugs
The first problem was that, on x86, we failed to properly keep track
of whether to expect the return address to be on the stack or not when
unwinding through a frame. We were relying on a "stackLimit" pointer
to tell us whether we were looking at the most recently-called frame
by comparing it with the stack pointer for that frame. That was
inaccurate in the case of a thread executing at the beginning of a
method before a new frame is allocated, in which case the most recent
two frames share a stack pointer, confusing the unwinder. The
solution involves keeping track of how many frames we've looked at
while walking the stack.
The other problem was that compareIpToMethodBounds assumed every
method was followed by at least one byte of padding before the next
method started. That assumption was usually valid because we were
storing the size following method code prior to the code itself.
However, the last method of an AOT-compiled code image is not followed
by any such method header and may instead be followed directly by
native code with no intervening padding. In that case, we risk
interpreting that native code as part of the preceding method, with
potentially bizarre results.
The reason for the compareIpToMethodBounds assumption was that methods
which throw exceptions as their last instruction generate a
non-returning call, which nonetheless push a return address on the
stack which points past the end of the method, and the unwinder needs
to know that return address belongs to that method. A better solution
is to add an extra trap instruction to the end of such methods, which
is what this patch does.
2012-05-05 00:35:13 +00:00
|
|
|
uint8_t* start = reinterpret_cast<uint8_t*>(code);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
context->executableAllocator = allocator;
|
|
|
|
context->executableStart = code;
|
|
|
|
context->executableSize = total;
|
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
if (context->objectPool) {
|
2014-07-11 15:47:57 +00:00
|
|
|
object pool = allocate3(
|
|
|
|
t,
|
|
|
|
allocator,
|
|
|
|
Machine::ImmortalAllocation,
|
|
|
|
GcArray::FixedSize + ((context->objectPoolCount + 1) * BytesPerWord),
|
|
|
|
true);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2014-02-25 22:46:35 +00:00
|
|
|
context->executableSize = (allocator->memory.begin() + allocator->offset)
|
|
|
|
- static_cast<uint8_t*>(context->executableStart);
|
2013-04-10 00:43:46 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
initArray(
|
|
|
|
t, reinterpret_cast<GcArray*>(pool), context->objectPoolCount + 1);
|
2008-11-29 01:23:01 +00:00
|
|
|
mark(t, pool, 0);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2014-06-26 02:17:27 +00:00
|
|
|
setField(t, pool, ArrayBody, compileRoots(t)->objectPools());
|
2014-06-25 20:38:13 +00:00
|
|
|
compileRoots(t)->setObjectPools(t, pool);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
|
|
|
unsigned i = 1;
|
|
|
|
for (PoolElement* p = context->objectPool; p; p = p->next) {
|
2011-09-01 03:18:00 +00:00
|
|
|
unsigned offset = ArrayBody + ((i++) * BytesPerWord);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2008-11-23 23:58:01 +00:00
|
|
|
p->address = reinterpret_cast<uintptr_t>(pool) + offset;
|
|
|
|
|
2014-06-26 02:17:27 +00:00
|
|
|
setField(t, pool, offset, p->target);
|
2008-11-23 23:58:01 +00:00
|
|
|
}
|
|
|
|
}
|
2008-04-11 19:03:40 +00:00
|
|
|
|
2011-02-28 06:03:13 +00:00
|
|
|
c->write();
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2008-12-02 16:45:20 +00:00
|
|
|
BootContext* bc = context->bootContext;
|
|
|
|
if (bc) {
|
2013-02-11 15:07:46 +00:00
|
|
|
for (avian::codegen::DelayedPromise* p = bc->addresses;
|
2008-12-02 16:45:20 +00:00
|
|
|
p != bc->addressSentinal;
|
2014-07-11 15:50:18 +00:00
|
|
|
p = p->next) {
|
|
|
|
p->basis = new (bc->zone)
|
|
|
|
avian::codegen::ResolvedPromise(p->basis->value());
|
2008-12-02 16:45:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
{
|
|
|
|
GcArray* newExceptionHandlerTable = translateExceptionHandlerTable(
|
|
|
|
t,
|
|
|
|
context,
|
|
|
|
reinterpret_cast<intptr_t>(start),
|
|
|
|
reinterpret_cast<intptr_t>(start) + codeSize);
|
2010-12-27 22:55:23 +00:00
|
|
|
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
PROTECT(t, newExceptionHandlerTable);
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcLineNumberTable* newLineNumberTable = translateLineNumberTable(
|
|
|
|
t, context, reinterpret_cast<intptr_t>(start));
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
GcCode* code = context->method->code();
|
2008-04-11 21:00:18 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
code = makeCode(t,
|
|
|
|
0,
|
|
|
|
0,
|
|
|
|
newExceptionHandlerTable,
|
|
|
|
newLineNumberTable,
|
|
|
|
reinterpret_cast<uintptr_t>(start),
|
|
|
|
codeSize,
|
|
|
|
code->maxStack(),
|
|
|
|
code->maxLocals(),
|
|
|
|
0);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2014-06-25 20:38:13 +00:00
|
|
|
context->method->setCode(t, code);
|
2008-02-11 17:21:41 +00:00
|
|
|
}
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2008-04-13 19:48:20 +00:00
|
|
|
if (context->traceLogCount) {
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, TraceElement*, elements, context->traceLogCount);
|
2008-04-13 19:48:20 +00:00
|
|
|
unsigned index = 0;
|
2014-05-04 01:09:55 +00:00
|
|
|
// unsigned pathFootprint = 0;
|
|
|
|
// unsigned mapCount = 0;
|
2008-04-13 19:48:20 +00:00
|
|
|
for (TraceElement* p = context->traceLog; p; p = p->next) {
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, index < context->traceLogCount);
|
2008-04-11 21:00:18 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
if (p->address) {
|
|
|
|
RUNTIME_ARRAY_BODY(elements)[index++] = p;
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2010-09-17 01:43:27 +00:00
|
|
|
if (p->target) {
|
2014-07-11 15:50:18 +00:00
|
|
|
insertCallNode(
|
|
|
|
t, makeCallNode(t, p->address->value(), p->target, p->flags, 0));
|
2010-09-17 01:43:27 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2008-04-13 19:48:20 +00:00
|
|
|
}
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
qsort(RUNTIME_ARRAY_BODY(elements),
|
|
|
|
index,
|
|
|
|
sizeof(TraceElement*),
|
|
|
|
compareTraceElementPointers);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
GcIntArray* map = makeSimpleFrameMapTable(
|
2014-06-01 16:04:56 +00:00
|
|
|
t, context, start, RUNTIME_ARRAY_BODY(elements), index);
|
2007-10-08 21:41:41 +00:00
|
|
|
|
2014-06-25 20:38:13 +00:00
|
|
|
context->method->code()->setStackMap(t, map);
|
2008-04-13 19:48:20 +00:00
|
|
|
}
|
2007-12-31 22:40:56 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
logCompile(
|
|
|
|
t,
|
|
|
|
start,
|
|
|
|
codeSize,
|
|
|
|
reinterpret_cast<const char*>(
|
|
|
|
context->method->class_()->name()->body().begin()),
|
|
|
|
reinterpret_cast<const char*>(context->method->name()->body().begin()),
|
|
|
|
reinterpret_cast<const char*>(context->method->spec()->body().begin()));
|
2008-02-11 17:21:41 +00:00
|
|
|
|
|
|
|
// for debugging:
|
2014-07-11 15:47:57 +00:00
|
|
|
if (false
|
|
|
|
and ::strcmp(reinterpret_cast<const char*>(
|
|
|
|
context->method->class_()->name()->body().begin()),
|
|
|
|
"java/lang/System") == 0
|
|
|
|
and ::strcmp(reinterpret_cast<const char*>(
|
|
|
|
context->method->name()->body().begin()),
|
|
|
|
"<clinit>") == 0) {
|
2009-03-03 01:40:06 +00:00
|
|
|
trap();
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2009-03-09 14:26:23 +00:00
|
|
|
syncInstructionCache(start, codeSize);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void compile(MyThread* t, Context* context)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2013-02-11 15:07:46 +00:00
|
|
|
avian::codegen::Compiler* c = context->compiler;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-07-12 16:16:03 +00:00
|
|
|
if (false) {
|
|
|
|
fprintf(stderr,
|
|
|
|
"compiling %s.%s%s\n",
|
|
|
|
context->method->class_()->name()->body().begin(),
|
|
|
|
context->method->name()->body().begin(),
|
|
|
|
context->method->spec()->body().begin());
|
|
|
|
}
|
2008-01-08 21:23:49 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
unsigned footprint = context->method->parameterFootprint();
|
2008-01-20 18:55:08 +00:00
|
|
|
unsigned locals = localSize(t, context->method);
|
2014-07-11 15:47:57 +00:00
|
|
|
c->init(context->method->code()->length(),
|
|
|
|
footprint,
|
|
|
|
locals,
|
2008-09-28 19:00:52 +00:00
|
|
|
alignedFrameSize(t, context->method));
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
ir::Type* stackMap = (ir::Type*)malloc(sizeof(ir::Type)
|
|
|
|
* context->method->code()->maxStack());
|
2014-05-05 16:49:50 +00:00
|
|
|
Frame frame(context, stackMap);
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
unsigned index = context->method->parameterFootprint();
|
|
|
|
if ((context->method->flags() & ACC_STATIC) == 0) {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame.set(--index, ir::Type::object());
|
|
|
|
c->initLocal(index, ir::Type::object());
|
2008-01-07 14:51:07 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
for (MethodSpecIterator it(t,
|
|
|
|
reinterpret_cast<const char*>(
|
|
|
|
context->method->spec()->body().begin()));
|
|
|
|
it.hasNext();) {
|
2008-01-07 14:51:07 +00:00
|
|
|
switch (*it.next()) {
|
|
|
|
case 'L':
|
|
|
|
case '[':
|
2014-06-01 20:22:14 +00:00
|
|
|
frame.set(--index, ir::Type::object());
|
|
|
|
c->initLocal(index, ir::Type::object());
|
2008-01-07 14:51:07 +00:00
|
|
|
break;
|
2014-05-01 03:35:08 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
case 'J':
|
2014-06-01 20:22:14 +00:00
|
|
|
frame.set(--index, ir::Type::i8());
|
|
|
|
frame.set(--index, ir::Type::i8());
|
|
|
|
c->initLocal(index, ir::Type::i8());
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2009-11-30 15:08:45 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
case 'D':
|
2014-06-01 20:22:14 +00:00
|
|
|
frame.set(--index, ir::Type::f8());
|
|
|
|
frame.set(--index, ir::Type::f8());
|
|
|
|
c->initLocal(index, ir::Type::f8());
|
2008-01-07 14:51:07 +00:00
|
|
|
break;
|
2014-05-01 03:35:08 +00:00
|
|
|
|
2009-08-10 19:20:23 +00:00
|
|
|
case 'F':
|
2014-06-01 20:22:14 +00:00
|
|
|
frame.set(--index, ir::Type::i4());
|
|
|
|
c->initLocal(index, ir::Type::f4());
|
2009-08-10 19:20:23 +00:00
|
|
|
break;
|
2014-05-01 03:35:08 +00:00
|
|
|
|
2008-01-07 14:51:07 +00:00
|
|
|
default:
|
2014-06-01 20:22:14 +00:00
|
|
|
frame.set(--index, ir::Type::i4());
|
|
|
|
c->initLocal(index, ir::Type::i4());
|
2008-01-07 14:51:07 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-11-09 23:56:37 +00:00
|
|
|
handleEntrance(t, &frame);
|
|
|
|
|
2008-09-13 21:09:26 +00:00
|
|
|
Compiler::State* state = c->saveState();
|
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
compile(t, &frame, 0);
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2008-03-05 21:44:17 +00:00
|
|
|
context->dirtyRoots = false;
|
2014-05-04 01:09:55 +00:00
|
|
|
unsigned eventIndex = calculateFrameMaps(t, context, 0, 0, 0);
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcExceptionHandlerTable* eht = cast<GcExceptionHandlerTable>(
|
|
|
|
t, context->method->code()->exceptionHandlerTable());
|
2007-12-09 22:45:43 +00:00
|
|
|
if (eht) {
|
|
|
|
PROTECT(t, eht);
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
unsigned visitCount = eht->length();
|
2009-03-08 01:23:28 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, bool, visited, visitCount);
|
2009-08-27 00:26:44 +00:00
|
|
|
memset(RUNTIME_ARRAY_BODY(visited), 0, visitCount * sizeof(bool));
|
2008-01-26 00:17:27 +00:00
|
|
|
|
2011-04-09 00:50:22 +00:00
|
|
|
bool progress = true;
|
|
|
|
while (progress) {
|
|
|
|
progress = false;
|
2008-01-26 00:17:27 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
for (unsigned subI = 0; subI <= context->subroutineCount; ++subI) {
|
2014-07-11 15:47:57 +00:00
|
|
|
unsigned duplicatedBaseIp = subI * context->method->code()->length();
|
2008-01-26 00:17:27 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
for (unsigned i = 0; i < eht->length(); ++i) {
|
|
|
|
uint64_t eh = eht->body()[i];
|
2014-05-04 01:09:55 +00:00
|
|
|
int start
|
|
|
|
= resolveIpForwards(context,
|
|
|
|
duplicatedBaseIp + exceptionHandlerStart(eh),
|
|
|
|
duplicatedBaseIp + exceptionHandlerEnd(eh));
|
2008-01-26 00:17:27 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
if ((not RUNTIME_ARRAY_BODY(visited)[i]) and start >= 0
|
|
|
|
and context->visitTable[start]) {
|
2014-05-04 01:09:55 +00:00
|
|
|
RUNTIME_ARRAY_BODY(visited)[i] = true;
|
|
|
|
progress = true;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
c->restoreState(state);
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2014-05-05 16:49:50 +00:00
|
|
|
ir::Type* stackMap = (ir::Type*)malloc(
|
2014-07-11 15:47:57 +00:00
|
|
|
sizeof(ir::Type) * context->method->code()->maxStack());
|
2014-05-05 16:49:50 +00:00
|
|
|
Frame frame2(&frame, stackMap);
|
2008-01-20 23:03:28 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
unsigned end = duplicatedBaseIp + exceptionHandlerEnd(eh);
|
|
|
|
if (exceptionHandlerIp(eh) >= static_cast<unsigned>(start)
|
|
|
|
and exceptionHandlerIp(eh) < end) {
|
|
|
|
end = duplicatedBaseIp + exceptionHandlerIp(eh);
|
|
|
|
}
|
2009-07-13 23:49:15 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
context->eventLog.append(PushExceptionHandlerEvent);
|
|
|
|
context->eventLog.append2(start);
|
|
|
|
context->eventLog.append2(end);
|
2011-04-09 18:44:28 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
for (unsigned i = 1; i < context->method->code()->maxStack(); ++i) {
|
2014-06-01 20:22:14 +00:00
|
|
|
frame2.set(localSize(t, context->method) + i, ir::Type::i4());
|
2014-05-04 01:09:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
compile(t, &frame2, exceptionHandlerIp(eh), start);
|
2011-04-09 18:44:28 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
context->eventLog.append(PopContextEvent);
|
2011-04-09 18:44:28 +00:00
|
|
|
|
2014-05-04 01:09:55 +00:00
|
|
|
eventIndex = calculateFrameMaps(t, context, 0, eventIndex, 0);
|
|
|
|
}
|
2008-01-26 00:17:27 +00:00
|
|
|
}
|
2008-01-20 23:03:28 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
|
|
|
}
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2008-03-05 21:44:17 +00:00
|
|
|
while (context->dirtyRoots) {
|
|
|
|
context->dirtyRoots = false;
|
2014-05-04 01:09:55 +00:00
|
|
|
calculateFrameMaps(t, context, 0, 0, 0);
|
2008-03-05 21:44:17 +00:00
|
|
|
}
|
2014-05-05 16:49:50 +00:00
|
|
|
free(stackMap);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2014-09-22 17:10:35 +00:00
|
|
|
#endif // not AVIAN_AOT_ONLY
|
2007-09-30 02:48:27 +00:00
|
|
|
|
void updateCall(MyThread* t,
                avian::codegen::lir::UnaryOperation op,
                void* returnAddress,
                void* target)
{
  t->arch->updateCall(op, returnAddress, target);
}
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void* compileMethod2(MyThread* t, void* ip);
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
uint64_t compileMethod(MyThread* t)
|
2007-12-16 22:41:07 +00:00
|
|
|
{
|
2009-04-07 00:34:12 +00:00
|
|
|
void* ip;
|
2009-03-31 20:15:08 +00:00
|
|
|
if (t->tailAddress) {
|
|
|
|
ip = t->tailAddress;
|
|
|
|
t->tailAddress = 0;
|
|
|
|
} else {
|
2011-02-20 20:31:29 +00:00
|
|
|
ip = getIp(t);
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
return reinterpret_cast<uintptr_t>(compileMethod2(t, ip));
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
|
void* compileVirtualMethod2(MyThread* t, GcClass* class_, unsigned index)
{
  // If class_ has BootstrapFlag set, that means its vtable is not yet
  // available. However, we must set t->trace->targetMethod to an
  // appropriate method to ensure we can accurately scan the stack for
  // GC roots. We find such a method by looking for a superclass with
  // a vtable and using it instead:

  GcClass* c = class_;
  while (c->vmFlags() & BootstrapFlag) {
    c = c->super();
  }
  t->trace->targetMethod
      = cast<GcMethod>(t, cast<GcArray>(t, c->virtualTable())->body()[index]);

  THREAD_RESOURCE0(t, static_cast<MyThread*>(t)->trace->targetMethod = 0;);

  PROTECT(t, class_);

  GcMethod* target = resolveTarget(t, class_, index);
  PROTECT(t, target);

  compile(t, codeAllocator(t), 0, target);

  void* address = reinterpret_cast<void*>(methodAddress(t, target));
  if (target->flags() & ACC_NATIVE) {
    t->trace->nativeMethod = target;
  } else {
    class_->vtable()[target->offset()] = address;
  }
  return address;
}
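
// Added note: in the non-native case above, the compiled entry point is
// patched directly into the class vtable slot, so subsequent virtual calls
// through that slot dispatch straight to the compiled code rather than
// re-entering this helper.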
|
|
|
|
|
uint64_t compileVirtualMethod(MyThread* t)
{
  GcClass* class_ = objectClass(t, static_cast<object>(t->virtualCallTarget));
  t->virtualCallTarget = 0;

  unsigned index = t->virtualCallIndex;
  t->virtualCallIndex = 0;

  return reinterpret_cast<uintptr_t>(compileVirtualMethod2(t, class_, index));
}
|
|
|
|
|
uint64_t invokeNativeFast(MyThread* t, GcMethod* method, void* function)
{
  FastNativeFunction f;
  memcpy(&f, &function, sizeof(void*));
  return f(t,
           method,
           static_cast<uintptr_t*>(t->stack) + t->arch->frameFooterSize()
           + t->arch->frameReturnAddressSize());
}
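
// Added note: the memcpy above converts the stored data pointer into a
// FastNativeFunction, presumably to avoid a direct cast between object- and
// function-pointer types; the third argument is the callee's view of the
// Java stack, skipping the frame footer and the saved return address.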
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
uint64_t invokeNativeSlow(MyThread* t, GcMethod* method, void* function)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
|
|
|
PROTECT(t, method);
|
2007-09-26 23:23:03 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
unsigned footprint = method->parameterFootprint() + 1;
|
|
|
|
if (method->flags() & ACC_STATIC) {
|
2014-07-11 15:50:18 +00:00
|
|
|
++footprint;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2014-05-29 04:17:25 +00:00
|
|
|
unsigned count = method->parameterCount() + 2;
|
2007-09-24 01:39:03 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uintptr_t, args, footprint);
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned argOffset = 0;
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uint8_t, types, count);
|
2007-12-09 22:45:43 +00:00
|
|
|
unsigned typeOffset = 0;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(args)[argOffset++] = reinterpret_cast<uintptr_t>(t);
|
|
|
|
RUNTIME_ARRAY_BODY(types)[typeOffset++] = POINTER_TYPE;
|
2007-10-22 14:14:05 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
uintptr_t* sp = static_cast<uintptr_t*>(t->stack) + t->arch->frameFooterSize()
|
|
|
|
+ t->arch->frameReturnAddressSize();
|
2007-10-22 14:14:05 +00:00
|
|
|
|
2014-06-21 04:16:33 +00:00
|
|
|
GcJclass* jclass = 0;
|
2010-09-01 16:13:52 +00:00
|
|
|
PROTECT(t, jclass);
|
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
if (method->flags() & ACC_STATIC) {
|
2014-06-21 04:16:33 +00:00
|
|
|
jclass = getJClass(t, method->class_());
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(args)[argOffset++]
|
2014-07-11 15:50:18 +00:00
|
|
|
= reinterpret_cast<uintptr_t>(&jclass);
|
2007-12-09 22:45:43 +00:00
|
|
|
} else {
|
2014-07-11 15:50:18 +00:00
|
|
|
RUNTIME_ARRAY_BODY(args)[argOffset++] = reinterpret_cast<uintptr_t>(sp++);
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(types)[typeOffset++] = POINTER_TYPE;
|
2007-10-22 14:14:05 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
MethodSpecIterator it(
|
|
|
|
t, reinterpret_cast<const char*>(method->spec()->body().begin()));
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
while (it.hasNext()) {
|
2009-08-27 00:26:44 +00:00
|
|
|
unsigned type = RUNTIME_ARRAY_BODY(types)[typeOffset++]
|
2014-07-11 15:50:18 +00:00
|
|
|
= fieldType(t, fieldCode(t, *it.next()));
|
2007-10-22 14:14:05 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
switch (type) {
|
|
|
|
case INT8_TYPE:
|
|
|
|
case INT16_TYPE:
|
|
|
|
case INT32_TYPE:
|
|
|
|
case FLOAT_TYPE:
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(args)[argOffset++] = *(sp++);
|
2007-12-09 22:45:43 +00:00
|
|
|
break;
|
2007-10-22 14:14:05 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case INT64_TYPE:
|
|
|
|
case DOUBLE_TYPE: {
|
2009-08-27 00:26:44 +00:00
|
|
|
memcpy(RUNTIME_ARRAY_BODY(args) + argOffset, sp, 8);
|
2007-12-18 02:09:32 +00:00
|
|
|
argOffset += (8 / BytesPerWord);
|
2009-05-03 20:57:11 +00:00
|
|
|
sp += 2;
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
case POINTER_TYPE: {
|
2007-12-20 00:02:32 +00:00
|
|
|
if (*sp) {
|
2014-07-11 15:50:18 +00:00
|
|
|
RUNTIME_ARRAY_BODY(args)[argOffset++] = reinterpret_cast<uintptr_t>(sp);
|
2007-12-20 00:02:32 +00:00
|
|
|
} else {
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(args)[argOffset++] = 0;
|
2007-12-20 00:02:32 +00:00
|
|
|
}
|
2014-07-11 15:50:18 +00:00
|
|
|
++sp;
|
2007-12-09 22:45:43 +00:00
|
|
|
} break;
|
2007-09-28 23:41:03 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-10-03 00:22:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
unsigned returnCode = method->returnCode();
|
2007-12-23 18:09:41 +00:00
|
|
|
unsigned returnType = fieldType(t, returnCode);
|
2007-12-09 22:45:43 +00:00
|
|
|
uint64_t result;
|
2008-01-20 22:05:59 +00:00
|
|
|
|
2007-12-26 23:59:55 +00:00
|
|
|
if (DebugNatives) {
|
2014-07-11 15:47:57 +00:00
|
|
|
fprintf(stderr,
|
|
|
|
"invoke native method %s.%s\n",
|
2014-06-21 04:16:33 +00:00
|
|
|
method->class_()->name()->body().begin(),
|
|
|
|
method->name()->body().begin());
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2007-10-04 00:41:54 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
if (method->flags() & ACC_SYNCHRONIZED) {
|
|
|
|
if (method->flags() & ACC_STATIC) {
|
2014-07-02 21:11:27 +00:00
|
|
|
acquire(t, method->class_());
|
2008-01-11 17:49:11 +00:00
|
|
|
} else {
|
2010-09-10 21:05:29 +00:00
|
|
|
acquire(t, *reinterpret_cast<object*>(RUNTIME_ARRAY_BODY(args)[1]));
|
2008-01-11 17:49:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-01 21:17:54 +00:00
|
|
|
Reference* reference = t->reference;
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
{
|
|
|
|
ENTER(t, Thread::IdleState);
|
2007-10-03 00:22:48 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
bool noThrow = t->checkpoint->noThrow;
|
|
|
|
t->checkpoint->noThrow = true;
|
|
|
|
THREAD_RESOURCE(t, bool, noThrow, t->checkpoint->noThrow = noThrow);
|
|
|
|
|
2014-02-23 04:40:57 +00:00
|
|
|
result = vm::dynamicCall(function,
|
|
|
|
RUNTIME_ARRAY_BODY(args),
|
|
|
|
RUNTIME_ARRAY_BODY(types),
|
|
|
|
count,
|
|
|
|
footprint * BytesPerWord,
|
|
|
|
returnType);
|
2007-09-24 01:39:03 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
if (method->flags() & ACC_SYNCHRONIZED) {
|
|
|
|
if (method->flags() & ACC_STATIC) {
|
2014-07-02 21:11:27 +00:00
|
|
|
release(t, method->class_());
|
2008-01-11 17:49:11 +00:00
|
|
|
} else {
|
2010-09-10 21:05:29 +00:00
|
|
|
release(t, *reinterpret_cast<object*>(RUNTIME_ARRAY_BODY(args)[1]));
|
2008-01-11 17:49:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-12-26 23:59:55 +00:00
|
|
|
if (DebugNatives) {
|
2014-07-11 15:47:57 +00:00
|
|
|
fprintf(stderr,
|
|
|
|
"return from native method %s.%s\n",
|
2014-06-21 04:16:33 +00:00
|
|
|
method->class_()->name()->body().begin(),
|
|
|
|
method->name()->body().begin());
|
2007-10-03 00:22:48 +00:00
|
|
|
}
|
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
if (UNLIKELY(t->exception)) {
|
2014-06-28 23:24:24 +00:00
|
|
|
GcThrowable* exception = t->exception;
|
2010-12-27 22:55:23 +00:00
|
|
|
t->exception = 0;
|
|
|
|
vm::throw_(t, exception);
|
|
|
|
}
|
2007-12-23 18:09:41 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
switch (returnCode) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
result = static_cast<int8_t>(result);
|
|
|
|
break;
|
2007-12-23 18:09:41 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
case CharField:
|
|
|
|
result = static_cast<uint16_t>(result);
|
|
|
|
break;
|
2007-12-23 18:09:41 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
case ShortField:
|
|
|
|
result = static_cast<int16_t>(result);
|
|
|
|
break;
|
2007-12-18 02:09:32 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
case FloatField:
|
|
|
|
case IntField:
|
|
|
|
result = static_cast<int32_t>(result);
|
|
|
|
break;
|
2007-12-23 18:09:41 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
case LongField:
|
|
|
|
case DoubleField:
|
|
|
|
break;
|
2007-12-23 18:09:41 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
case ObjectField:
|
2014-07-11 15:50:18 +00:00
|
|
|
result = static_cast<uintptr_t>(result)
|
|
|
|
? *reinterpret_cast<uintptr_t*>(static_cast<uintptr_t>(result))
|
|
|
|
: 0;
|
2010-12-27 22:55:23 +00:00
|
|
|
break;
|
2007-12-23 18:09:41 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
case VoidField:
|
2008-02-01 21:17:54 +00:00
|
|
|
result = 0;
|
2010-12-27 22:55:23 +00:00
|
|
|
break;
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
default:
|
|
|
|
abort(t);
|
2007-10-03 00:22:48 +00:00
|
|
|
}
|
2008-02-01 21:17:54 +00:00
|
|
|
|
|
|
|
while (t->reference != reference) {
|
|
|
|
dispose(t, t->reference);
|
|
|
|
}
|
|
|
|
|
|
|
|
return result;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2014-05-29 04:17:25 +00:00
|
|
|
|
uint64_t invokeNative2(MyThread* t, GcMethod* method)
{
  GcNative* native = getMethodRuntimeData(t, method)->native();
  if (native->fast()) {
    return invokeNativeFast(t, method, native->function());
  } else {
    return invokeNativeSlow(t, method, native->function());
  }
}
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
uint64_t invokeNative(MyThread* t)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2008-04-07 23:47:41 +00:00
|
|
|
if (t->trace->nativeMethod == 0) {
|
2009-04-07 00:34:12 +00:00
|
|
|
void* ip;
|
2009-03-31 20:15:08 +00:00
|
|
|
if (t->tailAddress) {
|
|
|
|
ip = t->tailAddress;
|
|
|
|
t->tailAddress = 0;
|
|
|
|
} else {
|
2011-02-20 20:31:29 +00:00
|
|
|
ip = getIp(t);
|
2009-03-31 20:15:08 +00:00
|
|
|
}
|
|
|
|
|
2014-06-29 03:08:10 +00:00
|
|
|
GcCallNode* node = findCallNode(t, ip);
|
|
|
|
GcMethod* target = node->target();
|
|
|
|
if (node->flags() & TraceElement::VirtualCall) {
|
2008-04-23 16:33:31 +00:00
|
|
|
target = resolveTarget(t, t->stack, target);
|
2008-04-01 17:37:59 +00:00
|
|
|
}
|
2008-04-23 16:33:31 +00:00
|
|
|
t->trace->nativeMethod = target;
|
2008-01-11 22:16:24 +00:00
|
|
|
}
|
2008-04-01 17:37:59 +00:00
|
|
|
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, t->tailAddress == 0);
|
2009-03-31 20:15:08 +00:00
|
|
|
|
2007-12-17 22:38:59 +00:00
|
|
|
uint64_t result = 0;
|
2007-10-03 00:22:48 +00:00
|
|
|
|
2009-04-27 01:53:42 +00:00
|
|
|
t->trace->targetMethod = t->trace->nativeMethod;
|
|
|
|
|
2011-04-10 17:26:44 +00:00
|
|
|
t->m->classpath->resolveNative(t, t->trace->nativeMethod);
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
result = invokeNative2(t, t->trace->nativeMethod);
|
2007-10-03 00:22:48 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
unsigned parameterFootprint = t->trace->targetMethod->parameterFootprint();
|
2009-05-17 00:39:08 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
uintptr_t* stack = static_cast<uintptr_t*>(t->stack);
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2013-02-11 15:07:46 +00:00
|
|
|
if (avian::codegen::TailCalls
|
2010-12-27 22:55:23 +00:00
|
|
|
and t->arch->argumentFootprint(parameterFootprint)
|
2014-07-11 15:50:18 +00:00
|
|
|
> t->arch->stackAlignmentInWords()) {
|
2010-12-27 22:55:23 +00:00
|
|
|
stack += t->arch->argumentFootprint(parameterFootprint)
|
2014-07-11 15:50:18 +00:00
|
|
|
- t->arch->stackAlignmentInWords();
|
2010-12-27 22:55:23 +00:00
|
|
|
}
|
2009-05-17 00:39:08 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
stack += t->arch->frameReturnAddressSize();
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2011-07-14 00:06:02 +00:00
|
|
|
t->trace->targetMethod = 0;
|
|
|
|
t->trace->nativeMethod = 0;
|
|
|
|
|
2012-06-18 14:27:18 +00:00
|
|
|
t->newStack = stack;
|
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
return result;
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
2008-04-10 23:48:28 +00:00
|
|
|
|
void findFrameMapInSimpleTable(MyThread* t,
                               GcMethod* method,
                               GcIntArray* table,
                               int32_t offset,
                               int32_t** map,
                               unsigned* start)
{
  unsigned tableSize = simpleFrameMapTableSize(t, method, table);
  unsigned indexSize = table->length() - tableSize;

  *map = &table->body()[indexSize];

  unsigned bottom = 0;
  unsigned top = indexSize;
  for (unsigned span = top - bottom; span; span = top - bottom) {
    unsigned middle = bottom + (span / 2);
    int32_t v = table->body()[middle];

    if (offset == v) {
      *start = frameMapSizeInBits(t, method) * middle;
      return;
    } else if (offset < v) {
      top = middle;
    } else {
      bottom = middle + 1;
    }
  }

  abort(t);
}
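
// Added usage sketch (hypothetical values): given a table whose index holds
// code offsets {8, 24, 40} and frameMapSizeInBits(t, method) == 16, a query
// with offset == 24 binary-searches the index, lands on middle == 1, and
// reports *start == 16, i.e. the second 16-bit map in the packed map area.
// The offset must correspond to a recorded trace point; otherwise the search
// falls through to abort(t).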
|
2007-10-03 00:22:48 +00:00
|
|
|
|
void findFrameMap(MyThread* t,
                  void* stack UNUSED,
                  GcMethod* method,
                  int32_t offset,
                  int32_t** map,
                  unsigned* start)
{
  findFrameMapInSimpleTable(
      t, method, method->code()->stackMap(), offset, map, start);
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
void visitStackAndLocals(MyThread* t,
|
|
|
|
Heap::Visitor* v,
|
|
|
|
void* frame,
|
|
|
|
GcMethod* method,
|
|
|
|
void* ip)
|
2007-12-09 22:45:43 +00:00
|
|
|
{
|
2009-04-27 14:46:43 +00:00
|
|
|
unsigned count = frameMapSizeInBits(t, method);
|
2009-04-26 21:55:35 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
if (count) {
|
2009-04-26 21:55:35 +00:00
|
|
|
void* stack = stackForFrame(t, frame, method);
|
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
int32_t* map;
|
|
|
|
unsigned offset;
|
2014-07-11 15:50:18 +00:00
|
|
|
findFrameMap(
|
|
|
|
t,
|
|
|
|
stack,
|
|
|
|
method,
|
|
|
|
difference(ip, reinterpret_cast<void*>(methodAddress(t, method))),
|
|
|
|
&map,
|
|
|
|
&offset);
|
2009-06-26 21:36:04 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
for (unsigned i = 0; i < count; ++i) {
|
2009-06-26 21:36:04 +00:00
|
|
|
int j = offset + i;
|
|
|
|
if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) {
|
2014-05-29 04:17:25 +00:00
|
|
|
v->visit(localObject(t, stack, method, i));
|
2007-10-17 01:21:35 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
void visitArgument(MyThread* t, Heap::Visitor* v, void* stack, unsigned index)
{
  v->visit(static_cast<object*>(stack) + index
           + t->arch->frameReturnAddressSize() + t->arch->frameFooterSize());
}
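
// Added note: the argument slot is located by stepping past the frame footer
// and the saved return address from the given stack pointer; the two arch
// queries supply those sizes, in words, for the current target.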
|
|
|
|
|
void visitArguments(MyThread* t,
                    Heap::Visitor* v,
                    void* stack,
                    GcMethod* method)
{
  unsigned index = 0;

  if ((method->flags() & ACC_STATIC) == 0) {
    visitArgument(t, v, stack, index++);
  }

  for (MethodSpecIterator it(
           t, reinterpret_cast<const char*>(method->spec()->body().begin()));
       it.hasNext();) {
    switch (*it.next()) {
    case 'L':
    case '[':
      visitArgument(t, v, stack, index++);
      break;

    case 'J':
    case 'D':
      index += 2;
      break;

    default:
      ++index;
      break;
    }
  }
}
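
// Added note: the descriptor walk mirrors the JVM argument layout: 'L' and
// '[' entries are object references and get visited, while 'J' (long) and
// 'D' (double) occupy two stack slots each and are skipped, as are the
// single-slot primitive types handled by the default case.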
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void visitStack(MyThread* t, Heap::Visitor* v)
|
2007-09-26 23:23:03 +00:00
|
|
|
{
|
2011-02-20 03:33:26 +00:00
|
|
|
void* ip = getIp(t);
|
2008-08-17 19:32:40 +00:00
|
|
|
void* stack = t->stack;
|
2007-12-30 22:24:48 +00:00
|
|
|
|
2007-12-09 22:45:43 +00:00
|
|
|
MyThread::CallTrace* trace = t->trace;
|
2014-05-29 04:17:25 +00:00
|
|
|
GcMethod* targetMethod = (trace ? trace->targetMethod : 0);
|
|
|
|
GcMethod* target = targetMethod;
|
2012-05-05 00:35:13 +00:00
|
|
|
bool mostRecent = true;
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2007-12-16 22:41:07 +00:00
|
|
|
while (stack) {
|
2009-05-17 23:43:48 +00:00
|
|
|
if (targetMethod) {
|
|
|
|
visitArguments(t, v, stack, targetMethod);
|
|
|
|
targetMethod = 0;
|
|
|
|
}
|
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
GcMethod* method = methodForIp(t, ip);
|
2008-04-07 23:47:41 +00:00
|
|
|
if (method) {
|
|
|
|
PROTECT(t, method);
|
2008-01-07 16:01:35 +00:00
|
|
|
|
2011-01-26 00:22:43 +00:00
|
|
|
void* nextIp = ip;
|
2012-05-05 00:35:13 +00:00
|
|
|
nextFrame(t, &nextIp, &stack, method, target, mostRecent);
|
2008-01-07 14:51:07 +00:00
|
|
|
|
2009-04-27 01:53:42 +00:00
|
|
|
visitStackAndLocals(t, v, stack, method, ip);
|
2007-12-09 22:45:43 +00:00
|
|
|
|
2011-01-26 00:22:43 +00:00
|
|
|
ip = nextIp;
|
|
|
|
|
|
|
|
target = method;
|
2007-12-09 22:45:43 +00:00
|
|
|
} else if (trace) {
|
2008-08-17 19:32:40 +00:00
|
|
|
stack = trace->stack;
|
2011-02-20 20:31:29 +00:00
|
|
|
ip = trace->ip;
|
2007-12-09 22:45:43 +00:00
|
|
|
trace = trace->next;
|
2009-04-27 01:53:42 +00:00
|
|
|
|
|
|
|
if (trace) {
|
|
|
|
targetMethod = trace->targetMethod;
|
2011-01-26 00:22:43 +00:00
|
|
|
target = targetMethod;
|
|
|
|
} else {
|
|
|
|
target = 0;
|
2009-04-27 01:53:42 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
2012-05-05 00:35:13 +00:00
|
|
|
|
|
|
|
mostRecent = false;
|
2008-07-12 00:11:13 +00:00
|
|
|
}
|
2007-12-09 22:45:43 +00:00
|
|
|
}
|
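
The fix described in the commit message above replaces stack-pointer
comparisons with an explicit notion of which frame is the most
recently-called one, which is what the mostRecent flag threaded through
the walker above carries. The following is a minimal, hedged sketch of
that idea using hypothetical types (FrameSketch, countFrames); it is an
illustration of the approach, not Avian's actual unwinder:

// Minimal sketch, assuming hypothetical types: the walker carries a
// "mostRecent" flag down the chain of frames instead of comparing
// stack pointers, so two frames that momentarily share a stack pointer
// are still told apart correctly.
struct FrameSketch {
  FrameSketch* caller;  // next older frame, or null at the bottom
  void* returnAddress;  // may not be pushed yet for the newest frame
};

inline unsigned countFrames(FrameSketch* frame)
{
  unsigned count = 0;
  bool mostRecent = true;
  while (frame) {
    // only the most recently-called frame is allowed to lack a pushed
    // return address; every older frame must have one
    if (mostRecent or frame->returnAddress) {
      ++count;
    }
    mostRecent = false;
    frame = frame->caller;
  }
  return count;
}
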
2007-09-26 23:23:03 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
void walkContinuationBody(MyThread* t,
|
|
|
|
Heap::Walker* w,
|
|
|
|
GcContinuation* c,
|
|
|
|
int start)
|
2009-05-03 20:57:11 +00:00
|
|
|
{
|
2009-05-17 23:43:48 +00:00
|
|
|
const int BodyOffset = ContinuationBody / BytesPerWord;
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2014-06-27 00:17:16 +00:00
|
|
|
GcMethod* method = t->m->heap->follow(c->method());
|
2009-05-17 23:43:48 +00:00
|
|
|
int count = frameMapSizeInBits(t, method);
|
2009-05-03 20:57:11 +00:00
|
|
|
|
|
|
|
if (count) {
|
2014-07-11 15:47:57 +00:00
|
|
|
int stack = BodyOffset + (c->framePointerOffset() / BytesPerWord)
|
|
|
|
- t->arch->framePointerOffset()
|
|
|
|
- stackOffsetFromFrame(t, method);
|
2009-05-17 23:43:48 +00:00
|
|
|
|
|
|
|
int first = stack + localOffsetFromStack(t, count - 1, method);
|
|
|
|
if (start > first) {
|
|
|
|
count -= start - first;
|
|
|
|
}
|
|
|
|
|
2009-06-26 21:36:04 +00:00
|
|
|
int32_t* map;
|
|
|
|
unsigned offset;
|
2014-07-11 15:47:57 +00:00
|
|
|
findFrameMap(t,
|
|
|
|
reinterpret_cast<uintptr_t*>(c) + stack,
|
|
|
|
method,
|
|
|
|
difference(c->address(),
|
|
|
|
reinterpret_cast<void*>(methodAddress(t, method))),
|
|
|
|
&map,
|
|
|
|
&offset);
|
2009-05-03 20:57:11 +00:00
|
|
|
|
2009-05-17 23:43:48 +00:00
|
|
|
for (int i = count - 1; i >= 0; --i) {
|
2009-06-26 21:36:04 +00:00
|
|
|
int j = offset + i;
|
|
|
|
if (map[j / 32] & (static_cast<int32_t>(1) << (j % 32))) {
|
2009-05-17 23:43:48 +00:00
|
|
|
if (not w->visit(stack + localOffsetFromStack(t, i, method))) {
|
2009-05-03 20:57:11 +00:00
|
|
|
return;
|
2009-05-17 23:43:48 +00:00
|
|
|
}
|
2009-05-03 20:57:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
void callContinuation(MyThread* t,
|
|
|
|
GcContinuation* continuation,
|
|
|
|
object result,
|
|
|
|
GcThrowable* exception,
|
|
|
|
void* ip,
|
|
|
|
void* stack)
|
2009-05-23 22:15:06 +00:00
|
|
|
{
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, t->exception == 0);
|
2009-05-23 22:15:06 +00:00
|
|
|
|
|
|
|
if (exception) {
|
|
|
|
t->exception = exception;
|
|
|
|
|
support stack unwinding without using a frame pointer
Previously, we unwound the stack by following the chain of frame
pointers for normal returns, stack trace creation, and exception
unwinding. On x86, this required reserving EBP/RBP for frame pointer
duties, making it unavailable for general computation and requiring
that it be explicitly saved and restored on entry and exit,
respectively.
On PowerPC, we use an ABI that makes the stack pointer double as a
frame pointer, so it doesn't cost us anything. We've been using the
same convention on ARM, but it doesn't match the native calling
convention, which makes it unusable when we want to call native code
from Java and pass arguments on the stack.
So far, the ARM calling convention mismatch hasn't been an issue
because we've never passed more arguments from Java to native code
than would fit in registers. However, we must now pass an extra
argument (the thread pointer) to e.g. divideLong so it can throw an
exception on divide by zero, which means the last argument must be
passed on the stack. This will clobber the linkage area we've been
using to hold the frame pointer, so we need to stop using it.
One solution would be to use the same convention on ARM as we do on
x86, but this would introduce the same overhead of making a register
unavailable for general use and extra code at method entry and exit.
Instead, this commit removes the need for a frame pointer. Unwinding
involves consulting a map of instruction offsets to frame sizes which
is generated at compile time. This is necessary because stack trace
creation can happen at any time due to Thread.getStackTrace being
called by another thread, and the frame size varies during the
execution of a method.
So far, only x86(_64) is working, and continuations and tail call
optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
|
|
|
MyThread::TraceContext c(t, ip, stack, continuation, t->trace);
|
2010-06-25 15:51:35 +00:00
|
|
|
|
2011-01-28 04:06:01 +00:00
|
|
|
void* frame;
|
|
|
|
findUnwindTarget(t, &ip, &frame, &stack, &continuation);
|
2009-05-23 22:15:06 +00:00
|
|
|
}
|
|
|
|
|
2009-05-25 04:27:50 +00:00
|
|
|
t->trace->nativeMethod = 0;
|
|
|
|
t->trace->targetMethod = 0;
|
|
|
|
|
2011-01-27 18:54:41 +00:00
|
|
|
popResources(t);
|
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
transition(t, ip, stack, continuation, t->trace);
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2011-01-28 04:06:01 +00:00
|
|
|
vmJump(ip, 0, stack, t, reinterpret_cast<uintptr_t>(result), 0);
|
2009-05-23 22:15:06 +00:00
|
|
|
}
|
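
As the "support stack unwinding without using a frame pointer" commit
message above explains, unwinding without a frame pointer relies on a
compile-time table mapping instruction offsets to frame sizes, which is
also the kind of data findFrameMap consults in walkContinuationBody.
The sketch below shows one plausible shape for such a lookup; the
FrameSizeEntry layout and the frameSizeAt helper are hypothetical
illustrations, not Avian's actual data structures:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical table entry: from ipOffset until the next entry, the
// frame occupies frameWords words.
struct FrameSizeEntry {
  uint32_t ipOffset;
  uint32_t frameWords;
};

// Return the frame size in effect at ipOffset, assuming the table is
// sorted by ipOffset and an entry at offset 0 describes method entry.
inline uint32_t frameSizeAt(const std::vector<FrameSizeEntry>& table,
                            uint32_t ipOffset)
{
  // find the first entry past ipOffset, then step back one
  auto it = std::upper_bound(table.begin(),
                             table.end(),
                             ipOffset,
                             [](uint32_t offset, const FrameSizeEntry& e) {
                               return offset < e.ipOffset;
                             });
  return it == table.begin() ? 0 : (it - 1)->frameWords;
}
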
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
int8_t* returnSpec(MyThread* t, GcMethod* method)
|
2009-05-23 22:15:06 +00:00
|
|
|
{
|
2014-06-21 04:16:33 +00:00
|
|
|
int8_t* s = method->spec()->body().begin();
|
2014-07-11 15:50:18 +00:00
|
|
|
while (*s and *s != ')')
|
|
|
|
++s;
|
2009-05-23 22:15:06 +00:00
|
|
|
expect(t, *s == ')');
|
|
|
|
return s + 1;
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcClass* returnClass(MyThread* t, GcMethod* method)
|
2009-05-24 01:49:14 +00:00
|
|
|
{
|
2009-08-10 13:56:16 +00:00
|
|
|
PROTECT(t, method);
|
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
int8_t* spec = returnSpec(t, method);
|
|
|
|
unsigned length = strlen(reinterpret_cast<char*>(spec));
|
2014-06-28 23:24:24 +00:00
|
|
|
GcByteArray* name;
|
2009-05-24 01:49:14 +00:00
|
|
|
if (*spec == '[') {
|
2014-06-28 23:24:24 +00:00
|
|
|
name = makeByteArray(t, length + 1);
|
|
|
|
memcpy(name->body().begin(), spec, length);
|
2009-05-24 01:49:14 +00:00
|
|
|
} else {
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, *spec == 'L');
|
|
|
|
assertT(t, spec[length - 1] == ';');
|
2014-06-28 23:24:24 +00:00
|
|
|
name = makeByteArray(t, length - 1);
|
|
|
|
memcpy(name->body().begin(), spec + 1, length - 2);
|
2009-05-24 01:49:14 +00:00
|
|
|
}
|
2009-08-10 13:56:16 +00:00
|
|
|
|
2014-06-28 21:11:31 +00:00
|
|
|
return resolveClass(t, method->class_()->loader(), name);
|
2009-05-24 01:49:14 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
bool compatibleReturnType(MyThread* t, GcMethod* oldMethod, GcMethod* newMethod)
|
2009-05-23 22:15:06 +00:00
|
|
|
{
|
|
|
|
if (oldMethod == newMethod) {
|
|
|
|
return true;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if (oldMethod->returnCode() == newMethod->returnCode()) {
|
2014-05-29 04:17:25 +00:00
|
|
|
if (oldMethod->returnCode() == ObjectField) {
|
2009-05-24 01:49:14 +00:00
|
|
|
PROTECT(t, newMethod);
|
2009-05-23 22:15:06 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
GcClass* oldClass = returnClass(t, oldMethod);
|
2009-05-24 01:49:14 +00:00
|
|
|
PROTECT(t, oldClass);
|
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
GcClass* newClass = returnClass(t, newMethod);
|
2009-05-24 01:49:14 +00:00
|
|
|
|
|
|
|
return isAssignableFrom(t, oldClass, newClass);
|
2009-05-23 22:15:06 +00:00
|
|
|
} else {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
} else {
|
2014-05-29 04:17:25 +00:00
|
|
|
return oldMethod->returnCode() == VoidField;
|
2009-05-23 22:15:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
void jumpAndInvoke(MyThread* t, GcMethod* method, void* stack, ...)
|
2009-05-23 22:15:06 +00:00
|
|
|
{
|
2009-05-24 01:49:14 +00:00
|
|
|
t->trace->targetMethod = 0;
|
2009-05-23 22:15:06 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
if (method->flags() & ACC_NATIVE) {
|
2009-05-24 01:49:14 +00:00
|
|
|
t->trace->nativeMethod = method;
|
|
|
|
} else {
|
|
|
|
t->trace->nativeMethod = 0;
|
|
|
|
}
|
2009-05-23 22:15:06 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
unsigned argumentCount = method->parameterFootprint();
|
2010-12-27 22:55:23 +00:00
|
|
|
THREAD_RUNTIME_ARRAY(t, uintptr_t, arguments, argumentCount);
|
2014-07-11 15:50:18 +00:00
|
|
|
va_list a;
|
|
|
|
va_start(a, stack);
|
2009-05-24 01:49:14 +00:00
|
|
|
for (unsigned i = 0; i < argumentCount; ++i) {
|
2009-08-27 00:26:44 +00:00
|
|
|
RUNTIME_ARRAY_BODY(arguments)[i] = va_arg(a, uintptr_t);
|
2009-05-24 01:49:14 +00:00
|
|
|
}
|
|
|
|
va_end(a);
|
2011-01-27 18:54:41 +00:00
|
|
|
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, t->exception == 0);
|
2011-01-27 18:54:41 +00:00
|
|
|
|
|
|
|
popResources(t);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
vmJumpAndInvoke(
|
|
|
|
t,
|
|
|
|
reinterpret_cast<void*>(methodAddress(t, method)),
|
|
|
|
stack,
|
|
|
|
argumentCount * BytesPerWord,
|
|
|
|
RUNTIME_ARRAY_BODY(arguments),
|
|
|
|
(t->arch->alignFrameSize(t->arch->argumentFootprint(argumentCount))
|
|
|
|
+ t->arch->frameReturnAddressSize()) * BytesPerWord);
|
2009-05-23 22:15:06 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
void callContinuation(MyThread* t,
|
|
|
|
GcContinuation* continuation,
|
|
|
|
object result,
|
|
|
|
GcThrowable* exception)
|
2009-05-23 22:15:06 +00:00
|
|
|
{
|
2014-07-11 15:50:18 +00:00
|
|
|
enum { Call, Unwind, Rewind } action;
|
2009-05-23 22:15:06 +00:00
|
|
|
|
2014-06-29 03:50:32 +00:00
|
|
|
GcContinuation* nextContinuation = 0;
|
2009-05-23 22:15:06 +00:00
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
if (t->continuation == 0
|
2014-07-11 15:47:57 +00:00
|
|
|
or t->continuation->context() != continuation->context()) {
|
2009-05-23 22:15:06 +00:00
|
|
|
PROTECT(t, continuation);
|
|
|
|
PROTECT(t, result);
|
|
|
|
PROTECT(t, exception);
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
if (compatibleReturnType(
|
|
|
|
t, t->trace->originalMethod, continuation->context()->method())) {
|
2014-06-29 03:50:32 +00:00
|
|
|
GcContinuationContext* oldContext;
|
|
|
|
GcContinuationContext* unwindContext;
|
2009-05-23 22:15:06 +00:00
|
|
|
|
2009-05-24 01:49:14 +00:00
|
|
|
if (t->continuation) {
|
2014-06-29 03:50:32 +00:00
|
|
|
oldContext = t->continuation->context();
|
2009-05-24 01:49:14 +00:00
|
|
|
unwindContext = oldContext;
|
|
|
|
} else {
|
|
|
|
oldContext = 0;
|
|
|
|
unwindContext = 0;
|
|
|
|
}
|
|
|
|
|
2014-06-29 03:50:32 +00:00
|
|
|
GcContinuationContext* rewindContext = 0;
|
2009-05-24 01:49:14 +00:00
|
|
|
|
2014-06-29 03:50:32 +00:00
|
|
|
for (GcContinuationContext* newContext = continuation->context();
|
2014-07-11 15:47:57 +00:00
|
|
|
newContext;
|
|
|
|
newContext = newContext->next()) {
|
2009-05-24 01:49:14 +00:00
|
|
|
if (newContext == oldContext) {
|
|
|
|
unwindContext = 0;
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
rewindContext = newContext;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
if (unwindContext and unwindContext->continuation()) {
|
|
|
|
nextContinuation
|
|
|
|
= cast<GcContinuation>(t, unwindContext->continuation());
|
2014-07-02 21:11:27 +00:00
|
|
|
result = makeUnwindResult(t, continuation, result, exception);
|
2009-05-23 22:15:06 +00:00
|
|
|
action = Unwind;
|
2014-07-11 15:47:57 +00:00
|
|
|
} else if (rewindContext and rewindContext->continuation()) {
|
|
|
|
nextContinuation
|
|
|
|
= cast<GcContinuation>(t, rewindContext->continuation());
|
2009-05-24 01:49:14 +00:00
|
|
|
action = Rewind;
|
|
|
|
|
2014-06-22 02:54:51 +00:00
|
|
|
if (compileRoots(t)->rewindMethod() == 0) {
|
2009-05-24 01:49:14 +00:00
|
|
|
PROTECT(t, nextContinuation);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcMethod* method = resolveMethod(
|
|
|
|
t,
|
|
|
|
roots(t)->bootLoader(),
|
|
|
|
"avian/Continuations",
|
|
|
|
"rewind",
|
|
|
|
"(Ljava/lang/Runnable;Lavian/Callback;Ljava/lang/Object;"
|
|
|
|
"Ljava/lang/Throwable;)V");
|
2011-01-28 04:06:01 +00:00
|
|
|
|
|
|
|
PROTECT(t, method);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
compile(t, local::codeAllocator(t), 0, method);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2014-06-25 20:38:13 +00:00
|
|
|
compileRoots(t)->setRewindMethod(t, method);
|
2009-05-23 22:15:06 +00:00
|
|
|
}
|
2009-05-25 04:49:39 +00:00
|
|
|
} else {
|
|
|
|
action = Call;
|
|
|
|
}
|
2009-05-24 01:49:14 +00:00
|
|
|
} else {
|
2014-05-29 04:17:25 +00:00
|
|
|
throwNew(t, GcIncompatibleContinuationException::Type);
|
2009-05-23 22:15:06 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
action = Call;
|
|
|
|
}
|
|
|
|
|
|
|
|
void* ip;
|
2011-01-28 04:06:01 +00:00
|
|
|
void* frame;
|
2009-05-23 22:15:06 +00:00
|
|
|
void* stack;
|
2014-06-29 03:50:32 +00:00
|
|
|
GcContinuation* threadContinuation;
|
2011-01-28 04:06:01 +00:00
|
|
|
findUnwindTarget(t, &ip, &frame, &stack, &threadContinuation);
|
2009-05-23 22:15:06 +00:00
|
|
|
|
|
|
|
switch (action) {
|
|
|
|
case Call: {
|
2014-06-29 04:57:07 +00:00
|
|
|
callContinuation(t, continuation, result, exception, ip, stack);
|
2009-05-23 22:15:06 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case Unwind: {
|
2011-01-17 02:05:05 +00:00
|
|
|
callContinuation(t, nextContinuation, result, 0, ip, stack);
|
2009-05-23 22:15:06 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case Rewind: {
|
2011-01-17 02:05:05 +00:00
|
|
|
transition(t, 0, 0, nextContinuation, t->trace);
|
2009-05-25 04:49:39 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
jumpAndInvoke(t,
|
|
|
|
compileRoots(t)->rewindMethod(),
|
|
|
|
stack,
|
|
|
|
nextContinuation->context()->before(),
|
|
|
|
continuation,
|
|
|
|
result,
|
|
|
|
exception);
|
2009-05-23 22:15:06 +00:00
|
|
|
} break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
abort(t);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void callWithCurrentContinuation(MyThread* t, object receiver)
|
2009-05-26 05:27:10 +00:00
|
|
|
{
|
2014-05-29 04:17:25 +00:00
|
|
|
GcMethod* method = 0;
|
2009-05-26 05:27:10 +00:00
|
|
|
void* ip = 0;
|
|
|
|
void* stack = 0;
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
{
|
|
|
|
PROTECT(t, receiver);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2014-06-22 02:54:51 +00:00
|
|
|
if (compileRoots(t)->receiveMethod() == 0) {
|
2014-07-11 15:47:57 +00:00
|
|
|
GcMethod* m = resolveMethod(t,
|
|
|
|
roots(t)->bootLoader(),
|
|
|
|
"avian/Function",
|
|
|
|
"call",
|
|
|
|
"(Ljava/lang/Object;)Ljava/lang/Object;");
|
2009-05-26 05:27:10 +00:00
|
|
|
|
|
|
|
if (m) {
|
2014-06-25 20:38:13 +00:00
|
|
|
compileRoots(t)->setReceiveMethod(t, m);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2014-06-28 23:24:24 +00:00
|
|
|
GcClass* continuationClass = type(t, GcContinuation::Type);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2014-06-28 23:24:24 +00:00
|
|
|
if (continuationClass->vmFlags() & BootstrapFlag) {
|
2014-06-29 04:57:07 +00:00
|
|
|
resolveSystemClass(
|
2014-06-30 01:44:41 +00:00
|
|
|
t, roots(t)->bootLoader(), continuationClass->name());
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
method = findInterfaceMethod(
|
|
|
|
t, compileRoots(t)->receiveMethod(), objectClass(t, receiver));
|
2010-12-27 22:55:23 +00:00
|
|
|
PROTECT(t, method);
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
compile(t, local::codeAllocator(t), 0, method);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
t->continuation = makeCurrentContinuation(t, &ip, &stack);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
jumpAndInvoke(t, method, stack, receiver, t->continuation);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void dynamicWind(MyThread* t, object before, object thunk, object after)
|
2009-05-26 05:27:10 +00:00
|
|
|
{
|
|
|
|
void* ip = 0;
|
|
|
|
void* stack = 0;
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
{
|
|
|
|
PROTECT(t, before);
|
2009-05-26 05:27:10 +00:00
|
|
|
PROTECT(t, thunk);
|
|
|
|
PROTECT(t, after);
|
|
|
|
|
2014-06-22 02:54:51 +00:00
|
|
|
if (compileRoots(t)->windMethod() == 0) {
|
2014-07-11 15:47:57 +00:00
|
|
|
GcMethod* method = resolveMethod(
|
|
|
|
t,
|
|
|
|
roots(t)->bootLoader(),
|
|
|
|
"avian/Continuations",
|
|
|
|
"wind",
|
|
|
|
"(Ljava/lang/Runnable;Ljava/util/concurrent/Callable;"
|
|
|
|
"Ljava/lang/Runnable;)Lavian/Continuations$UnwindResult;");
|
2009-05-26 05:27:10 +00:00
|
|
|
|
|
|
|
if (method) {
|
2014-06-25 20:38:13 +00:00
|
|
|
compileRoots(t)->setWindMethod(t, method);
|
2009-08-27 00:26:44 +00:00
|
|
|
compile(t, local::codeAllocator(t), 0, method);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-17 02:05:05 +00:00
|
|
|
t->continuation = makeCurrentContinuation(t, &ip, &stack);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
GcContinuationContext* newContext
|
|
|
|
= makeContinuationContext(t,
|
|
|
|
t->continuation->context(),
|
|
|
|
before,
|
|
|
|
after,
|
|
|
|
t->continuation,
|
|
|
|
t->trace->originalMethod);
|
2009-05-26 05:27:10 +00:00
|
|
|
|
2014-06-25 20:38:13 +00:00
|
|
|
t->continuation->setContext(t, newContext);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
|
2014-06-22 02:54:51 +00:00
|
|
|
jumpAndInvoke(t, compileRoots(t)->windMethod(), stack, before, thunk, after);
|
2009-05-26 05:27:10 +00:00
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
class ArgumentList {
|
|
|
|
public:
|
2014-07-11 15:50:18 +00:00
|
|
|
ArgumentList(Thread* t,
|
|
|
|
uintptr_t* array,
|
|
|
|
unsigned size,
|
|
|
|
bool* objectMask,
|
|
|
|
object this_,
|
|
|
|
const char* spec,
|
|
|
|
bool indirectObjects,
|
|
|
|
va_list arguments)
|
|
|
|
: t(static_cast<MyThread*>(t)),
|
|
|
|
array(array),
|
|
|
|
objectMask(objectMask),
|
|
|
|
size(size),
|
|
|
|
position(0),
|
|
|
|
protector(this)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
|
|
|
if (this_) {
|
|
|
|
addObject(this_);
|
|
|
|
}
|
|
|
|
|
2007-10-12 17:56:43 +00:00
|
|
|
for (MethodSpecIterator it(t, spec); it.hasNext();) {
|
|
|
|
switch (*it.next()) {
|
2007-09-25 23:53:11 +00:00
|
|
|
case 'L':
|
|
|
|
case '[':
|
|
|
|
if (indirectObjects) {
|
|
|
|
object* v = va_arg(arguments, object*);
|
|
|
|
addObject(v ? *v : 0);
|
|
|
|
} else {
|
|
|
|
addObject(va_arg(arguments, object));
|
|
|
|
}
|
|
|
|
break;
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
case 'J':
|
|
|
|
addLong(va_arg(arguments, uint64_t));
|
|
|
|
break;
|
2007-10-12 17:56:43 +00:00
|
|
|
|
2012-02-28 22:35:28 +00:00
|
|
|
case 'D':
|
|
|
|
addLong(doubleToBits(va_arg(arguments, double)));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'F':
|
|
|
|
addInt(floatToBits(va_arg(arguments, double)));
|
|
|
|
break;
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
default:
|
|
|
|
addInt(va_arg(arguments, uint32_t));
|
2014-05-29 04:17:25 +00:00
|
|
|
break;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
ArgumentList(Thread* t,
|
|
|
|
uintptr_t* array,
|
|
|
|
unsigned size,
|
|
|
|
bool* objectMask,
|
|
|
|
object this_,
|
|
|
|
const char* spec,
|
|
|
|
const jvalue* arguments)
|
|
|
|
: t(static_cast<MyThread*>(t)),
|
|
|
|
array(array),
|
|
|
|
objectMask(objectMask),
|
|
|
|
size(size),
|
|
|
|
position(0),
|
|
|
|
protector(this)
|
2012-06-15 23:41:40 +00:00
|
|
|
{
|
|
|
|
if (this_) {
|
|
|
|
addObject(this_);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned index = 0;
|
|
|
|
for (MethodSpecIterator it(t, spec); it.hasNext();) {
|
|
|
|
switch (*it.next()) {
|
|
|
|
case 'L':
|
|
|
|
case '[': {
|
|
|
|
object* v = arguments[index++].l;
|
|
|
|
addObject(v ? *v : 0);
|
|
|
|
} break;
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2012-06-15 23:41:40 +00:00
|
|
|
case 'J':
|
|
|
|
addLong(arguments[index++].j);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'D':
|
2012-07-06 16:56:04 +00:00
|
|
|
addLong(doubleToBits(arguments[index++].d));
|
2012-06-15 23:41:40 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 'F':
|
2012-07-06 16:56:04 +00:00
|
|
|
addInt(floatToBits(arguments[index++].f));
|
2012-06-15 23:41:40 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2012-07-06 16:56:04 +00:00
|
|
|
addInt(arguments[index++].i);
|
2014-05-29 04:17:25 +00:00
|
|
|
break;
|
2012-06-15 23:41:40 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
ArgumentList(Thread* t,
|
|
|
|
uintptr_t* array,
|
|
|
|
unsigned size,
|
|
|
|
bool* objectMask,
|
|
|
|
object this_,
|
|
|
|
const char* spec,
|
|
|
|
object arguments)
|
|
|
|
: t(static_cast<MyThread*>(t)),
|
|
|
|
array(array),
|
|
|
|
objectMask(objectMask),
|
|
|
|
size(size),
|
|
|
|
position(0),
|
|
|
|
protector(this)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
|
|
|
if (this_) {
|
|
|
|
addObject(this_);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned index = 0;
|
2007-10-12 17:56:43 +00:00
|
|
|
for (MethodSpecIterator it(t, spec); it.hasNext();) {
|
|
|
|
switch (*it.next()) {
|
2007-09-25 23:53:11 +00:00
|
|
|
case 'L':
|
|
|
|
case '[':
|
|
|
|
addObject(objectArrayBody(t, arguments, index++));
|
|
|
|
break;
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
case 'J':
|
|
|
|
case 'D':
|
2014-07-11 15:50:18 +00:00
|
|
|
addLong(
|
|
|
|
fieldAtOffset<int64_t>(objectArrayBody(t, arguments, index++), 8));
|
2007-09-25 23:53:11 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2013-02-11 00:38:51 +00:00
|
|
|
addInt(fieldAtOffset<int32_t>(objectArrayBody(t, arguments, index++),
|
2014-07-11 15:50:18 +00:00
|
|
|
BytesPerWord));
|
2011-03-17 14:46:46 +00:00
|
|
|
break;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void addObject(object v)
|
|
|
|
{
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, position < size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
array[position] = reinterpret_cast<uintptr_t>(v);
|
|
|
|
objectMask[position] = true;
|
2014-07-11 15:50:18 +00:00
|
|
|
++position;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void addInt(uintptr_t v)
|
|
|
|
{
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, position < size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
array[position] = v;
|
|
|
|
objectMask[position] = false;
|
2014-07-11 15:50:18 +00:00
|
|
|
++position;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void addLong(uint64_t v)
|
|
|
|
{
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, position < size - 1);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2009-05-03 20:57:11 +00:00
|
|
|
memcpy(array + position, &v, 8);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
objectMask[position] = false;
|
2007-12-23 20:06:24 +00:00
|
|
|
objectMask[position + 1] = false;
|
2009-05-03 20:57:11 +00:00
|
|
|
|
|
|
|
position += 2;
|
2007-09-25 23:53:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
MyThread* t;
|
|
|
|
uintptr_t* array;
|
|
|
|
bool* objectMask;
|
2009-02-17 02:49:28 +00:00
|
|
|
unsigned size;
|
2007-09-25 23:53:11 +00:00
|
|
|
unsigned position;
|
2007-10-12 17:56:43 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
class MyProtector : public Thread::Protector {
|
2007-10-12 17:56:43 +00:00
|
|
|
public:
|
2014-07-11 15:50:18 +00:00
|
|
|
MyProtector(ArgumentList* list) : Protector(list->t), list(list)
|
|
|
|
{
|
|
|
|
}
|
2007-10-12 17:56:43 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
virtual void visit(Heap::Visitor* v)
|
|
|
|
{
|
2009-05-17 23:43:48 +00:00
|
|
|
for (unsigned i = 0; i < list->position; ++i) {
|
2007-10-12 17:56:43 +00:00
|
|
|
if (list->objectMask[i]) {
|
2007-10-28 19:14:53 +00:00
|
|
|
v->visit(reinterpret_cast<object*>(list->array + i));
|
2007-10-12 17:56:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ArgumentList* list;
|
|
|
|
} protector;
|
2007-09-25 23:53:11 +00:00
|
|
|
};
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
object invoke(Thread* thread, GcMethod* method, ArgumentList* arguments)
|
2007-09-25 23:53:11 +00:00
|
|
|
{
|
|
|
|
MyThread* t = static_cast<MyThread*>(thread);
|
2010-12-19 22:23:19 +00:00
|
|
|
|
2011-03-26 00:55:25 +00:00
|
|
|
if (false) {
|
|
|
|
PROTECT(t, method);
|
|
|
|
|
2014-07-11 15:47:57 +00:00
|
|
|
compile(
|
|
|
|
t,
|
|
|
|
local::codeAllocator(static_cast<MyThread*>(t)),
|
|
|
|
0,
|
|
|
|
resolveMethod(
|
|
|
|
t, roots(t)->appLoader(), "foo/ClassName", "methodName", "()V"));
|
2011-03-26 00:55:25 +00:00
|
|
|
}
|
|
|
|
|
2010-12-19 22:23:19 +00:00
|
|
|
uintptr_t stackLimit = t->stackLimit;
|
|
|
|
uintptr_t stackPosition = reinterpret_cast<uintptr_t>(&t);
|
|
|
|
if (stackLimit == 0) {
|
2012-03-14 18:36:42 +00:00
|
|
|
t->stackLimit = stackPosition - t->m->stackSizeInBytes;
|
2010-12-19 22:23:19 +00:00
|
|
|
} else if (stackPosition < stackLimit) {
|
2014-05-29 04:17:25 +00:00
|
|
|
throwNew(t, GcStackOverflowError::Type);
|
2010-12-19 22:23:19 +00:00
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
THREAD_RESOURCE(t,
|
|
|
|
uintptr_t,
|
|
|
|
stackLimit,
|
2010-12-27 22:55:23 +00:00
|
|
|
static_cast<MyThread*>(t)->stackLimit = stackLimit);
|
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
unsigned returnCode = method->returnCode();
|
2007-09-25 23:53:11 +00:00
|
|
|
unsigned returnType = fieldType(t, returnCode);
|
|
|
|
|
2007-12-30 22:24:48 +00:00
|
|
|
uint64_t result;
|
2007-09-30 02:48:27 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
{
|
|
|
|
MyThread::CallTrace trace(t, method);
|
2008-04-07 23:47:41 +00:00
|
|
|
|
2010-12-27 22:55:23 +00:00
|
|
|
MyCheckpoint checkpoint(t);
|
|
|
|
|
2014-06-04 01:52:01 +00:00
|
|
|
assertT(t, arguments->position == arguments->size);
|
2009-02-17 02:49:28 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
result = vmInvoke(
|
|
|
|
t,
|
|
|
|
reinterpret_cast<void*>(methodAddress(t, method)),
|
|
|
|
arguments->array,
|
|
|
|
arguments->position * BytesPerWord,
|
|
|
|
t->arch->alignFrameSize(t->arch->argumentFootprint(arguments->position))
|
|
|
|
* BytesPerWord,
|
|
|
|
returnType);
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
2007-09-25 23:53:11 +00:00
|
|
|
|
2014-05-29 04:17:25 +00:00
|
|
|
if (t->exception) {
|
2014-08-20 15:49:00 +00:00
|
|
|
if (UNLIKELY(t->getFlags() & Thread::UseBackupHeapFlag)) {
|
2008-04-20 16:21:32 +00:00
|
|
|
collect(t, Heap::MinorCollection);
|
|
|
|
}
|
2014-05-29 04:17:25 +00:00
|
|
|
|
2014-06-28 23:24:24 +00:00
|
|
|
GcThrowable* exception = t->exception;
|
2010-12-27 22:55:23 +00:00
|
|
|
t->exception = 0;
|
|
|
|
vm::throw_(t, exception);
|
2008-04-20 16:21:32 +00:00
|
|
|
}
|
|
|
|
|
2007-09-25 23:53:11 +00:00
|
|
|
object r;
|
|
|
|
switch (returnCode) {
|
|
|
|
case ByteField:
|
|
|
|
case BooleanField:
|
|
|
|
case CharField:
|
|
|
|
case ShortField:
|
|
|
|
case FloatField:
|
|
|
|
case IntField:
|
2014-07-02 21:11:27 +00:00
|
|
|
r = makeInt(t, result);
|
2007-09-25 23:53:11 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case LongField:
|
|
|
|
case DoubleField:
|
2014-07-02 21:11:27 +00:00
|
|
|
r = makeLong(t, result);
|
2007-09-25 23:53:11 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case ObjectField:
|
2007-12-16 00:24:15 +00:00
|
|
|
r = reinterpret_cast<object>(result);
|
2007-09-25 23:53:11 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VoidField:
|
|
|
|
r = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
abort(t);
|
2009-09-19 00:01:54 +00:00
|
|
|
}
|
2007-09-24 01:39:03 +00:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
class SignalHandler : public SignalRegistrar::Handler {
|
2007-12-30 22:24:48 +00:00
|
|
|
public:
|
2014-06-26 02:17:27 +00:00
|
|
|
typedef GcThrowable* (GcRoots::*ExceptionGetter)();
|
2014-07-11 15:47:57 +00:00
|
|
|
SignalHandler(Gc::Type type, ExceptionGetter exc, unsigned fixedSize)
|
|
|
|
: m(0), type(type), exc(exc), fixedSize(fixedSize)
|
|
|
|
{
|
|
|
|
}
|
2007-12-30 22:24:48 +00:00
|
|
|
|
2014-08-19 17:08:15 +00:00
|
|
|
void setException(MyThread* t)
{
|
|
|
|
if (ensure(t, pad(fixedSize) + traceSize(t))) {
|
2014-08-20 15:49:00 +00:00
|
|
|
t->setFlag(Thread::TracingFlag);
|
2014-08-19 17:08:15 +00:00
|
|
|
t->exception = makeThrowable(t, type);
|
2014-08-20 15:49:00 +00:00
|
|
|
t->clearFlag(Thread::TracingFlag);
|
2014-08-19 17:08:15 +00:00
|
|
|
} else {
|
|
|
|
// not enough memory available for a new exception and stack
|
|
|
|
// trace -- use a preallocated instance instead
|
|
|
|
t->exception = (vm::roots(t)->*exc)();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
virtual bool handleSignal(void** ip,
|
|
|
|
void** frame,
|
|
|
|
void** stack,
|
2011-01-28 04:06:01 +00:00
|
|
|
void** thread)
|
|
|
|
{
|
2007-12-30 22:24:48 +00:00
|
|
|
MyThread* t = static_cast<MyThread*>(m->localThread->get());
|
2009-08-18 21:47:08 +00:00
|
|
|
if (t and t->state == Thread::ActiveState) {
|
2014-08-20 15:49:00 +00:00
|
|
|
if (t->getFlags() & Thread::TryNativeFlag) {
|
2014-08-19 17:08:15 +00:00
|
|
|
setException(t);
|
|
|
|
|
|
|
|
popResources(t);
|
|
|
|
|
|
|
|
GcContinuation* continuation;
|
|
|
|
findUnwindTarget(t, ip, frame, stack, &continuation);
|
|
|
|
|
|
|
|
t->trace->targetMethod = 0;
|
|
|
|
t->trace->nativeMethod = 0;
|
|
|
|
|
|
|
|
transition(t, *ip, *stack, continuation, t->trace);
|
|
|
|
|
|
|
|
*thread = t;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
} else if (methodForIp(t, *ip)) {
|
2009-09-04 23:08:45 +00:00
|
|
|
// add one to the IP since findLineNumber will subtract one
|
2009-09-04 21:09:40 +00:00
|
|
|
// when we make the trace:
|
2014-07-11 15:50:18 +00:00
|
|
|
MyThread::TraceContext context(
|
|
|
|
t,
|
|
|
|
static_cast<uint8_t*>(*ip) + 1,
|
|
|
|
static_cast<void**>(*stack) - t->arch->frameReturnAddressSize(),
|
|
|
|
t->continuation,
|
|
|
|
t->trace);
|
2008-04-09 19:08:13 +00:00
|
|
|
|
2014-08-19 17:08:15 +00:00
|
|
|
setException(t);
|
2008-01-02 01:07:12 +00:00
|
|
|
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attach to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
// printTrace(t, t->exception);
|
2009-08-13 15:17:05 +00:00
|
|
|
|
2014-06-29 03:50:32 +00:00
|
|
|
GcContinuation* continuation;
|
2011-01-28 04:06:01 +00:00
|
|
|
findUnwindTarget(t, ip, frame, stack, &continuation);
|
2008-04-23 16:33:31 +00:00
|
|
|
|
2014-08-19 17:08:15 +00:00
|
|
|
transition(t, *ip, *stack, continuation, t->trace);
|
2008-04-23 16:33:31 +00:00
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
*thread = t;
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
return true;
|
|
|
|
}
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
2009-06-11 23:14:54 +00:00
|
|
|
|
|
|
|
if (compileLog) {
|
|
|
|
fflush(compileLog);
|
|
|
|
}
|
|
|
|
|
2008-01-02 01:07:12 +00:00
|
|
|
return false;
|
2007-12-30 22:24:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Machine* m;
|
2014-05-29 04:17:25 +00:00
|
|
|
Gc::Type type;
|
2014-06-30 01:44:41 +00:00
|
|
|
ExceptionGetter exc;
|
2010-12-20 00:47:21 +00:00
|
|
|
unsigned fixedSize;
|
2007-12-30 22:24:48 +00:00
|
|
|
};
|
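
The "support multiple sequential VM instances" commit message above
notes that Posix signal registration offers no way to pass a context
pointer to the handler, which is why the handler must find the VM
through shared state. The following hedged sketch shows the usual
workaround with a file-scope pointer; globalVM, segvHandler, and
installSegvHandler are illustrative names, not Avian's own mechanism:

#include <signal.h>
#include <string.h>

namespace {

// Set when the VM is created, cleared when it is destroyed.  Because a
// sigaction handler receives only the signal number, siginfo, and
// ucontext, this global is the handler's only path back to the VM.
void* globalVM = 0;

void segvHandler(int, siginfo_t*, void*)
{
  void* vm = globalVM;
  if (vm == 0) {
    // no live VM: restore the default action and re-raise
    signal(SIGSEGV, SIG_DFL);
    raise(SIGSEGV);
    return;
  }
  // ... otherwise hand control to the VM's null-pointer machinery ...
}

void installSegvHandler()
{
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  action.sa_flags = SA_SIGINFO;
  action.sa_sigaction = segvHandler;
  sigaction(SIGSEGV, &action, 0);
}

}  // namespace
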
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
bool isThunk(MyThread* t, void* ip);
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
bool isVirtualThunk(MyThread* t, void* ip);
|
2010-07-06 22:13:11 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
bool isThunkUnsafeStack(MyThread* t, void* ip);
|
2010-06-16 01:10:48 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void boot(MyThread* t, BootImage* image, uint8_t* code);
|
2008-11-23 23:58:01 +00:00
|
|
|
|
2008-04-22 15:31:40 +00:00
|
|
|
class MyProcessor;
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
MyProcessor* processor(MyThread* t);
|
2008-04-22 15:31:40 +00:00
|
|
|
|
2014-09-22 17:10:35 +00:00
|
|
|
#ifndef AVIAN_AOT_ONLY
|
2014-07-11 15:50:18 +00:00
|
|
|
void compileThunks(MyThread* t, FixedAllocator* allocator);
|
2014-09-22 17:10:35 +00:00
|
|
|
#endif
|
2010-09-14 16:49:41 +00:00
|
|
|
|
2012-05-02 15:49:31 +00:00
|
|
|
class CompilationHandlerList {
|
2014-07-11 15:50:18 +00:00
|
|
|
public:
|
|
|
|
CompilationHandlerList(CompilationHandlerList* next,
|
|
|
|
Processor::CompilationHandler* handler)
|
|
|
|
: next(next), handler(handler)
|
|
|
|
{
|
|
|
|
}
|
2012-05-02 15:49:31 +00:00
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
void dispose(Allocator* allocator)
|
|
|
|
{
|
|
|
|
if (this) {
|
2012-05-02 15:49:31 +00:00
|
|
|
next->dispose(allocator);
|
|
|
|
handler->dispose();
|
|
|
|
allocator->free(this, sizeof(*this));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
CompilationHandlerList* next;
|
|
|
|
Processor::CompilationHandler* handler;
|
|
|
|
};
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
template <class T, class C>
|
|
|
|
int checkConstant(MyThread* t, size_t expected, T C::*field, const char* name)
|
|
|
|
{
|
|
|
|
size_t actual = reinterpret_cast<uint8_t*>(&(t->*field))
|
|
|
|
- reinterpret_cast<uint8_t*>(t);
|
|
|
|
if (expected != actual) {
|
2013-12-19 05:25:23 +00:00
|
|
|
fprintf(stderr,
|
|
|
|
"constant mismatch (%s): \n\tconstant says: %d\n\tc++ compiler "
|
|
|
|
"says: %d\n",
|
|
|
|
name,
|
|
|
|
(unsigned)expected,
|
|
|
|
(unsigned)actual);
|
2012-06-20 19:14:16 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-07-11 15:50:18 +00:00
|
|
|
class MyProcessor : public Processor {
|
2007-09-24 01:39:03 +00:00
|
|
|
public:
|
2010-06-16 01:10:48 +00:00
|
|
|
class Thunk {
|
|
|
|
public:
|
2014-07-11 15:50:18 +00:00
|
|
|
Thunk() : start(0), frameSavedOffset(0), length(0)
|
|
|
|
{
|
|
|
|
}
|
2010-06-16 01:10:48 +00:00
    Thunk(uint8_t* start, unsigned frameSavedOffset, unsigned length)
        : start(start), frameSavedOffset(frameSavedOffset), length(length)
    {
    }

    uint8_t* start;
    unsigned frameSavedOffset;
    unsigned length;
  };

  class ThunkCollection {
   public:
    Thunk default_;
    Thunk defaultVirtual;
    Thunk native;
    Thunk aioob;
    Thunk stackOverflow;
    Thunk table;
  };

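  // The constructor fills thunkTable with the entry points of the runtime
  // helpers (compileMethod, compileVirtualMethod, invokeNative, the THUNK
  // list from thunks.cpp, and the exception-throwing helpers) that
  // JIT-compiled code reaches indirectly through each thread's thunk table.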
  MyProcessor(System* s,
              Allocator* allocator,
              const char* crashDumpDirectory,
              bool useNativeFeatures)
      : s(s),
        allocator(allocator),
        roots(0),
        bootImage(0),
        heapImage(0),
        codeImage(0),
        codeImageSize(0),
        segFaultHandler(GcNullPointerException::Type,
                        &GcRoots::nullPointerException,
                        GcNullPointerException::FixedSize),
        divideByZeroHandler(GcArithmeticException::Type,
                            &GcRoots::arithmeticException,
                            GcArithmeticException::FixedSize),
        codeAllocator(s, Slice<uint8_t>(0, 0)),
        callTableSize(0),
        useNativeFeatures(useNativeFeatures),
        compilationHandlers(0)
  {
    thunkTable[compileMethodIndex] = voidPointer(local::compileMethod);
    thunkTable[compileVirtualMethodIndex] = voidPointer(compileVirtualMethod);
    thunkTable[invokeNativeIndex] = voidPointer(invokeNative);
    thunkTable[throwArrayIndexOutOfBoundsIndex]
        = voidPointer(throwArrayIndexOutOfBounds);
    thunkTable[throwStackOverflowIndex] = voidPointer(throwStackOverflow);

    using namespace avian::codegen::runtime;

#define THUNK(s) thunkTable[s##Index] = voidPointer(s);
#include "thunks.cpp"
#undef THUNK

    // Set the dummyIndex entry to a constant which should require the
    // maximum number of bytes to represent in assembly code
    // (i.e. can't be represented by a smaller number of bytes and
    // implicitly sign- or zero-extended).  We'll use this property
    // later to determine the maximum size of a thunk in the thunk
    // table.
    thunkTable[dummyIndex] = reinterpret_cast<void*>(
        static_cast<uintptr_t>(UINT64_C(0x5555555555555555)));

    signals.setCrashDumpDirectory(crashDumpDirectory);
  }

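  // makeThread allocates and initializes a MyThread.  When the target word
  // size matches the host word size, it also cross-checks every
  // TARGET_THREAD_* offset against the real field layout via checkConstant
  // and aborts on any mismatch, since generated code depends on those
  // offsets.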
  virtual Thread* makeThread(Machine* m, GcThread* javaThread, Thread* parent)
  {
    MyThread* t = new (m->heap->allocate(sizeof(MyThread))) MyThread(
        m, javaThread, static_cast<MyThread*>(parent), useNativeFeatures);

    t->heapImage = heapImage;
    t->codeImage = codeImage;
    t->thunkTable = thunkTable;

#if TARGET_BYTES_PER_WORD == BYTES_PER_WORD

    int mismatches
        = checkConstant(t,
                        TARGET_THREAD_EXCEPTION,
                        &Thread::exception,
                        "TARGET_THREAD_EXCEPTION")
          + checkConstant(t,
                          TARGET_THREAD_EXCEPTIONSTACKADJUSTMENT,
                          &MyThread::exceptionStackAdjustment,
                          "TARGET_THREAD_EXCEPTIONSTACKADJUSTMENT")
          + checkConstant(t,
                          TARGET_THREAD_EXCEPTIONOFFSET,
                          &MyThread::exceptionOffset,
                          "TARGET_THREAD_EXCEPTIONOFFSET")
          + checkConstant(t,
                          TARGET_THREAD_EXCEPTIONHANDLER,
                          &MyThread::exceptionHandler,
                          "TARGET_THREAD_EXCEPTIONHANDLER")
          + checkConstant(
                t, TARGET_THREAD_IP, &MyThread::ip, "TARGET_THREAD_IP")
          + checkConstant(
                t, TARGET_THREAD_STACK, &MyThread::stack, "TARGET_THREAD_STACK")
          + checkConstant(t,
                          TARGET_THREAD_NEWSTACK,
                          &MyThread::newStack,
                          "TARGET_THREAD_NEWSTACK")
          + checkConstant(t,
                          TARGET_THREAD_TAILADDRESS,
                          &MyThread::tailAddress,
                          "TARGET_THREAD_TAILADDRESS")
          + checkConstant(t,
                          TARGET_THREAD_VIRTUALCALLTARGET,
                          &MyThread::virtualCallTarget,
                          "TARGET_THREAD_VIRTUALCALLTARGET")
          + checkConstant(t,
                          TARGET_THREAD_VIRTUALCALLINDEX,
                          &MyThread::virtualCallIndex,
                          "TARGET_THREAD_VIRTUALCALLINDEX")
          + checkConstant(t,
                          TARGET_THREAD_HEAPIMAGE,
                          &MyThread::heapImage,
                          "TARGET_THREAD_HEAPIMAGE")
          + checkConstant(t,
                          TARGET_THREAD_CODEIMAGE,
                          &MyThread::codeImage,
                          "TARGET_THREAD_CODEIMAGE")
          + checkConstant(t,
                          TARGET_THREAD_THUNKTABLE,
                          &MyThread::thunkTable,
                          "TARGET_THREAD_THUNKTABLE")
          + checkConstant(t,
                          TARGET_THREAD_STACKLIMIT,
                          &MyThread::stackLimit,
                          "TARGET_THREAD_STACKLIMIT");

    if (mismatches > 0) {
      fprintf(stderr, "%d constant mismatches\n", mismatches);
      abort(t);
    }

    expect(t, TargetClassArrayElementSize == ClassArrayElementSize);
    expect(t, TargetClassFixedSize == ClassFixedSize);
    expect(t, TargetClassVtable == ClassVtable);

#endif

    t->init();

    return t;
  }

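  // makeMethod points a bytecode method's "compiled" field at the default
  // thunk, so the first call lands in the compileMethod helper rather than
  // in code that has not yet been generated.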
  virtual GcMethod* makeMethod(vm::Thread* t,
                               uint8_t vmFlags,
                               uint8_t returnCode,
                               uint8_t parameterCount,
                               uint8_t parameterFootprint,
                               uint16_t flags,
                               uint16_t offset,
                               GcByteArray* name,
                               GcByteArray* spec,
                               GcMethodAddendum* addendum,
                               GcClass* class_,
                               GcCode* code)
  {
    if (code) {
      code->compiled() = local::defaultThunk(static_cast<MyThread*>(t));
    }

    return vm::makeMethod(t,
                          vmFlags,
                          returnCode,
                          parameterCount,
                          parameterFootprint,
                          flags,
                          offset,
                          0,
                          0,
                          name,
                          spec,
                          addendum,
                          class_,
                          code);
  }

  virtual GcClass* makeClass(vm::Thread* t,
                             uint16_t flags,
                             uint16_t vmFlags,
                             uint16_t fixedSize,
                             uint8_t arrayElementSize,
                             uint8_t arrayDimensions,
                             GcClass* arrayElementClass,
                             GcIntArray* objectMask,
                             GcByteArray* name,
                             GcByteArray* sourceFile,
                             GcClass* super,
                             object interfaceTable,
                             object virtualTable,
                             object fieldTable,
                             object methodTable,
                             GcClassAddendum* addendum,
                             GcSingleton* staticTable,
                             GcClassLoader* loader,
                             unsigned vtableLength)
  {
    return vm::makeClass(t,
                         flags,
                         vmFlags,
                         fixedSize,
                         arrayElementSize,
                         arrayDimensions,
                         arrayElementClass,
                         0,
                         objectMask,
                         name,
                         sourceFile,
                         super,
                         interfaceTable,
                         virtualTable,
                         fieldTable,
                         methodTable,
                         addendum,
                         staticTable,
                         loader,
                         0,
                         vtableLength);
  }

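  // initVtable fills every slot of a freshly built vtable with a per-index
  // virtual-call thunk instead of compiled code, presumably so the first
  // dispatch through a slot can trigger compilation of the real target (via
  // the compileVirtualMethod entry installed in thunkTable above).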
  virtual void initVtable(Thread* t, GcClass* c)
  {
    PROTECT(t, c);
    for (int i = c->length() - 1; i >= 0; --i) {
      void* thunk
          = reinterpret_cast<void*>(virtualThunk(static_cast<MyThread*>(t), i));
      c->vtable()[i] = thunk;
    }
  }

  virtual void visitObjects(Thread* vmt, Heap::Visitor* v)
  {
    MyThread* t = static_cast<MyThread*>(vmt);

    if (t == t->m->rootThread) {
      v->visit(&roots);
    }

    for (MyThread::CallTrace* trace = t->trace; trace; trace = trace->next) {
      v->visit(&(trace->continuation));
      v->visit(&(trace->nativeMethod));
      v->visit(&(trace->targetMethod));
      v->visit(&(trace->originalMethod));
    }

    v->visit(&(t->continuation));

    for (Reference* r = t->reference; r; r = r->next) {
      v->visit(&(r->target));
    }

    visitStack(t, v);
  }

  virtual void walkStack(Thread* vmt, StackVisitor* v)
  {
    MyThread* t = static_cast<MyThread*>(vmt);

    MyStackWalker walker(t);
    walker.walk(v);
  }

  virtual int lineNumber(Thread* vmt, GcMethod* method, int ip)
  {
    return findLineNumber(static_cast<MyThread*>(vmt), method, ip);
  }

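  // JNI references are kept as reference-counted (acquire/release)
  // Reference nodes chained off MyThread::reference.  makeLocalReference
  // reuses an existing node for the same object, and visitObjects above
  // keeps the referenced objects alive for the collector.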
  virtual object* makeLocalReference(Thread* vmt, object o)
  {
    if (o) {
      MyThread* t = static_cast<MyThread*>(vmt);

      for (Reference* r = t->reference; r; r = r->next) {
        if (r->target == o) {
          acquire(t, r);

          return &(r->target);
        }
      }

      Reference* r = new (t->m->heap->allocate(sizeof(Reference)))
          Reference(o, &(t->reference), false);

      acquire(t, r);

      return &(r->target);
    } else {
      return 0;
    }
  }

  virtual void disposeLocalReference(Thread* t, object* r)
  {
    if (r) {
      release(t, reinterpret_cast<Reference*>(r));
    }
  }

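  // pushLocalFrame/popLocalFrame implement JNI-style local reference
  // frames: pushing records the current head of the Reference list in a
  // List<Reference*> node, and popping disposes every reference created
  // since that point before freeing the node.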
  virtual bool pushLocalFrame(Thread* vmt, unsigned)
  {
    MyThread* t = static_cast<MyThread*>(vmt);

    t->referenceFrame = new (t->m->heap->allocate(sizeof(List<Reference*>)))
        List<Reference*>(t->reference, t->referenceFrame);

    return true;
  }

  virtual void popLocalFrame(Thread* vmt)
  {
    MyThread* t = static_cast<MyThread*>(vmt);

    List<Reference*>* f = t->referenceFrame;
    t->referenceFrame = f->next;
    while (t->reference != f->item) {
      vm::dispose(t, t->reference);
    }

    t->m->heap->free(f, sizeof(List<Reference*>));
  }

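  // Each invoke* variant below marshals its arguments into an ArgumentList
  // according to the method's spec string, makes sure the resolved method
  // has been JIT-compiled into the code allocator, and then transfers
  // control through local::invoke.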
  virtual object invokeArray(Thread* t,
                             GcMethod* method,
                             object this_,
                             object arguments)
  {
    assertT(t, t->exception == 0);

    assertT(
        t,
        t->state == Thread::ActiveState or t->state == Thread::ExclusiveState);

    assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));

    method = findMethod(t, method, this_);

    const char* spec = reinterpret_cast<char*>(method->spec()->body().begin());

    unsigned size = method->parameterFootprint();
    THREAD_RUNTIME_ARRAY(t, uintptr_t, array, size);
    THREAD_RUNTIME_ARRAY(t, bool, objectMask, size);
    ArgumentList list(t,
                      RUNTIME_ARRAY_BODY(array),
                      size,
                      RUNTIME_ARRAY_BODY(objectMask),
                      this_,
                      spec,
                      arguments);

    PROTECT(t, method);

    compile(static_cast<MyThread*>(t),
            local::codeAllocator(static_cast<MyThread*>(t)),
            0,
            method);

    return local::invoke(t, method, &list);
  }

  virtual object invokeArray(Thread* t,
                             GcMethod* method,
                             object this_,
                             const jvalue* arguments)
  {
    assertT(t, t->exception == 0);

    assertT(
        t,
        t->state == Thread::ActiveState or t->state == Thread::ExclusiveState);

    assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));

    method = findMethod(t, method, this_);

    const char* spec = reinterpret_cast<char*>(method->spec()->body().begin());

    unsigned size = method->parameterFootprint();
    THREAD_RUNTIME_ARRAY(t, uintptr_t, array, size);
    THREAD_RUNTIME_ARRAY(t, bool, objectMask, size);
    ArgumentList list(t,
                      RUNTIME_ARRAY_BODY(array),
                      size,
                      RUNTIME_ARRAY_BODY(objectMask),
                      this_,
                      spec,
                      arguments);

    PROTECT(t, method);

    compile(static_cast<MyThread*>(t),
            local::codeAllocator(static_cast<MyThread*>(t)),
            0,
            method);

    return local::invoke(t, method, &list);
  }

  virtual object invokeList(Thread* t,
                            GcMethod* method,
                            object this_,
                            bool indirectObjects,
                            va_list arguments)
  {
    assertT(t, t->exception == 0);

    assertT(
        t,
        t->state == Thread::ActiveState or t->state == Thread::ExclusiveState);

    assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));

    method = findMethod(t, method, this_);

    const char* spec = reinterpret_cast<char*>(method->spec()->body().begin());

    unsigned size = method->parameterFootprint();
    THREAD_RUNTIME_ARRAY(t, uintptr_t, array, size);
    THREAD_RUNTIME_ARRAY(t, bool, objectMask, size);
    ArgumentList list(t,
                      RUNTIME_ARRAY_BODY(array),
                      size,
                      RUNTIME_ARRAY_BODY(objectMask),
                      this_,
                      spec,
                      indirectObjects,
                      arguments);

    PROTECT(t, method);

    compile(static_cast<MyThread*>(t),
            local::codeAllocator(static_cast<MyThread*>(t)),
            0,
            method);

    return local::invoke(t, method, &list);
  }

  virtual object invokeList(Thread* t,
                            GcClassLoader* loader,
                            const char* className,
                            const char* methodName,
                            const char* methodSpec,
                            object this_,
                            va_list arguments)
  {
    assertT(t, t->exception == 0);

    assertT(
        t,
        t->state == Thread::ActiveState or t->state == Thread::ExclusiveState);

    unsigned size = parameterFootprint(t, methodSpec, this_ == 0);
    THREAD_RUNTIME_ARRAY(t, uintptr_t, array, size);
    THREAD_RUNTIME_ARRAY(t, bool, objectMask, size);
    ArgumentList list(t,
                      RUNTIME_ARRAY_BODY(array),
                      size,
                      RUNTIME_ARRAY_BODY(objectMask),
                      this_,
                      methodSpec,
                      false,
                      arguments);

    GcMethod* method
        = resolveMethod(t, loader, className, methodName, methodSpec);

    assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));

    PROTECT(t, method);

    compile(static_cast<MyThread*>(t),
            local::codeAllocator(static_cast<MyThread*>(t)),
            0,
            method);

    return local::invoke(t, method, &list);
  }

  virtual void dispose(Thread* vmt)
  {
    MyThread* t = static_cast<MyThread*>(vmt);

    while (t->reference) {
      vm::dispose(t, t->reference);
    }

    t->arch->release();

    t->m->heap->free(t, sizeof(*t));
  }

  virtual void dispose()
  {
    if (codeAllocator.memory.begin()) {
#ifndef AVIAN_AOT_ONLY
      Memory::free(codeAllocator.memory);
#endif
    }

    compilationHandlers->dispose(allocator);

    signals.unregisterHandler(SignalRegistrar::SegFault);
    signals.unregisterHandler(SignalRegistrar::DivideByZero);
    signals.setCrashDumpDirectory(0);

    this->~MyProcessor();

    allocator->free(this, sizeof(*this));
  }

  virtual object getStackTrace(Thread* vmt, Thread* vmTarget)
  {
    MyThread* t = static_cast<MyThread*>(vmt);
    MyThread* target = static_cast<MyThread*>(vmTarget);
    MyProcessor* p = this;

    class Visitor : public System::ThreadVisitor {
     public:
      Visitor(MyThread* t, MyProcessor* p, MyThread* target)
          : t(t), p(p), target(target), trace(0)
      {
      }

      virtual void visit(void* ip, void* stack, void* link)
      {
        MyThread::TraceContext c(target, link);

        if (methodForIp(t, ip)) {
          // we caught the thread in Java code - use the register values
          c.ip = ip;
          c.stack = stack;
          c.methodIsMostRecent = true;
        } else if (target->transition) {
          // we caught the thread in native code while in the middle
          // of updating the context fields (MyThread::stack, etc.)
          static_cast<MyThread::Context&>(c) = *(target->transition);
        } else if (isVmInvokeUnsafeStack(ip)) {
          // we caught the thread in native code just after returning
          // from java code, but before clearing MyThread::stack
          // (which now contains a garbage value), and the most recent
          // Java frame, if any, can be found in
          // MyThread::continuation or MyThread::trace
          c.ip = 0;
          c.stack = 0;
        } else if (target->stack and (not isThunkUnsafeStack(t, ip))
                   and (not isVirtualThunk(t, ip))) {
          // we caught the thread in a thunk or native code, and the
          // saved stack pointer indicates the most recent Java frame
          // on the stack
          c.ip = getIp(target);
          c.stack = target->stack;
        } else if (isThunk(t, ip) or isVirtualThunk(t, ip)) {
          // we caught the thread in a thunk where the stack register
          // indicates the most recent Java frame on the stack

          // On e.g. x86, the return address will have already been
          // pushed onto the stack, in which case we use getIp to
          // retrieve it.  On e.g. ARM, it will be in the
          // link register.  Note that we can't just check if the link
          // argument is null here, since we use ecx/rcx as a
          // pseudo-link register on x86 for the purpose of tail
          // calls.
          c.ip = t->arch->hasLinkRegister() ? link : getIp(t, link, stack);
          c.stack = stack;
        } else {
          // we caught the thread in native code, and the most recent
          // Java frame, if any, can be found in
          // MyThread::continuation or MyThread::trace
          c.ip = 0;
          c.stack = 0;
        }

        if (ensure(t, traceSize(target))) {
          t->setFlag(Thread::TracingFlag);
          trace = makeTrace(t, target);
          t->clearFlag(Thread::TracingFlag);
        }
      }

      MyThread* t;
      MyProcessor* p;
      MyThread* target;
      object trace;
    } visitor(t, p, target);

    t->m->system->visit(t->systemThread, target->systemThread, &visitor);

    if (UNLIKELY(t->getFlags() & Thread::UseBackupHeapFlag)) {
      PROTECT(t, visitor.trace);

      collect(t, Heap::MinorCollection);
    }

    return visitor.trace ? visitor.trace : makeObjectArray(t, 0);
  }

  virtual void initialize(BootImage* image, Slice<uint8_t> code)
  {
    bootImage = image;
    codeAllocator.memory = code;
  }

  virtual void addCompilationHandler(CompilationHandler* handler)
  {
    compilationHandlers
        = new (allocator->allocate(sizeof(CompilationHandlerList)))
            CompilationHandlerList(compilationHandlers, handler);
  }

  virtual void compileMethod(Thread* vmt,
                             Zone* zone,
                             GcTriple** constants,
                             GcTriple** calls,
                             avian::codegen::DelayedPromise** addresses,
                             GcMethod* method,
                             OffsetResolver* resolver)
  {
    MyThread* t = static_cast<MyThread*>(vmt);
    BootContext bootContext(t, *constants, *calls, *addresses, zone, resolver);

    compile(t, &codeAllocator, &bootContext, method);

    *constants = bootContext.constants;
    *calls = bootContext.calls;
    *addresses = bootContext.addresses;
  }

  virtual void visitRoots(Thread* t, HeapWalker* w)
  {
    bootImage->methodTree = w->visitRoot(compileRoots(t)->methodTree());
    bootImage->methodTreeSentinal
        = w->visitRoot(compileRoots(t)->methodTreeSentinal());
    bootImage->virtualThunks = w->visitRoot(compileRoots(t)->virtualThunks());
  }

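  // normalizeVirtualThunks rewrites the absolute addresses stored in the
  // virtualThunks word array as offsets from the start of the executable
  // code area, presumably so they remain meaningful when the boot image is
  // loaded at a different base address.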
  virtual void normalizeVirtualThunks(Thread* t)
  {
    GcWordArray* a = compileRoots(t)->virtualThunks();
    for (unsigned i = 0; i < a->length(); i += 2) {
      if (a->body()[i]) {
        a->body()[i]
            -= reinterpret_cast<uintptr_t>(codeAllocator.memory.begin());
      }
    }
  }

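  // makeCallTable flattens the call table into pairs of target-word values:
  // the call site's offset within the code area, followed by the heap
  // walker's index for the callee combined with the call's flags (shifted
  // by TargetBootShift).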
  virtual unsigned* makeCallTable(Thread* t, HeapWalker* w)
  {
    bootImage->codeSize = codeAllocator.offset;
    bootImage->callCount = callTableSize;

    unsigned* table = static_cast<unsigned*>(
        t->m->heap->allocate(callTableSize * sizeof(unsigned) * 2));

    unsigned index = 0;
    GcArray* callTable = compileRoots(t)->callTable();
    for (unsigned i = 0; i < callTable->length(); ++i) {
      for (GcCallNode* p = cast<GcCallNode>(t, callTable->body()[i]); p;
           p = p->next()) {
        table[index++]
            = targetVW(p->address() - reinterpret_cast<uintptr_t>(
                                          codeAllocator.memory.begin()));
        table[index++] = targetVW(
            w->map()->find(p->target())
            | (static_cast<unsigned>(p->flags()) << TargetBootShift));
      }
    }

    return table;
  }

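  // boot either loads an existing boot image or builds the compile roots
  // (call table and method tree) from scratch, compiles the helper thunks
  // unless this is an AOT-only build, and registers the SegFault and
  // DivideByZero handlers that map hardware faults to NullPointerException
  // and ArithmeticException respectively.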
  virtual void boot(Thread* t, BootImage* image, uint8_t* code)
  {
#ifndef AVIAN_AOT_ONLY
    if (codeAllocator.memory.begin() == 0) {
      codeAllocator.memory = Memory::allocate(ExecutableAreaSizeInBytes,
                                              Memory::ReadWriteExecute);

      expect(t, codeAllocator.memory.begin());
    }
#endif

    if (image and code) {
      local::boot(static_cast<MyThread*>(t), image, code);
    } else {
      roots = makeCompileRoots(t, 0, 0, 0, 0, 0, 0, 0, 0, 0);

      {
        GcArray* ct = makeArray(t, 128);
        // sequence point, for gc (don't recombine statements)
        compileRoots(t)->setCallTable(t, ct);
      }

      GcTreeNode* tree = makeTreeNode(t, 0, 0, 0);
      compileRoots(t)->setMethodTreeSentinal(t, tree);
      compileRoots(t)->setMethodTree(t, tree);
      tree->setLeft(t, tree);
      tree->setRight(t, tree);
    }

#ifdef AVIAN_AOT_ONLY
    thunks = bootThunks;
#else
    local::compileThunks(static_cast<MyThread*>(t), &codeAllocator);

    if (not(image and code)) {
      bootThunks = thunks;
    }
#endif

    segFaultHandler.m = t->m;
    expect(
        t,
        signals.registerHandler(SignalRegistrar::SegFault, &segFaultHandler));

    divideByZeroHandler.m = t->m;
    expect(t,
           signals.registerHandler(SignalRegistrar::DivideByZero,
                                   &divideByZeroHandler));
  }

2014-07-11 15:50:18 +00:00
|
|
|
  virtual void callWithCurrentContinuation(Thread* t, object receiver)
  {
    if (Continuations) {
      local::callWithCurrentContinuation(static_cast<MyThread*>(t), receiver);
    } else {
      abort(t);
    }
  }

  virtual void dynamicWind(Thread* t, object before, object thunk, object after)
  {
    if (Continuations) {
      local::dynamicWind(static_cast<MyThread*>(t), before, thunk, after);
    } else {
      abort(t);
    }
  }

  virtual void feedResultToContinuation(Thread* t,
                                        GcContinuation* continuation,
                                        object result)
  {
    if (Continuations) {
      callContinuation(static_cast<MyThread*>(t), continuation, result, 0);
    } else {
      abort(t);
    }
  }

  virtual void feedExceptionToContinuation(Thread* t,
                                           GcContinuation* continuation,
                                           GcThrowable* exception)
  {
    if (Continuations) {
      callContinuation(static_cast<MyThread*>(t), continuation, 0, exception);
    } else {
      abort(t);
    }
  }

  virtual void walkContinuationBody(Thread* t,
                                    Heap::Walker* w,
                                    object o,
                                    unsigned start)
  {
    if (Continuations) {
      local::walkContinuationBody(
          static_cast<MyThread*>(t), w, cast<GcContinuation>(t, o), start);
    } else {
      abort(t);
    }
  }

  System* s;
  SignalRegistrar signals;
  Allocator* allocator;
  GcCompileRoots* roots;
  BootImage* bootImage;
  uintptr_t* heapImage;
  uint8_t* codeImage;
  unsigned codeImageSize;
  SignalHandler segFaultHandler;
  SignalHandler divideByZeroHandler;
  FixedAllocator codeAllocator;
  ThunkCollection thunks;
  ThunkCollection bootThunks;
  unsigned callTableSize;
  bool useNativeFeatures;
  void* thunkTable[dummyIndex + 1];
  CompilationHandlerList* compilationHandlers;
};

const char* stringOrNull(const char* str)
{
  if (str) {
    return str;
  } else {
    return "(null)";
  }
}

size_t stringOrNullSize(const char* str)
{
  return strlen(stringOrNull(str));
}

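// Records the address range of freshly generated code. Output goes to the
// file named by the "avian.jit.log" property, or to stderr when DebugCompile
// is set, and every registered CompilationHandler is notified as well.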
void logCompile(MyThread* t,
                const void* code,
                unsigned size,
                const char* class_,
                const char* name,
                const char* spec)
{
  static bool open = false;
  if (not open) {
    open = true;
    const char* path = findProperty(t, "avian.jit.log");
    if (path) {
      compileLog = vm::fopen(path, "wb");
    } else if (DebugCompile) {
      compileLog = stderr;
    }
  }

  if (compileLog) {
    fprintf(compileLog,
            "%p,%p %s.%s%s\n",
            code,
            static_cast<const uint8_t*>(code) + size,
            class_,
            name,
            spec);
  }

  size_t nameLength = stringOrNullSize(class_) + stringOrNullSize(name)
                      + stringOrNullSize(spec) + 2;

  THREAD_RUNTIME_ARRAY(t, char, completeName, nameLength);

  sprintf(RUNTIME_ARRAY_BODY(completeName),
          "%s.%s%s",
          stringOrNull(class_),
          stringOrNull(name),
          stringOrNull(spec));

  MyProcessor* p = static_cast<MyProcessor*>(t->m->processor);
  for (CompilationHandlerList* h = p->compilationHandlers; h; h = h->next) {
    h->handler->compiled(code, 0, 0, RUNTIME_ARRAY_BODY(completeName));
  }
}

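// Lazy-compilation entry point: given the address of a not-yet-compiled call
// site, look up its call node, compile the target method, and, when the
// caller lives outside the boot code image, patch the call site so future
// calls go straight to the generated code (or to the appropriate native
// thunk for ACC_NATIVE targets). Returns the address the caller should now
// transfer control to.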
void* compileMethod2(MyThread* t, void* ip)
{
  GcCallNode* node = findCallNode(t, ip);
  GcMethod* target = node->target();

  PROTECT(t, node);
  PROTECT(t, target);

  t->trace->targetMethod = target;

  THREAD_RESOURCE0(t, static_cast<MyThread*>(t)->trace->targetMethod = 0);

  compile(t, codeAllocator(t), 0, target);

  uint8_t* updateIp = static_cast<uint8_t*>(ip);

  MyProcessor* p = processor(t);

  bool updateCaller = updateIp < p->codeImage
                      or updateIp >= p->codeImage + p->codeImageSize;

  uintptr_t address;
  if (target->flags() & ACC_NATIVE) {
    address = useLongJump(t, reinterpret_cast<uintptr_t>(ip))
                      or (not updateCaller)
                  ? bootNativeThunk(t)
                  : nativeThunk(t);
  } else {
    address = methodAddress(t, target);
  }

  if (updateCaller) {
    avian::codegen::lir::UnaryOperation op;
    if (node->flags() & TraceElement::LongCall) {
      if (node->flags() & TraceElement::TailCall) {
        op = avian::codegen::lir::AlignedLongJump;
      } else {
        op = avian::codegen::lir::AlignedLongCall;
      }
    } else if (node->flags() & TraceElement::TailCall) {
      op = avian::codegen::lir::AlignedJump;
    } else {
      op = avian::codegen::lir::AlignedCall;
    }

    updateCall(t, op, updateIp, reinterpret_cast<void*>(address));
  }

  return reinterpret_cast<void*>(address);
}

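// The helpers below classify an arbitrary instruction pointer so the
// stack-trace code can decide how to interpret a thread's saved context.
// This overload reports whether ip falls within the contiguous region
// spanned by the given collection's thunks, from the default thunk through
// the end of the thunk table.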
bool isThunk(MyProcessor::ThunkCollection* thunks, void* ip)
{
  uint8_t* thunkStart = thunks->default_.start;
  uint8_t* thunkEnd = thunks->table.start + (thunks->table.length * ThunkCount);

  return (reinterpret_cast<uintptr_t>(ip)
              >= reinterpret_cast<uintptr_t>(thunkStart)
          and reinterpret_cast<uintptr_t>(ip)
              < reinterpret_cast<uintptr_t>(thunkEnd));
}

bool isThunk(MyThread* t, void* ip)
{
  MyProcessor* p = processor(t);

  return isThunk(&(p->thunks), ip) or isThunk(&(p->bootThunks), ip);
}

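// A thunk's stack is considered "unsafe" while ip is still inside its
// prologue, i.e. before frameSavedOffset, the point at which the thunk has
// saved the caller's stack and frame registers for use by the tracing code.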
bool isThunkUnsafeStack(MyProcessor::Thunk* thunk, void* ip)
{
  return reinterpret_cast<uintptr_t>(ip)
             >= reinterpret_cast<uintptr_t>(thunk->start)
         and reinterpret_cast<uintptr_t>(ip)
             < reinterpret_cast<uintptr_t>(thunk->start
                                           + thunk->frameSavedOffset);
}

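// Checks the prologue ranges of all thunks in the collection: the five named
// thunks plus every entry of the thunk table, whose entries share a common
// length and frameSavedOffset and are laid out back to back.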
bool isThunkUnsafeStack(MyProcessor::ThunkCollection* thunks, void* ip)
{
  const unsigned NamedThunkCount = 5;

  MyProcessor::Thunk table[NamedThunkCount + ThunkCount];

  table[0] = thunks->default_;
  table[1] = thunks->defaultVirtual;
  table[2] = thunks->native;
  table[3] = thunks->aioob;
  table[4] = thunks->stackOverflow;

  for (unsigned i = 0; i < ThunkCount; ++i) {
    new (table + NamedThunkCount + i)
        MyProcessor::Thunk(thunks->table.start + (i * thunks->table.length),
                           thunks->table.frameSavedOffset,
                           thunks->table.length);
  }

  for (unsigned i = 0; i < NamedThunkCount + ThunkCount; ++i) {
    if (isThunkUnsafeStack(table + i, ip)) {
      return true;
    }
  }

  return false;
}

bool isVirtualThunk(MyThread* t, void* ip)
{
  GcWordArray* a = compileRoots(t)->virtualThunks();
  for (unsigned i = 0; i < a->length(); i += 2) {
    uintptr_t start = a->body()[i];
    uintptr_t end = start + a->body()[i + 1];

    if (reinterpret_cast<uintptr_t>(ip) >= start
        and reinterpret_cast<uintptr_t>(ip) < end) {
      return true;
    }
  }

  return false;
}

bool isThunkUnsafeStack(MyThread* t, void* ip)
{
  MyProcessor* p = processor(t);

  return isThunk(t, ip) and (isThunkUnsafeStack(&(p->thunks), ip)
                             or isThunkUnsafeStack(&(p->bootThunks), ip));
}

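// Looks up the call node recorded for a return address. The call table is a
// chained hash table whose length is kept a power of two, so the bucket is
// simply the address masked with (length - 1); for example, with a 256-entry
// table a key of 0x7f3a10 would land in bucket 0x10. Collisions are chained
// through GcCallNode::next().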
GcCallNode* findCallNode(MyThread* t, void* address)
{
  if (DebugCallTable) {
    fprintf(stderr, "find call node %p\n", address);
  }

  // we must use a version of the call table at least as recent as the
  // compiled form of the method containing the specified address (see
  // compile(MyThread*, Allocator*, BootContext*, object)):
  loadMemoryBarrier();

  GcArray* table = compileRoots(t)->callTable();

  intptr_t key = reinterpret_cast<intptr_t>(address);
  unsigned index = static_cast<uintptr_t>(key) & (table->length() - 1);

  for (GcCallNode* n = cast<GcCallNode>(t, table->body()[index]); n;
       n = n->next()) {
    intptr_t k = n->address();

    if (k == key) {
      return n;
    }
  }

  return 0;
}

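// Grows the call table to newLength (expected to be a power of two) and
// rehashes every node into its new bucket. Fresh nodes are allocated rather
// than relinked, presumably so the old table and its chains stay intact
// while the copy is built.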
GcArray* resizeTable(MyThread* t, GcArray* oldTable, unsigned newLength)
{
  PROTECT(t, oldTable);

  GcCallNode* oldNode = 0;
  PROTECT(t, oldNode);

  GcArray* newTable = makeArray(t, newLength);
  PROTECT(t, newTable);

  for (unsigned i = 0; i < oldTable->length(); ++i) {
    for (oldNode = cast<GcCallNode>(t, oldTable->body()[i]); oldNode;
         oldNode = oldNode->next()) {
      intptr_t k = oldNode->address();

      unsigned index = k & (newLength - 1);

      GcCallNode* newNode
          = makeCallNode(t,
                         oldNode->address(),
                         oldNode->target(),
                         oldNode->flags(),
                         cast<GcCallNode>(t, newTable->body()[index]));

      newTable->setBodyElement(t, index, newNode);
    }
  }

  return newTable;
}

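// Inserts a node into the call table, first doubling the table once the
// entry count reaches twice its length (an average chain length of two).
// Returns the table, which may have been replaced by a larger copy.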
GcArray* insertCallNode(MyThread* t,
                        GcArray* table,
                        unsigned* size,
                        GcCallNode* node)
{
  if (DebugCallTable) {
    fprintf(stderr,
            "insert call node %p\n",
            reinterpret_cast<void*>(node->address()));
  }

  PROTECT(t, table);
  PROTECT(t, node);

  ++(*size);

  if (*size >= table->length() * 2) {
    table = resizeTable(t, table, table->length() * 2);
  }

  intptr_t key = node->address();
  unsigned index = static_cast<uintptr_t>(key) & (table->length() - 1);

  node->setNext(t, cast<GcCallNode>(t, table->body()[index]));
  table->setBodyElement(t, index, node);

  return table;
}

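// Rebuilds the name -> class hash map from a boot-image table of heap
// offsets; each offset is resolved against the image heap via bootObject().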
GcHashMap* makeClassMap(Thread* t,
                        unsigned* table,
                        unsigned count,
                        uintptr_t* heap)
{
  GcArray* array = makeArray(t, nextPowerOfTwo(count));
  GcHashMap* map = makeHashMap(t, 0, array);
  PROTECT(t, map);

  for (unsigned i = 0; i < count; ++i) {
    GcClass* c = cast<GcClass>(t, bootObject(heap, table[i]));
    hashMapInsert(t, map, c->name(), c, byteArrayHash);
  }

  return map;
}

GcArray* makeStaticTableArray(Thread* t,
                              unsigned* bootTable,
                              unsigned bootCount,
                              unsigned* appTable,
                              unsigned appCount,
                              uintptr_t* heap)
{
  GcArray* array = makeArray(t, bootCount + appCount);

  for (unsigned i = 0; i < bootCount; ++i) {
    array->setBodyElement(
        t, i, cast<GcClass>(t, bootObject(heap, bootTable[i]))->staticTable());
  }

  for (unsigned i = 0; i < appCount; ++i) {
    array->setBodyElement(
        t,
        (bootCount + i),
        cast<GcClass>(t, bootObject(heap, appTable[i]))->staticTable());
  }

  return array;
}

GcHashMap* makeStringMap(Thread* t,
                         unsigned* table,
                         unsigned count,
                         uintptr_t* heap)
{
  GcArray* array = makeArray(t, nextPowerOfTwo(count));
  GcHashMap* map = makeWeakHashMap(t, 0, array)->as<GcHashMap>(t);
  PROTECT(t, map);

  for (unsigned i = 0; i < count; ++i) {
    object s = bootObject(heap, table[i]);
    hashMapInsert(t, map, s, 0, stringHash);
  }

  return map;
}

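// Rebuilds the runtime call table from the boot image's flattened pairs:
// each address is rebased against `base`, and each target word packs a heap
// offset (masked with BootMask) together with the node's flags (shifted by
// BootShift). This appears to be the loader-side counterpart of
// MyProcessor::makeCallTable above.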
GcArray* makeCallTable(MyThread* t,
                       uintptr_t* heap,
                       unsigned* calls,
                       unsigned count,
                       uintptr_t base)
{
  GcArray* table = makeArray(t, nextPowerOfTwo(count));
  PROTECT(t, table);

  unsigned size = 0;
  for (unsigned i = 0; i < count; ++i) {
    unsigned address = calls[i * 2];
    unsigned target = calls[(i * 2) + 1];

    GcCallNode* node
        = makeCallNode(t,
                       base + address,
                       cast<GcMethod>(t, bootObject(heap, target & BootMask)),
                       target >> BootShift,
                       0);

    table = insertCallNode(t, table, &size, node);
  }

  return table;
}

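// Fixes up the boot-image heap using its relocation bitmap: every word whose
// bit is set holds an object number in its low bits (BootMask) and mark bits
// above BootShift, and is rewritten either as a real pointer into the heap
// (ORed with the mark bits) or, when the number is zero, as just the mark
// bits.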
void fixupHeap(MyThread* t UNUSED,
               uintptr_t* map,
               unsigned size,
               uintptr_t* heap)
{
  for (unsigned word = 0; word < size; ++word) {
    uintptr_t w = map[word];
    if (w) {
      for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
        if (w & (static_cast<uintptr_t>(1) << bit)) {
          unsigned index = indexOf(word, bit);

          uintptr_t* p = heap + index;
          assertT(t, *p);

          uintptr_t number = *p & BootMask;
          uintptr_t mark = *p >> BootShift;

          if (number) {
            *p = reinterpret_cast<uintptr_t>(heap + (number - 1)) | mark;
            if (false) {
              fprintf(stderr,
                      "fixup %d: %d 0x%x\n",
                      index,
                      static_cast<unsigned>(number),
                      static_cast<unsigned>(*p));
            }
          } else {
            *p = mark;
          }
        }
      }
    }
  }
}

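// Clears the per-class state that a running VM writes into the (mutable)
// boot image, so that a later VM instance in the same process can reuse it:
// runtime-data indices and native IDs are reset, static fields pointing
// outside the image heap are nulled, classes with initializers are re-armed,
// and the vtable is re-initialized.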
void resetClassRuntimeState(Thread* t,
|
|
|
|
GcClass* c,
|
|
|
|
uintptr_t* heap,
|
|
|
|
unsigned heapSize)
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attatch to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
{
|
2014-06-29 04:57:07 +00:00
|
|
|
c->runtimeDataIndex() = 0;
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attatch to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
if (c->arrayElementSize() == 0) {
|
|
|
|
GcSingleton* staticTable = c->staticTable()->as<GcSingleton>(t);
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attatch to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
if (staticTable) {
|
|
|
|
for (unsigned i = 0; i < singletonCount(t, staticTable); ++i) {
|
|
|
|
if (singletonIsObject(t, staticTable, i)
|
2014-07-11 15:50:18 +00:00
|
|
|
and (reinterpret_cast<uintptr_t*>(
|
|
|
|
singletonObject(t, staticTable, i)) < heap
|
|
|
|
or reinterpret_cast<uintptr_t*>(singletonObject(
|
|
|
|
t, staticTable, i)) > heap + heapSize)) {
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attatch to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
singletonObject(t, staticTable, i) = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
if (GcArray* mtable = cast<GcArray>(t, c->methodTable())) {
|
|
|
|
PROTECT(t, mtable);
|
|
|
|
for (unsigned i = 0; i < mtable->length(); ++i) {
|
|
|
|
GcMethod* m = cast<GcMethod>(t, mtable->body()[i]);
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attatch to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
m->nativeID() = 0;
|
|
|
|
m->runtimeDataIndex() = 0;
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attatch to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
if (m->vmFlags() & ClassInitFlag) {
|
|
|
|
c->vmFlags() |= NeedInitFlag;
|
|
|
|
c->vmFlags() &= ~InitErrorFlag;
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attatch to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-29 04:57:07 +00:00
|
|
|
t->m->processor->initVtable(t, c);
|
support multiple sequential VM instances with bootimage build
Until now, the bootimage build hasn't supported using the Java
invocation API to create a VM, destroy it, and create another in the
same process. Ideally, we would be able to create multiple VMs
simultaneously without any interference between them. In fact, Avian
is designed to support this for the most part, but there are a few
places we use global, mutable state which prevent this from working.
Most notably, the bootimage is modified in-place at runtime, so the
best we can do without extensive changes is to clean up the bootimage
when the VM is destroyed so it's ready for later instances. Hence
this commit.
Ultimately, we can move towards a fully reentrant VM by making the
bootimage immutable, but this will require some care to avoid
performance regressions. Another challenge is our Posix signal
handlers, which currently rely on a global handle to the VM, since you
can't, to my knowledge, pass a context pointer when registering a
signal handler. Thread local variables won't necessarily help, since
a thread might attatch to more than one VM at a time.
2011-11-10 20:10:53 +00:00
|
|
|
}
void resetRuntimeState(Thread* t,
                       GcHashMap* map,
                       uintptr_t* heap,
                       unsigned heapSize)
{
  for (HashMapIterator it(t, map); it.hasMore();) {
    resetClassRuntimeState(
        t, cast<GcClass>(t, it.next()->second()), heap, heapSize);
  }
}
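
// Relocate each method's compiled-code pointer in the given class map from
// an image-relative offset to an absolute address in the mapped code image,
// then rebuild the class vtables.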
void fixupMethods(Thread* t,
                  GcHashMap* map,
                  BootImage* image UNUSED,
                  uint8_t* code)
{
  for (HashMapIterator it(t, map); it.hasMore();) {
    GcClass* c = cast<GcClass>(t, it.next()->second());

    if (GcArray* mtable = cast<GcArray>(t, c->methodTable())) {
      PROTECT(t, mtable);
      for (unsigned i = 0; i < mtable->length(); ++i) {
        GcMethod* method = cast<GcMethod>(t, mtable->body()[i]);
        if (method->code()) {
          assertT(t,
                  methodCompiled(t, method)
                  <= static_cast<int32_t>(image->codeSize));

          method->code()->compiled() = methodCompiled(t, method)
                                       + reinterpret_cast<uintptr_t>(code);

          if (DebugCompile) {
            logCompile(static_cast<MyThread*>(t),
                       reinterpret_cast<uint8_t*>(methodCompiled(t, method)),
                       methodCompiledSize(t, method),
                       reinterpret_cast<char*>(
                           method->class_()->name()->body().begin()),
                       reinterpret_cast<char*>(method->name()->body().begin()),
                       reinterpret_cast<char*>(method->spec()->body().begin()));
          }
        }
      }
    }

    t->m->processor->initVtable(t, c);
  }
}
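
// Convert an image-relative thunk descriptor into an absolute one based at
// the mapped code image.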
MyProcessor::Thunk thunkToThunk(const BootImage::Thunk& thunk, uint8_t* base)
{
  return MyProcessor::Thunk(
      base + thunk.start, thunk.frameSavedOffset, thunk.length);
}
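
// Resolve the thunk descriptors stored in the boot image into absolute
// bootThunks entries on the processor.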
void findThunks(MyThread* t, BootImage* image, uint8_t* code)
{
  MyProcessor* p = processor(t);

  p->bootThunks.default_ = thunkToThunk(image->thunks.default_, code);
  p->bootThunks.defaultVirtual
      = thunkToThunk(image->thunks.defaultVirtual, code);
  p->bootThunks.native = thunkToThunk(image->thunks.native, code);
  p->bootThunks.aioob = thunkToThunk(image->thunks.aioob, code);
  p->bootThunks.stackOverflow = thunkToThunk(image->thunks.stackOverflow, code);
  p->bootThunks.table = thunkToThunk(image->thunks.table, code);
}
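
// Rebase the address words of the virtual thunk table (every other entry)
// from image-relative offsets to absolute addresses in the code image.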
void fixupVirtualThunks(MyThread* t, uint8_t* code)
{
  GcWordArray* a = compileRoots(t)->virtualThunks();
  for (unsigned i = 0; i < a->length(); i += 2) {
    if (a->body()[i]) {
      a->body()[i] += reinterpret_cast<uintptr_t>(code);
    }
  }
}
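
// Attach a newly created VM to a boot image: locate the tables and heap
// that follow the image header, fix up heap and code pointers on first use,
// install the GC and compile roots, and reset any state left behind by a
// previous VM instance in this process.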
void boot(MyThread* t, BootImage* image, uint8_t* code)
{
  assertT(t, image->magic == BootImage::Magic);
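
  // Image layout implied by the pointer arithmetic below: the BootImage
  // header is followed by the boot class table, app class table, string
  // table, and call table (two words per entry), then the word-aligned heap
  // map and the heap itself.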
  unsigned* bootClassTable = reinterpret_cast<unsigned*>(image + 1);
  unsigned* appClassTable = bootClassTable + image->bootClassCount;
  unsigned* stringTable = appClassTable + image->appClassCount;
  unsigned* callTable = stringTable + image->stringCount;

  uintptr_t* heapMap = reinterpret_cast<uintptr_t*>(
      padWord(reinterpret_cast<uintptr_t>(callTable + (image->callCount * 2))));

  unsigned heapMapSizeInWords
      = ceilingDivide(heapMapSize(image->heapSize), BytesPerWord);
  uintptr_t* heap = heapMap + heapMapSizeInWords;

  MyProcessor* p = static_cast<MyProcessor*>(t->m->processor);

  t->heapImage = p->heapImage = heap;

  if (false) {
    fprintf(stderr,
            "heap from %p to %p\n",
            heap,
            heap + ceilingDivide(image->heapSize, BytesPerWord));
  }

  t->codeImage = p->codeImage = code;
  p->codeImageSize = image->codeSize;

  if (false) {
    fprintf(stderr, "code from %p to %p\n", code, code + image->codeSize);
  }

  if (not image->initialized) {
    fixupHeap(t, heapMap, heapMapSizeInWords, heap);
  }

  t->m->heap->setImmortalHeap(heap, image->heapSize / BytesPerWord);

  t->m->types = reinterpret_cast<GcArray*>(bootObject(heap, image->types));

  t->m->roots = GcRoots::makeZeroed(t);

  roots(t)->setBootLoader(
      t, cast<GcClassLoader>(t, bootObject(heap, image->bootLoader)));
  roots(t)->setAppLoader(
      t, cast<GcClassLoader>(t, bootObject(heap, image->appLoader)));

  p->roots = GcCompileRoots::makeZeroed(t);

  compileRoots(t)->setMethodTree(
      t, cast<GcTreeNode>(t, bootObject(heap, image->methodTree)));
  compileRoots(t)->setMethodTreeSentinal(
      t, cast<GcTreeNode>(t, bootObject(heap, image->methodTreeSentinal)));

  compileRoots(t)->setVirtualThunks(
      t, cast<GcWordArray>(t, bootObject(heap, image->virtualThunks)));

  {
    GcHashMap* map
        = makeClassMap(t, bootClassTable, image->bootClassCount, heap);
    // sequence point, for gc (don't recombine statements)
    roots(t)->bootLoader()->setMap(t, map);
  }

  roots(t)->bootLoader()->as<GcSystemClassLoader>(t)->finder()
      = t->m->bootFinder;

  {
    GcHashMap* map = makeClassMap(t, appClassTable, image->appClassCount, heap);
    // sequence point, for gc (don't recombine statements)
    roots(t)->appLoader()->setMap(t, map);
  }

  roots(t)->appLoader()->as<GcSystemClassLoader>(t)->finder() = t->m->appFinder;

  {
    GcHashMap* map = makeStringMap(t, stringTable, image->stringCount, heap);
    // sequence point, for gc (don't recombine statements)
    roots(t)->setStringMap(t, map);
  }

  p->callTableSize = image->callCount;

  {
    GcArray* ct = makeCallTable(t,
                                heap,
                                callTable,
                                image->callCount,
                                reinterpret_cast<uintptr_t>(code));
    // sequence point, for gc (don't recombine statements)
    compileRoots(t)->setCallTable(t, ct);
  }

  {
    GcArray* staticTableArray = makeStaticTableArray(t,
                                                     bootClassTable,
                                                     image->bootClassCount,
                                                     appClassTable,
                                                     image->appClassCount,
                                                     heap);
    // sequence point, for gc (don't recombine statements)
    compileRoots(t)->setStaticTableArray(t, staticTableArray);
  }

  findThunks(t, image, code);
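
  // An image marked initialized was already used by an earlier VM instance
  // in this process, so the runtime state it accumulated must be cleared; a
  // fresh image instead needs its code pointers relocated for this mapping.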
  if (image->initialized) {
    resetRuntimeState(t,
                      cast<GcHashMap>(t, roots(t)->bootLoader()->map()),
                      heap,
                      image->heapSize);

    resetRuntimeState(t,
                      cast<GcHashMap>(t, roots(t)->appLoader()->map()),
                      heap,
                      image->heapSize);

    for (unsigned i = 0; i < t->m->types->length(); ++i) {
      resetClassRuntimeState(
          t, type(t, static_cast<Gc::Type>(i)), heap, image->heapSize);
    }
  } else {
    fixupVirtualThunks(t, code);

    fixupMethods(
        t, cast<GcHashMap>(t, roots(t)->bootLoader()->map()), image, code);

    fixupMethods(
        t, cast<GcHashMap>(t, roots(t)->appLoader()->map()), image, code);
  }

  image->initialized = true;

  GcHashMap* map = makeHashMap(t, 0, 0);
  // sequence point, for gc (don't recombine statements)
  roots(t)->setBootstrapClassMap(t, map);
}
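
// Return the entry point of the given thunk; the thunk table is a
// contiguous run of equally sized stubs, so the entry lives at
// start + thunk * length.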
intptr_t getThunk(MyThread* t, Thunk thunk)
{
  MyProcessor* p = processor(t);

  return reinterpret_cast<intptr_t>(p->thunks.table.start
                                    + (thunk * p->thunks.table.length));
}
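
// The helpers below are only needed when code can be compiled at runtime,
// so they are omitted from AOT-only builds.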
#ifndef AVIAN_AOT_ONLY
void insertCallNode(MyThread* t, GcCallNode* node)
{
  GcArray* newArray = insertCallNode(
      t, compileRoots(t)->callTable(), &(processor(t)->callTableSize), node);
  // sequence point, for gc (don't recombine statements)
  compileRoots(t)->setCallTable(t, newArray);
}
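
// Inverse of the conversion above: store a thunk as an offset from the code
// image base so it can be written into a boot image.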
BootImage::Thunk thunkToThunk(const MyProcessor::Thunk& thunk, uint8_t* base)
{
  return BootImage::Thunk(
      thunk.start - base, thunk.frameSavedOffset, thunk.length);
}
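
// compileCall emits a call (or tail jump when "call" is false) to the
// runtime helper identified by "index".  While building a boot image the
// helper's final address is unknown, so the call goes indirectly through
// the thread's thunk table; otherwise the resolved address is emitted as a
// constant.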
using avian::codegen::OperandInfo;
namespace lir = avian::codegen::lir;

void compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
{
  avian::codegen::Assembler* a = c->assembler;

  if (processor(t)->bootImage) {
    lir::Memory table(t->arch->thread(), TARGET_THREAD_THUNKTABLE);
    lir::Register scratch(t->arch->scratch());
    a->apply(lir::Move,
             OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &table),
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::RegisterPair,
                         &scratch));

    lir::Memory proc(scratch.low, index * TargetBytesPerWord);
    a->apply(lir::Move,
             OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &proc),
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::RegisterPair,
                         &scratch));

    a->apply(call ? lir::Call : lir::Jump,
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::RegisterPair,
                         &scratch));
  } else {
    lir::Constant proc(new (&c->zone) avian::codegen::ResolvedPromise(
        reinterpret_cast<intptr_t>(t->thunkTable[index])));

    a->apply(call ? lir::LongCall : lir::LongJump,
             OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &proc));
  }
}
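
// Generate the helper thunks used by compiled code: the default compile
// thunk, its virtual-dispatch variant, the native-invocation thunk, the
// bounds-check and stack-overflow throw thunks, and the per-helper thunk
// table.  When building a boot image their locations are recorded in it.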
void compileThunks(MyThread* t, FixedAllocator* allocator)
{
  MyProcessor* p = processor(t);

  {
    Context context(t);
    avian::codegen::Assembler* a = context.assembler;

    a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);

    p->thunks.default_.frameSavedOffset = a->length();

    lir::Register thread(t->arch->thread());
    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);

    compileCall(t, &context, compileMethodIndex);

    a->popFrame(t->arch->alignFrameSize(1));

    lir::Register result(t->arch->returnLow());
    a->apply(lir::Jump,
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::RegisterPair,
                         &result));

    p->thunks.default_.length = a->endBlock(false)->resolve(0, 0);

    p->thunks.default_.start
        = finish(t, allocator, a, "default", p->thunks.default_.length);
  }
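
  // The virtual-dispatch variant first copies the pending call target (read
  // from a known stack slot) and the vtable index register into the thread
  // structure before entering the runtime.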
  {
    Context context(t);
    avian::codegen::Assembler* a = context.assembler;

    lir::Register class_(t->arch->virtualCallTarget());
    lir::Memory virtualCallTargetSrc(
        t->arch->stack(),
        (t->arch->frameFooterSize() + t->arch->frameReturnAddressSize())
        * TargetBytesPerWord);

    a->apply(lir::Move,
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::Memory,
                         &virtualCallTargetSrc),
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::RegisterPair,
                         &class_));

    lir::Memory virtualCallTargetDst(t->arch->thread(),
                                     TARGET_THREAD_VIRTUALCALLTARGET);

    a->apply(lir::Move,
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::RegisterPair,
                         &class_),
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::Memory,
                         &virtualCallTargetDst));

    lir::Register index(t->arch->virtualCallIndex());
    lir::Memory virtualCallIndex(t->arch->thread(),
                                 TARGET_THREAD_VIRTUALCALLINDEX);

    a->apply(lir::Move,
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::RegisterPair,
                         &index),
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::Memory,
                         &virtualCallIndex));

    a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);

    p->thunks.defaultVirtual.frameSavedOffset = a->length();

    lir::Register thread(t->arch->thread());
    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);

    compileCall(t, &context, compileVirtualMethodIndex);

    a->popFrame(t->arch->alignFrameSize(1));

    lir::Register result(t->arch->returnLow());
    a->apply(lir::Jump,
             OperandInfo(TargetBytesPerWord,
                         lir::Operand::Type::RegisterPair,
                         &result));

    p->thunks.defaultVirtual.length = a->endBlock(false)->resolve(0, 0);

    p->thunks.defaultVirtual.start = finish(
        t, allocator, a, "defaultVirtual", p->thunks.defaultVirtual.length);
  }

  {
    Context context(t);
    avian::codegen::Assembler* a = context.assembler;

    a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);

    p->thunks.native.frameSavedOffset = a->length();

    lir::Register thread(t->arch->thread());
    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);

    compileCall(t, &context, invokeNativeIndex);

    a->popFrameAndUpdateStackAndReturn(t->arch->alignFrameSize(1),
                                       TARGET_THREAD_NEWSTACK);

    p->thunks.native.length = a->endBlock(false)->resolve(0, 0);

    p->thunks.native.start
        = finish(t, allocator, a, "native", p->thunks.native.length);
  }

  {
    Context context(t);
    avian::codegen::Assembler* a = context.assembler;

    a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);

    p->thunks.aioob.frameSavedOffset = a->length();

    lir::Register thread(t->arch->thread());
    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);

    compileCall(t, &context, throwArrayIndexOutOfBoundsIndex);

    p->thunks.aioob.length = a->endBlock(false)->resolve(0, 0);

    p->thunks.aioob.start
        = finish(t, allocator, a, "aioob", p->thunks.aioob.length);
  }

  {
    Context context(t);
    avian::codegen::Assembler* a = context.assembler;

    a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);

    p->thunks.stackOverflow.frameSavedOffset = a->length();

    lir::Register thread(t->arch->thread());
    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);

    compileCall(t, &context, throwStackOverflowIndex);

    p->thunks.stackOverflow.length = a->endBlock(false)->resolve(0, 0);

    p->thunks.stackOverflow.start = finish(
        t, allocator, a, "stackOverflow", p->thunks.stackOverflow.length);
  }
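
  // The thunk table is sized using a dummy stub, then one stub per THUNK
  // entry listed in thunks.cpp is written into it at a fixed stride.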
  {
    {
      Context context(t);
      avian::codegen::Assembler* a = context.assembler;

      a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);

      p->thunks.table.frameSavedOffset = a->length();

      compileCall(t, &context, dummyIndex, false);

      p->thunks.table.length = a->endBlock(false)->resolve(0, 0);

      p->thunks.table.start = static_cast<uint8_t*>(allocator->allocate(
          p->thunks.table.length * ThunkCount, TargetBytesPerWord));
    }

    uint8_t* start = p->thunks.table.start;

#define THUNK(s)                                                            \
  {                                                                         \
    Context context(t);                                                     \
    avian::codegen::Assembler* a = context.assembler;                       \
                                                                            \
    a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);                    \
                                                                            \
    p->thunks.table.frameSavedOffset = a->length();                         \
                                                                            \
    compileCall(t, &context, s##Index, false);                              \
                                                                            \
    expect(t, a->endBlock(false)->resolve(0, 0) <= p->thunks.table.length); \
                                                                            \
    a->setDestination(start);                                               \
    a->write();                                                             \
                                                                            \
    logCompile(t, start, p->thunks.table.length, 0, #s, 0);                 \
                                                                            \
    start += p->thunks.table.length;                                        \
  }
#include "thunks.cpp"
#undef THUNK
  }
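
  // When generating a boot image, record each thunk as an offset from the
  // start of the code area so findThunks can resolve it again at load time.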
  BootImage* image = p->bootImage;

  if (image) {
    uint8_t* imageBase = p->codeAllocator.memory.begin();

    image->thunks.default_ = thunkToThunk(p->thunks.default_, imageBase);
    image->thunks.defaultVirtual
        = thunkToThunk(p->thunks.defaultVirtual, imageBase);
    image->thunks.native = thunkToThunk(p->thunks.native, imageBase);
    image->thunks.aioob = thunkToThunk(p->thunks.aioob, imageBase);
    image->thunks.stackOverflow
        = thunkToThunk(p->thunks.stackOverflow, imageBase);
    image->thunks.table = thunkToThunk(p->thunks.table, imageBase);
  }
}
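
// Convenience accessors for the generated thunk entry points.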
uintptr_t aioobThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.aioob.start);
}

uintptr_t stackOverflowThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.stackOverflow.start);
}

#endif // not AVIAN_AOT_ONLY
|
|
|
|
|

MyProcessor* processor(MyThread* t)
{
  return static_cast<MyProcessor*>(t->m->processor);
}
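
// A method that has not yet been compiled still has one of the default
// thunks as its address; unresolved() below uses this to detect such
// methods, and compile() bails out once the address no longer points at the
// default thunk.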
uintptr_t defaultThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.default_.start);
}

uintptr_t bootDefaultThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.default_.start);
}

uintptr_t defaultVirtualThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.defaultVirtual.start);
}

uintptr_t nativeThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->thunks.native.start);
}

uintptr_t bootNativeThunk(MyThread* t)
{
  return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.native.start);
}

bool unresolved(MyThread* t, uintptr_t methodAddress)
{
  return methodAddress == defaultThunk(t)
         or methodAddress == bootDefaultThunk(t);
}
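
// compileVirtualThunk emits a tiny per-index stub for virtual dispatch:
// roughly "move the constant call index into the architecture's
// virtual-call register, then jump to the shared defaultVirtual thunk".
// The exact encoding is target-specific; the assembler calls below are
// authoritative.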
uintptr_t compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
{
  Context context(t);
  avian::codegen::Assembler* a = context.assembler;

  avian::codegen::ResolvedPromise indexPromise(index);
  lir::Constant indexConstant(&indexPromise);
  lir::Register indexRegister(t->arch->virtualCallIndex());
  a->apply(lir::Move,
           OperandInfo(
               TargetBytesPerWord, lir::Operand::Type::Constant, &indexConstant),
           OperandInfo(
               TargetBytesPerWord, lir::Operand::Type::RegisterPair, &indexRegister));

  avian::codegen::ResolvedPromise defaultVirtualThunkPromise(
      defaultVirtualThunk(t));
  lir::Constant thunk(&defaultVirtualThunkPromise);
  a->apply(lir::Jump,
           OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &thunk));

  *size = a->endBlock(false)->resolve(0, 0);

  uint8_t* start = static_cast<uint8_t*>(
      codeAllocator(t)->allocate(*size, TargetBytesPerWord));

  a->setDestination(start);
  a->write();

  const char* const virtualThunkBaseName = "virtualThunk";
  const size_t virtualThunkBaseNameLength = strlen(virtualThunkBaseName);
  const size_t maxIntStringLength = 10;

  // reserve an extra byte for the terminating null
  THREAD_RUNTIME_ARRAY(t,
                       char,
                       virtualThunkName,
                       virtualThunkBaseNameLength + maxIntStringLength + 1);

  sprintf(RUNTIME_ARRAY_BODY(virtualThunkName),
          "%s%u",
          virtualThunkBaseName,
          index);

  logCompile(t, start, *size, 0, RUNTIME_ARRAY_BODY(virtualThunkName), 0);

  return reinterpret_cast<uintptr_t>(start);
}
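
// virtualThunk lazily creates and caches the per-index stubs.  The cache is
// a GcWordArray holding (address, size) pairs: slot index*2 is the stub's
// start address and slot index*2+1 its length.  classLock guards both the
// array growth and stub creation.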
uintptr_t virtualThunk(MyThread* t, unsigned index)
{
  ACQUIRE(t, t->m->classLock);

  GcWordArray* oldArray = compileRoots(t)->virtualThunks();
  if (oldArray == 0 or oldArray->length() <= index * 2) {
    GcWordArray* newArray = makeWordArray(t, nextPowerOfTwo((index + 1) * 2));
    if (compileRoots(t)->virtualThunks()) {
      memcpy(newArray->body().begin(),
             oldArray->body().begin(),
             oldArray->length() * BytesPerWord);
    }
    compileRoots(t)->setVirtualThunks(t, newArray);
    oldArray = newArray;
  }

  if (oldArray->body()[index * 2] == 0) {
    unsigned size;
    uintptr_t thunk = compileVirtualThunk(t, index, &size);
    oldArray->body()[index * 2] = thunk;
    oldArray->body()[(index * 2) + 1] = size;
  }

  return oldArray->body()[index * 2];
}
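
// compile() drives JIT compilation of a single method.  It re-checks
// methodAddress(t, method) against the default thunk before doing any work,
// after cloning the method, and again once the class lock is held, so a
// method that another thread has already compiled is never compiled twice.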
void compile(MyThread* t,
             FixedAllocator* allocator UNUSED,
             BootContext* bootContext,
             GcMethod* method)
{
  PROTECT(t, method);

  if (bootContext == 0 and method->flags() & ACC_STATIC) {
    initClass(t, method->class_());
  }

  if (methodAddress(t, method) != defaultThunk(t)) {
    return;
  }

  assertT(t, (method->flags() & ACC_NATIVE) == 0);

#ifdef AVIAN_AOT_ONLY
  abort(t);
#else

  // We must avoid acquiring any locks until after the first pass of
  // compilation, since this pass may trigger classloading operations
  // involving application classloaders and thus the potential for
  // deadlock. To make this safe, we use a private clone of the
  // method so that we won't be confused if another thread updates the
  // original while we're working.

  GcMethod* clone = methodClone(t, method);
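
  // Re-check after the load barrier: another thread may have finished
  // compiling this method while we were cloning it, in which case its
  // address no longer points at the default thunk and we are done.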
  loadMemoryBarrier();

  if (methodAddress(t, method) != defaultThunk(t)) {
    return;
  }

  PROTECT(t, clone);

  Context context(t, bootContext, clone);
  compile(t, &context);

  {
    GcExceptionHandlerTable* ehTable = cast<GcExceptionHandlerTable>(
        t, clone->code()->exceptionHandlerTable());

    if (ehTable) {
      PROTECT(t, ehTable);

      // resolve all exception handler catch types before we acquire
      // the class lock:
      for (unsigned i = 0; i < ehTable->length(); ++i) {
        uint64_t handler = ehTable->body()[i];
        if (exceptionHandlerCatchType(handler)) {
          resolveClassInPool(t, clone, exceptionHandlerCatchType(handler) - 1);
        }
      }
    }
  }

  ACQUIRE(t, t->m->classLock);

  if (methodAddress(t, method) != defaultThunk(t)) {
    return;
  }

  finish(t, allocator, &context);

  if (DebugMethodTree) {
    fprintf(stderr,
            "insert method at %p\n",
            reinterpret_cast<void*>(methodCompiled(t, clone)));
  }

  // We can't update the MethodCode field on the original method
  // before it is placed into the method tree, since another thread
  // might call the method, from which stack unwinding would fail
  // (since there is not yet an entry in the method tree). However,
  // we can't insert the original method into the tree before updating
  // the MethodCode field on it since we rely on that field to
  // determine its position in the tree. Therefore, we insert the
  // clone in its place. Later, we'll replace the clone with the
  // original to save memory.

  GcTreeNode* newTree = treeInsert(t,
                                   &(context.zone),
                                   compileRoots(t)->methodTree(),
                                   methodCompiled(t, clone),
                                   clone,
                                   compileRoots(t)->methodTreeSentinal(),
                                   compareIpToMethodBounds);
  // sequence point, for gc (don't recombine statements)
  compileRoots(t)->setMethodTree(t, newTree);
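
  // Publish the updated method tree before exposing the compiled code
  // through the original method, so that any thread which sees the new code
  // can also find the corresponding entry in the tree.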
  storeStoreMemoryBarrier();

  method->setCode(t, clone->code());

  if (methodVirtual(t, method)) {
    method->class_()->vtable()[method->offset()]
        = reinterpret_cast<void*>(methodCompiled(t, clone));
  }

  // we've compiled the method and inserted it into the tree without
  // error, so we ensure that the executable area not be deallocated
  // when we dispose of the context:
  context.executableAllocator = 0;
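
  // Now that the code has been published, swap the original method back in
  // place of the clone in the method tree, as promised above, to save
  // memory.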
  treeUpdate(t,
             compileRoots(t)->methodTree(),
             methodCompiled(t, clone),
             method,
             compileRoots(t)->methodTreeSentinal(),
             compareIpToMethodBounds);
#endif // not AVIAN_AOT_ONLY
}
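
// Accessors for the compiler's GC roots and for the fixed allocator that
// backs the executable code area.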
GcCompileRoots* compileRoots(Thread* t)
{
  return processor(static_cast<MyThread*>(t))->roots;
}

avian::util::FixedAllocator* codeAllocator(MyThread* t)
{
  return &(processor(t)->codeAllocator);
}

} // namespace local

} // namespace

namespace vm {
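
// makeProcessor constructs the JIT processor: the MyProcessor instance is
// placement-newed into memory obtained from the supplied allocator.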
Processor* makeProcessor(System* system,
                         Allocator* allocator,
                         const char* crashDumpDirectory,
                         bool useNativeFeatures)
{
  return new (allocator->allocate(sizeof(local::MyProcessor)))
      local::MyProcessor(
          system, allocator, crashDumpDirectory, useNativeFeatures);
}

} // namespace vm