Revert "remove distinction between thunks and bootThunks in compile.cpp"

This reverts commit 88d614eb25.

It turns out we still need separate sets of thunks for AOT-compiled
and JIT-compiled code to ensure we can always generate efficient jumps
and calls to thunks on architectures such as ARM and PowerPC, whose
relative jumps and calls have limited ranges.
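
(Illustrative sketch, not part of the original commit: on 32-bit ARM a BL instruction, and on PowerPC a bl instruction, encodes a signed 24-bit word offset, so a single relative call reaches only about +/-32MB from the call site. A thunk outside that window cannot be reached with one relative call and needs a longer register-indirect sequence instead. The helper below is hypothetical and only illustrates the range check; the names are not from compile.cpp.)

#include <stdint.h>

// Approximate reach of one relative call (ARM BL / PowerPC bl):
// a signed 24-bit word offset covers about +/-32MB around the call site.
const uintptr_t relativeCallReach = 32 * 1024 * 1024;

// Hypothetical helper: true when 'target' is too far from 'callSite' to be
// reached with a single relative call, so the caller needs a nearby thunk
// or a register-indirect long call instead.
bool
outOfRelativeReach(uintptr_t callSite, uintptr_t target)
{
  uintptr_t distance = (callSite > target)
    ? callSite - target : target - callSite;

  return distance > relativeCallReach;
}

Keeping one thunk set for the AOT-compiled boot image and another for JIT-compiled code lets each set be placed within that relative-call range of the code that uses it.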
Joel Dice 2011-10-01 18:11:02 -06:00
parent 043e466921
commit db9f9a300d

compile.cpp

@@ -2255,6 +2255,9 @@ defaultThunk(MyThread* t);
 uintptr_t
 nativeThunk(MyThread* t);
 
+uintptr_t
+bootNativeThunk(MyThread* t);
+
 uintptr_t
 aioobThunk(MyThread* t);
 
@@ -8819,10 +8822,10 @@ class MyProcessor: public Processor {
           root(t, MethodTreeSentinal));
       set(t, root(t, MethodTree), TreeNodeRight,
           root(t, MethodTreeSentinal));
+
+      local::compileThunks(static_cast<MyThread*>(t), &codeAllocator);
     }
 
-    local::compileThunks(static_cast<MyThread*>(t), &codeAllocator);
-
     segFaultHandler.m = t->m;
     expect(t, t->m->system->success
            (t->m->system->handleSegFault(&segFaultHandler)));
@@ -8891,6 +8894,7 @@ class MyProcessor: public Processor {
   SignalHandler divideByZeroHandler;
   FixedAllocator codeAllocator;
   ThunkCollection thunks;
+  ThunkCollection bootThunks;
   unsigned callTableSize;
   bool useNativeFeatures;
   void* thunkTable[dummyIndex + 1];
@@ -8915,7 +8919,7 @@ compileMethod2(MyThread* t, void* ip)
     if ((methodFlags(t, target) & ACC_NATIVE)
         and useLongJump(t, reinterpret_cast<uintptr_t>(ip)))
     {
-      address = nativeThunk(t);
+      address = bootNativeThunk(t);
     } else {
       address = methodAddress(t, target);
     }
@@ -8962,7 +8966,7 @@ isThunk(MyThread* t, void* ip)
 {
   MyProcessor* p = processor(t);
 
-  return isThunk(&(p->thunks), ip);
+  return isThunk(&(p->thunks), ip) or isThunk(&(p->bootThunks), ip);
 }
 
 bool
@@ -9026,7 +9030,9 @@ isThunkUnsafeStack(MyThread* t, void* ip)
 {
   MyProcessor* p = processor(t);
 
-  return isThunk(t, ip) and isThunkUnsafeStack(&(p->thunks), ip);
+  return isThunk(t, ip)
+    and (isThunkUnsafeStack(&(p->thunks), ip)
+         or isThunkUnsafeStack(&(p->bootThunks), ip));
 }
 
 object
@@ -9274,14 +9280,14 @@ findThunks(MyThread* t, BootImage* image, uint8_t* code)
 {
   MyProcessor* p = processor(t);
 
-  p->thunks.default_ = thunkToThunk(image->thunks.default_, code);
-  p->thunks.defaultVirtual
+  p->bootThunks.default_ = thunkToThunk(image->thunks.default_, code);
+  p->bootThunks.defaultVirtual
     = thunkToThunk(image->thunks.defaultVirtual, code);
-  p->thunks.native = thunkToThunk(image->thunks.native, code);
-  p->thunks.aioob = thunkToThunk(image->thunks.aioob, code);
-  p->thunks.stackOverflow
+  p->bootThunks.native = thunkToThunk(image->thunks.native, code);
+  p->bootThunks.aioob = thunkToThunk(image->thunks.aioob, code);
+  p->bootThunks.stackOverflow
     = thunkToThunk(image->thunks.stackOverflow, code);
-  p->thunks.table = thunkToThunk(image->thunks.table, code);
+  p->bootThunks.table = thunkToThunk(image->thunks.table, code);
 }
 
 void
@@ -9622,6 +9628,12 @@ defaultThunk(MyThread* t)
   return reinterpret_cast<uintptr_t>(processor(t)->thunks.default_.start);
 }
 
+uintptr_t
+bootDefaultThunk(MyThread* t)
+{
+  return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.default_.start);
+}
+
 uintptr_t
 defaultVirtualThunk(MyThread* t)
 {
@@ -9635,6 +9647,12 @@ nativeThunk(MyThread* t)
   return reinterpret_cast<uintptr_t>(processor(t)->thunks.native.start);
 }
 
+uintptr_t
+bootNativeThunk(MyThread* t)
+{
+  return reinterpret_cast<uintptr_t>(processor(t)->bootThunks.native.start);
+}
+
 uintptr_t
 aioobThunk(MyThread* t)
 {
@@ -9650,7 +9668,8 @@ stackOverflowThunk(MyThread* t)
 bool
 unresolved(MyThread* t, uintptr_t methodAddress)
 {
-  return methodAddress == defaultThunk(t);
+  return methodAddress == defaultThunk(t)
+    or methodAddress == bootDefaultThunk(t);
 }
 
 uintptr_t