more work on tail recursion

We now create a unique thunk for each vtable position, which lets us
avoid relying on the return address to determine which method is to be
compiled and invoked, since we will not have the correct return address
in the case of a tail call.  This required refactoring how executable
memory is allocated in order to keep AOT compilation working.  Also, we
must always use the same register to hold the class pointer when
compiling virtual calls, and ensure that the pointer stays in that
register until the call instruction is executed, so the thunk knows
where to find it.
Joel Dice 2009-04-05 15:42:10 -06:00
parent 5e740170f2
commit fea92ed995
11 changed files with 347 additions and 140 deletions
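
To make the scheme concrete, here is a minimal C++ model of the dispatch path this commit sets up; every name and type below is an illustrative stand-in, not Avian's actual code. Each vtable slot starts out pointing at a tiny per-index thunk that records the receiver's class and its own slot index in thread-local fields, then enters a shared stub that compiles the real target and patches the slot:

// Minimal model of per-slot virtual-call thunks (illustrative only).
#include <cstdio>

struct Thread;
struct Class;

typedef void (*Method)(Thread*, Class*);

struct Class {
  Method vtable[2];
};

struct Thread {
  Class* virtualCallClass;   // set by the thunk before entering the JIT
  unsigned virtualCallIndex;
};

void fooCompiled(Thread*, Class*) { std::printf("foo\n"); }
void barCompiled(Thread*, Class*) { std::printf("bar\n"); }

// Stand-in for compileVirtualMethod: resolve the target from
// (class, index), "compile" it, and patch the slot so later calls
// go direct.
Method compileVirtual(Thread* t) {
  static Method const targets[2] = { fooCompiled, barCompiled };
  Method m = targets[t->virtualCallIndex];
  t->virtualCallClass->vtable[t->virtualCallIndex] = m;
  return m;
}

// In the VM each of these is a few generated instructions; the index is
// baked into the thunk itself, so no return-address inspection is
// needed, which is what makes the scheme safe for tail calls.
template <unsigned Index>
void thunk(Thread* t, Class* c) {
  t->virtualCallClass = c;     // "class pointer in a fixed register"
  t->virtualCallIndex = Index;
  compileVirtual(t)(t, c);     // then jump to the compiled code
}

int main() {
  Thread t = { 0, 0 };
  Class c = { { &thunk<0>, &thunk<1> } };
  c.vtable[1](&t, &c);         // first call: thunk -> JIT -> "bar"
  c.vtable[1](&t, &c);         // second call: straight to "bar"
}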


@@ -274,6 +274,8 @@ class Assembler {
virtual unsigned argumentRegisterCount() = 0;
virtual int argumentRegister(unsigned index) = 0;
virtual bool matchCall(void* returnAddress, void* target) = 0;
virtual void updateCall(UnaryOperation op, bool assertAlignment,
void* returnAddress, void* newTarget) = 0;


@@ -37,7 +37,7 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
const char* methodName, const char* methodSpec)
{
unsigned size = 0;
t->m->processor->compileThunks(t, image, code, &size, capacity);
t->m->processor->initialize(t, image, code, capacity);
object constants = 0;
PROTECT(t, constants);
@@ -79,8 +79,7 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
== 0)))
{
t->m->processor->compileMethod
(t, zone, code, &size, capacity, &constants, &calls, &addresses,
method);
(t, zone, &constants, &calls, &addresses, method);
}
}
}


@@ -39,6 +39,7 @@ class BootImage {
unsigned types;
unsigned methodTree;
unsigned methodTreeSentinal;
unsigned virtualThunks;
uintptr_t codeBase;


@@ -39,6 +39,8 @@ const unsigned MaxNativeCallFootprint = 4;
const unsigned InitialZoneCapacityInBytes = 64 * 1024;
const unsigned ExecutableAreaSizeInBytes = 16 * 1024 * 1024;
class MyThread: public Thread {
public:
class CallTrace {
@@ -123,6 +125,19 @@ resolveTarget(MyThread* t, void* stack, object method)
}
}
object
resolveTarget(MyThread* t, object class_, unsigned index)
{
if (classVmFlags(t, class_) & BootstrapFlag) {
PROTECT(t, class_);
resolveClass(t, className(t, class_));
if (UNLIKELY(t->exception)) return 0;
}
return arrayBody(t, classVirtualTable(t, class_), index);
}
object&
methodTree(MyThread* t);
@@ -1360,12 +1375,26 @@ tryInitClass(MyThread* t, object class_)
if (UNLIKELY(t->exception)) unwind(t);
}
FixedAllocator*
codeAllocator(MyThread* t);
int64_t
findInterfaceMethodFromInstance(MyThread* t, object method, object instance)
{
if (instance) {
return methodAddress
(t, findInterfaceMethod(t, method, objectClass(t, instance)));
object target = findInterfaceMethod(t, method, objectClass(t, instance));
if (methodAddress(t, target) == defaultThunk(t)) {
PROTECT(t, target);
compile(t, codeAllocator(t), 0, target);
}
if (UNLIKELY(t->exception)) {
unwind(t);
} else {
return methodAddress(t, target);
}
} else {
t->exception = makeNullPointerException(t);
unwind(t);
@@ -3093,7 +3122,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned ip,
3, c->thread(), frame->append(target),
c->peek(1, instance)),
0,
frame->trace(target, TraceElement::VirtualCall),
frame->trace(0, 0),
rSize,
parameterFootprint);
@@ -3149,15 +3178,32 @@ compile(MyThread* t, Frame* initialFrame, unsigned ip,
unsigned rSize = resultSize(t, methodReturnCode(t, target));
Compiler::Operand* result = c->stackCall
(c->memory
(c->and_
(BytesPerWord, c->constant(PointerMask),
c->memory(instance, 0, 0, 1)), offset, 0, 1),
0,
frame->trace(target, TraceElement::VirtualCall),
rSize,
parameterFootprint);
bool tailCall = isTailCall(t, code, ip, context->method, target);
Compiler::Operand* result;
if (tailCall and methodParameterFootprint(t, target)
> methodParameterFootprint(t, context->method))
{
result = c->stackCall
(c->constant(tailHelperThunk(t)),
0,
frame->trace(target, TraceElement::VirtualCall),
rSize,
parameterFootprint);
} else {
c->freezeRegister(t->arch->returnLow(),
c->and_(BytesPerWord, c->constant(PointerMask),
c->memory(instance, 0, 0, 1)));
result = c->stackCall
(c->memory(c->register_(t->arch->returnLow()), offset, 0, 1),
tailCall ? Compiler::TailCall : 0,
frame->trace(0, 0),
rSize,
parameterFootprint);
c->thawRegister(t->arch->returnLow());
}
frame->pop(parameterFootprint);
@@ -4155,9 +4201,6 @@ calculateFrameMaps(MyThread* t, Context* context, uintptr_t* originalRoots,
return eventIndex;
}
Zone*
codeZone(MyThread* t);
int
compareTraceElementPointers(const void* va, const void* vb)
{
@@ -4532,32 +4575,21 @@ compileMethod2(MyThread* t, uintptr_t ip)
object target = callNodeTarget(t, node);
PROTECT(t, target);
if (callNodeFlags(t, node) & TraceElement::VirtualCall) {
target = resolveTarget(t, t->stack, target);
}
if (LIKELY(t->exception == 0)) {
compile(t, codeZone(t), 0, target);
compile(t, codeAllocator(t), 0, target);
}
if (UNLIKELY(t->exception)) {
return 0;
} else {
void* address = reinterpret_cast<void*>(methodAddress(t, target));
if (callNodeFlags(t, node) & TraceElement::VirtualCall) {
classVtable
(t, objectClass
(t, resolveThisPointer(t, t->stack, target)), methodOffset(t, target))
= address;
} else {
uintptr_t updateIp = ip;
if (callNodeFlags(t, node) & TraceElement::TailCall) {
updateIp -= t->arch->constantCallSize();
}
updateCall
(t, Call, true, reinterpret_cast<void*>(updateIp), address);
uintptr_t updateIp = ip;
if (callNodeFlags(t, node) & TraceElement::TailCall) {
updateIp -= t->arch->constantCallSize();
}
updateCall(t, Call, true, reinterpret_cast<void*>(updateIp), address);
return address;
}
}
@@ -4582,6 +4614,46 @@ compileMethod(MyThread* t)
}
}
void*
compileVirtualMethod2(MyThread* t, object class_, unsigned index)
{
PROTECT(t, class_);
object target = resolveTarget(t, class_, index);
PROTECT(t, target);
if (LIKELY(t->exception == 0)) {
compile(t, codeAllocator(t), 0, target);
}
if (UNLIKELY(t->exception)) {
return 0;
} else {
void* address = reinterpret_cast<void*>(methodAddress(t, target));
if (address != reinterpret_cast<void*>(nativeThunk(t))) {
classVtable(t, class_, methodOffset(t, target)) = address;
}
return address;
}
}
uint64_t
compileVirtualMethod(MyThread* t)
{
object class_ = t->virtualCallClass;
t->virtualCallClass = 0;
unsigned index = t->virtualCallIndex;
t->virtualCallIndex = 0;
void* r = compileVirtualMethod2(t, class_, index);
if (UNLIKELY(t->exception)) {
unwind(t);
} else {
return reinterpret_cast<uintptr_t>(r);
}
}
void*
tailCall2(MyThread* t, uintptr_t ip)
{
@@ -4596,7 +4668,7 @@ tailCall2(MyThread* t, uintptr_t ip)
}
if (LIKELY(t->exception == 0)) {
compile(t, codeZone(t), 0, target);
compile(t, codeAllocator(t), 0, target);
}
if (UNLIKELY(t->exception)) {
@@ -5200,27 +5272,6 @@ processor(MyThread* t);
class MyProcessor: public Processor {
public:
class CodeAllocator: public Allocator {
public:
CodeAllocator(System* s): s(s) { }
virtual void* tryAllocate(unsigned size) {
return s->tryAllocateExecutable(size);
}
virtual void* allocate(unsigned size) {
void* p = tryAllocate(size);
expect(s, p);
return p;
}
virtual void free(const void* p, unsigned size) {
s->freeExecutable(p, size);
}
System* s;
};
MyProcessor(System* s, Allocator* allocator):
s(s),
allocator(allocator),
@@ -5233,9 +5284,11 @@ class MyProcessor: public Processor {
methodTreeSentinal(0),
objectPools(0),
staticTableArray(0),
codeAllocator(s),
codeZone(s, &codeAllocator, 64 * 1024)
{ }
virtualThunks(0),
codeAllocator(s, 0, 0)
{ }
virtual Thread*
makeThread(Machine* m, object javaThread, Thread* parent)
@@ -5292,11 +5345,8 @@ class MyProcessor: public Processor {
virtual void
initVtable(Thread* t, object c)
{
void* compiled = reinterpret_cast<void*>
(::defaultThunk(static_cast<MyThread*>(t)));
for (unsigned i = 0; i < classLength(t, c); ++i) {
classVtable(t, c, i) = compiled;
classVtable(t, c, i) = virtualThunk(static_cast<MyThread*>(t), i);
}
}
@@ -5399,7 +5449,8 @@ class MyProcessor: public Processor {
PROTECT(t, method);
compile(static_cast<MyThread*>(t), &codeZone, 0, method);
compile(static_cast<MyThread*>(t),
::codeAllocator(static_cast<MyThread*>(t)), 0, method);
if (LIKELY(t->exception == 0)) {
return ::invoke(t, method, &list);
@@ -5430,7 +5481,8 @@ class MyProcessor: public Processor {
PROTECT(t, method);
compile(static_cast<MyThread*>(t), &codeZone, 0, method);
compile(static_cast<MyThread*>(t),
::codeAllocator(static_cast<MyThread*>(t)), 0, method);
if (LIKELY(t->exception == 0)) {
return ::invoke(t, method, &list);
@@ -5460,7 +5512,8 @@ class MyProcessor: public Processor {
PROTECT(t, method);
compile(static_cast<MyThread*>(t), &codeZone, 0, method);
compile(static_cast<MyThread*>(t),
::codeAllocator(static_cast<MyThread*>(t)), 0, method);
if (LIKELY(t->exception == 0)) {
return ::invoke(t, method, &list);
@@ -5483,8 +5536,10 @@ class MyProcessor: public Processor {
}
virtual void dispose() {
codeZone.dispose();
if (codeAllocator.base) {
s->freeExecutable(codeAllocator.base, codeAllocator.capacity);
}
s->handleSegFault(0);
allocator->free(this, sizeof(*this));
@@ -5551,37 +5606,34 @@ class MyProcessor: public Processor {
return visitor.trace;
}
virtual void compileThunks(Thread* vmt, BootImage* image, uint8_t* code,
unsigned* offset, unsigned capacity)
virtual void initialize(Thread* vmt, BootImage* image, uint8_t* code,
unsigned capacity)
{
MyThread* t = static_cast<MyThread*>(vmt);
FixedAllocator allocator(t, code + *offset, capacity);
codeAllocator.base = code;
codeAllocator.capacity = capacity;
::compileThunks(t, &allocator, this, image, code);
*offset += allocator.offset;
::compileThunks
(static_cast<MyThread*>(vmt), &codeAllocator, this, image, code);
}
virtual void compileMethod(Thread* vmt, Zone* zone, uint8_t* code,
unsigned* offset, unsigned capacity,
object* constants, object* calls,
DelayedPromise** addresses, object method)
virtual void compileMethod(Thread* vmt, Zone* zone, object* constants,
object* calls, DelayedPromise** addresses,
object method)
{
MyThread* t = static_cast<MyThread*>(vmt);
FixedAllocator allocator(t, code + *offset, capacity);
BootContext bootContext(t, *constants, *calls, *addresses, zone);
compile(t, &allocator, &bootContext, method);
compile(t, &codeAllocator, &bootContext, method);
*constants = bootContext.constants;
*calls = bootContext.calls;
*addresses = bootContext.addresses;
*offset += allocator.offset;
}
virtual void visitRoots(BootImage* image, HeapWalker* w) {
image->methodTree = w->visitRoot(methodTree);
image->methodTreeSentinal = w->visitRoot(methodTreeSentinal);
image->virtualThunks = w->visitRoot(virtualThunks);
}
virtual unsigned* makeCallTable(Thread* t, BootImage* image, HeapWalker* w,
@@ -5606,6 +5658,9 @@ class MyProcessor: public Processor {
}
virtual void boot(Thread* t, BootImage* image) {
codeAllocator.base = static_cast<uint8_t*>
(s->tryAllocateExecutable(ExecutableAreaSizeInBytes));
codeAllocator.capacity = ExecutableAreaSizeInBytes;
if (image) {
::boot(static_cast<MyThread*>(t), image);
} else {
@@ -5615,7 +5670,7 @@ class MyProcessor: public Processor {
set(t, methodTree, TreeNodeLeft, methodTreeSentinal);
set(t, methodTree, TreeNodeRight, methodTreeSentinal);
::compileThunks(static_cast<MyThread*>(t), &codeZone, this, 0, 0);
::compileThunks(static_cast<MyThread*>(t), &codeAllocator, this, 0, 0);
}
segFaultHandler.m = t->m;
@@ -5636,9 +5691,9 @@ class MyProcessor: public Processor {
object methodTreeSentinal;
object objectPools;
object staticTableArray;
object virtualThunks;
SegFaultHandler segFaultHandler;
CodeAllocator codeAllocator;
Zone codeZone;
FixedAllocator codeAllocator;
};
object
@@ -5936,6 +5991,20 @@ fixupThunks(MyThread* t, BootImage* image, uint8_t* code)
#undef THUNK
}
void
fixupVirtualThunks(MyThread* t, BootImage* image, uint8_t* code)
{
MyProcessor* p = processor(t);
for (unsigned i = 0; i < wordArrayLength(t, p->virtualThunks); ++i) {
if (wordArrayBody(t, p->virtualThunks, i)) {
wordArrayBody(t, p->virtualThunks, i)
= (wordArrayBody(t, p->virtualThunks, i) - image->codeBase)
+ reinterpret_cast<uintptr_t>(code);
}
}
}
void
boot(MyThread* t, BootImage* image)
{
@@ -5974,6 +6043,8 @@ boot(MyThread* t, BootImage* image)
p->methodTree = bootObject(heap, image->methodTree);
p->methodTreeSentinal = bootObject(heap, image->methodTreeSentinal);
p->virtualThunks = bootObject(heap, image->virtualThunks);
fixupCode(t, codeMap, codeMapSizeInWords, code, heap);
syncInstructionCache(code, image->codeSize);
@@ -5991,6 +6062,8 @@ boot(MyThread* t, BootImage* image)
fixupThunks(t, image, code);
fixupVirtualThunks(t, image, code);
fixupMethods(t, image, code);
t->m->bootstrapClassMap = makeHashMap(t, 0, 0);
@@ -6046,6 +6119,38 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
a->endBlock(false)->resolve(0, 0);
}
ThunkContext defaultVirtualContext(t, &zone);
{ Assembler* a = defaultVirtualContext.context.assembler;
Assembler::Register class_(t->arch->returnLow());
Assembler::Memory virtualCallClass
(t->arch->thread(), difference(&(t->virtualCallClass), t));
a->apply(Move, BytesPerWord, RegisterOperand, &class_,
BytesPerWord, MemoryOperand, &virtualCallClass);
Assembler::Register index(t->arch->returnHigh());
Assembler::Memory virtualCallIndex
(t->arch->thread(), difference(&(t->virtualCallIndex), t));
a->apply(Move, BytesPerWord, RegisterOperand, &index,
BytesPerWord, MemoryOperand, &virtualCallIndex);
a->saveFrame(difference(&(t->stack), t), difference(&(t->base), t));
Assembler::Register thread(t->arch->thread());
a->pushFrame(1, BytesPerWord, RegisterOperand, &thread);
Assembler::Constant proc(&(defaultVirtualContext.promise));
a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);
a->popFrame();
Assembler::Register result(t->arch->returnLow());
a->apply(Jump, BytesPerWord, RegisterOperand, &result);
a->endBlock(false)->resolve(0, 0);
}
ThunkContext tailHelperContext(t, &zone);
{ Assembler* a = tailHelperContext.context.assembler;
@@ -6119,18 +6224,6 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
p->thunkSize = pad(tableContext.context.assembler->length());
expect(t, codeZone(t)->ensure
(codeSingletonSizeInBytes
(t, defaultContext.context.assembler->length())
+ codeSingletonSizeInBytes
(t, tailHelperContext.context.assembler->length())
+ codeSingletonSizeInBytes
(t, nativeContext.context.assembler->length())
+ codeSingletonSizeInBytes
(t, aioobContext.context.assembler->length())
+ codeSingletonSizeInBytes
(t, p->thunkSize * ThunkCount)));
p->defaultTailThunk = finish
(t, allocator, defaultContext.context.assembler, "default");
@@ -6147,6 +6240,20 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
}
}
p->defaultVirtualThunk = finish
(t, allocator, defaultVirtualContext.context.assembler, "defaultVirtual");
{ void* call;
defaultVirtualContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(compileVirtualMethod)), &call);
if (image) {
image->defaultVirtualThunk = p->defaultVirtualThunk - imageBase;
image->compileVirtualMethodCall
= static_cast<uint8_t*>(call) - imageBase;
}
}
p->tailHelperThunk = finish
(t, allocator, tailHelperContext.context.assembler, "tailHelper");
@@ -6249,6 +6356,51 @@ aioobThunk(MyThread* t)
return reinterpret_cast<uintptr_t>(processor(t)->aioobThunk);
}
uintptr_t
compileVirtualThunk(MyThread* t, unsigned index)
{
Context context(t);
Assembler* a = context.assembler;
ResolvedPromise indexPromise(index);
Assembler::Constant indexConstant(&indexPromise);
Assembler::Register indexRegister(t->arch->returnHigh());
a->apply(Move, BytesPerWord, ConstantOperand, &indexConstant,
BytesPerWord, RegisterOperand, &indexRegister);
ResolvedPromise defaultVirtualThunkPromise(defaultVirtualThunk(t));
Assembler::Constant thunk(&defaultVirtualThunkPromise);
a->apply(Jump, BytesPerWord, ConstantOperand, &thunk);
a->endBlock(false)->resolve(0, 0);
uint8_t* start = static_cast<uint8_t*>
(codeAllocator(t)->allocate(a->length()));
a->writeTo(start);
return reinterpret_cast<uintptr_t>(start);
}
uintptr_t
virtualThunk(MyThread* t, unsigned index)
{
MyProcessor* p = processor(t);
if (p->virtualThunks == 0 or wordArrayLength(t, p->virtualThunks) <= index) {
object newArray = makeWordArray(t, nextPowerOfTwo(index + 1));
if (p->virtualThunks) {
memcpy(&wordArrayBody(t, newArray, 0),
&wordArrayBody(t, p->virtualThunks, 0),
wordArrayLength(t, p->virtualThunks) * BytesPerWord);
}
p->virtualThunks = newArray;
}
if (wordArrayBody(t, p->virtualThunks, index) == 0) {
ACQUIRE(t, t->m->classLock);
if (wordArrayBody(t, p->virtualThunks, index) == 0) {
uintptr_t thunk = compileVirtualThunk(t, index);
wordArrayBody(t, p->virtualThunks, index) = thunk;
}
}
return wordArrayBody(t, p->virtualThunks, index);
}
void
compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
object method)
@@ -6330,9 +6482,10 @@ methodTreeSentinal(MyThread* t)
return processor(t)->methodTreeSentinal;
}
Zone*
codeZone(MyThread* t) {
return &(processor(t)->codeZone);
FixedAllocator*
codeAllocator(MyThread* t)
{
return &(processor(t)->codeAllocator);
}
} // namespace
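
The virtualThunk helper above grows a word array of thunk addresses on demand and re-checks the slot under classLock before compiling anything. The same idea in a standalone sketch, with a std::mutex and std::vector standing in for classLock and the GC-managed word array; unlike the original, which checks the slot once outside the lock as a fast path and rounds capacity to a power of two, this version simply holds the lock for the whole operation:

// Sketch of the lazy, once-per-index thunk table (illustrative only).
#include <cstdint>
#include <mutex>
#include <vector>

static std::mutex classLock;                 // stand-in for t->m->classLock
static std::vector<uintptr_t> virtualThunks; // stand-in for the word array

// Placeholder for compileVirtualThunk: would emit and return real code.
static uintptr_t compileVirtualThunkStub(unsigned index) {
  return 0x1000 + index * 0x10;
}

uintptr_t virtualThunk(unsigned index) {
  std::lock_guard<std::mutex> guard(classLock);
  if (virtualThunks.size() <= index) {
    virtualThunks.resize(index + 1, 0);      // grow, keeping old entries
  }
  if (virtualThunks[index] == 0) {
    virtualThunks[index] = compileVirtualThunkStub(index);
  }
  return virtualThunks[index];               // each slot is compiled once
}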


@@ -1194,7 +1194,7 @@ pickTarget(Context* c, Read* read, bool intersectRead,
? 0 : Target::Penalty);
Target best;
if ((mask.typeMask & (1 << RegisterOperand))) {
if (mask.typeMask & (1 << RegisterOperand)) {
Target mine = pickRegisterTarget(c, read->value, mask.registerMask);
mine.cost += registerPenalty;
@@ -3842,6 +3842,61 @@ appendSaveLocals(Context* c)
SaveLocalsEvent(c));
}
class FreezeRegisterEvent: public Event {
public:
FreezeRegisterEvent(Context* c, int number, Value* value):
Event(c), number(number), value(value)
{
addRead(c, this, value, fixedRegisterRead(c, number));
}
virtual const char* name() {
return "FreezeRegisterEvent";
}
virtual void compile(Context* c) {
c->registers[number].freeze(c, value);
for (Read* r = reads; r; r = r->eventNext) {
popRead(c, this, r->value);
}
}
int number;
Value* value;
};
void
appendFreezeRegister(Context* c, int number, Value* value)
{
append(c, new (c->zone->allocate(sizeof(FreezeRegisterEvent)))
FreezeRegisterEvent(c, number, value));
}
class ThawRegisterEvent: public Event {
public:
ThawRegisterEvent(Context* c, int number):
Event(c), number(number)
{ }
virtual const char* name() {
return "ThawRegisterEvent";
}
virtual void compile(Context* c) {
c->registers[number].thaw(c);
}
int number;
};
void
appendThawRegister(Context* c, int number)
{
append(c, new (c->zone->allocate(sizeof(ThawRegisterEvent)))
ThawRegisterEvent(c, number));
}
class DummyEvent: public Event {
public:
DummyEvent(Context* c):
@@ -4925,6 +4980,14 @@ class MyCompiler: public Compiler {
return value(&c, s, s);
}
virtual void freezeRegister(int number, Operand* value) {
appendFreezeRegister(&c, number, static_cast<Value*>(value));
}
virtual void thawRegister(int number) {
appendThawRegister(&c, number);
}
Promise* machineIp() {
return codePromise(&c, c.logicalCode[c.logicalIp]->lastEvent);
}
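
freezeRegister/thawRegister exist so the class pointer can be pinned in one register from the moment it is loaded until the call instruction is emitted. A toy model of the invariant they enforce; this is illustrative only, the real mechanism is the Register/Read machinery above:

// While a register is frozen, the allocator must not hand it out;
// freeze/thaw bracket the region where a pinned value lives.
#include <cassert>

struct ToyRegisterFile {
  bool frozen[8];

  ToyRegisterFile(): frozen() { }

  int pick() {                 // pick any register that is not frozen
    for (int i = 0; i < 8; ++i) {
      if (not frozen[i]) return i;
    }
    assert(false);
    return -1;
  }

  void freeze(int r) { assert(not frozen[r]); frozen[r] = true; }
  void thaw(int r) { assert(frozen[r]); frozen[r] = false; }
};

int main() {
  ToyRegisterFile f;
  f.freeze(0);             // class pointer lives in register 0
  int scratch = f.pick();  // allocation now avoids register 0
  assert(scratch != 0);
  f.thaw(0);               // after the call instruction, register 0 is free
}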


@@ -60,8 +60,11 @@ class Compiler {
Operand* index = 0,
unsigned scale = 1) = 0;
virtual Operand* stack() = 0;
virtual Operand* thread() = 0;
virtual Operand* stack() = 0;
virtual void freezeRegister(int number, Operand* value) = 0;
virtual void thawRegister(int number) = 0;
virtual void push(unsigned footprint) = 0;
virtual void push(unsigned footprint, Operand* value) = 0;


@@ -1454,17 +1454,17 @@ expect(Thread* t, bool v)
class FixedAllocator: public Allocator {
public:
FixedAllocator(Thread* t, uint8_t* base, unsigned capacity):
t(t), base(base), offset(0), capacity(capacity)
FixedAllocator(System* s, uint8_t* base, unsigned capacity):
s(s), base(base), offset(0), capacity(capacity)
{ }
virtual void* tryAllocate(unsigned) {
abort(t);
abort(s);
}
virtual void* allocate(unsigned size) {
unsigned paddedSize = pad(size);
expect(t, offset + paddedSize < capacity);
expect(s, offset + paddedSize < capacity);
void* p = base + offset;
offset += paddedSize;
@@ -1472,10 +1472,10 @@ class FixedAllocator: public Allocator {
}
virtual void free(const void*, unsigned) {
abort(t);
abort(s);
}
Thread* t;
System* s;
uint8_t* base;
unsigned offset;
unsigned capacity;
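
FixedAllocator, shown above, is a bump allocator over a region handed to it up front that never grows, which is how the executable code region now works. The core pattern reduced to a sketch; pad() is modeled here as 8-byte alignment, and a plain assert stands in for the System-based expect/abort plumbing:

#include <cassert>
#include <cstdint>

struct BumpAllocator {
  uint8_t* base;
  unsigned offset;
  unsigned capacity;

  void* allocate(unsigned size) {
    unsigned padded = (size + 7) & ~7u;   // pad to an 8-byte boundary
    assert(offset + padded <= capacity);  // no fallback: the region is fixed
    void* p = base + offset;
    offset += padded;
    return p;
  }
};

int main() {
  static uint8_t region[1024];
  BumpAllocator a = { region, 0, sizeof(region) };
  void* first = a.allocate(10);   // region + 0; offset advances to 16
  void* second = a.allocate(16);  // region + 16
  assert(first == region and second == region + 16);
}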


@@ -63,8 +63,6 @@ const unsigned VisitSignalIndex = 0;
const unsigned SegFaultSignalIndex = 1;
const unsigned InterruptSignalIndex = 2;
const unsigned ExecutableAreaSizeInBytes = 16 * 1024 * 1024;
class MySystem;
MySystem* system;
@@ -518,9 +516,7 @@ class MySystem: public System {
MySystem():
threadVisitor(0),
visitTarget(0),
executableArea(0),
executableOffset(0)
visitTarget(0)
{
expect(this, system == 0);
system = this;
@@ -559,33 +555,23 @@ class MySystem: public System {
}
virtual void* tryAllocateExecutable(unsigned sizeInBytes) {
if (executableArea == 0) {
#ifdef __x86_64__
const unsigned Extra = MAP_32BIT;
const unsigned Extra = MAP_32BIT;
#else
const unsigned Extra = 0;
const unsigned Extra = 0;
#endif
void* p = mmap(0, ExecutableAreaSizeInBytes, PROT_EXEC | PROT_READ
| PROT_WRITE, MAP_PRIVATE | MAP_ANON | Extra, -1, 0);
void* p = mmap(0, sizeInBytes, PROT_EXEC | PROT_READ
| PROT_WRITE, MAP_PRIVATE | MAP_ANON | Extra, -1, 0);
if (p != MAP_FAILED) {
executableArea = static_cast<uint8_t*>(p);
}
}
if (executableArea
and executableOffset + pad(sizeInBytes) < ExecutableAreaSizeInBytes)
{
void* r = executableArea + executableOffset;
executableOffset += pad(sizeInBytes);
return r;
} else {
if (p == MAP_FAILED) {
return 0;
} else {
return static_cast<uint8_t*>(p);
}
}
virtual void freeExecutable(const void*, unsigned) {
// ignore
virtual void freeExecutable(const void* p, unsigned sizeInBytes) {
munmap(const_cast<void*>(p), sizeInBytes);
}
virtual bool success(Status s) {

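tryAllocateExecutable now maps each request directly instead of carving pieces out of one shared 16 MB area. A standalone version of the same mapping; the MAP_32BIT flag on x86_64 keeps generated code in the low 2 GB of the address space, within reach of 32-bit relative call and jump displacements:

#include <sys/mman.h>
#include <cstdio>

// Map a dedicated read/write/execute region per request, as the new
// tryAllocateExecutable does; returns 0 on failure.
void* allocateExecutable(unsigned sizeInBytes) {
#ifdef __x86_64__
  const int extra = MAP_32BIT;  // keep code reachable by 32-bit displacements
#else
  const int extra = 0;
#endif
  void* p = mmap(0, sizeInBytes, PROT_EXEC | PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANON | extra, -1, 0);
  return p == MAP_FAILED ? 0 : p;
}

int main() {
  void* p = allocateExecutable(4096);
  std::printf("mapped at %p\n", p);
  if (p) munmap(p, 4096);
}
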

@@ -117,12 +117,11 @@ class Processor {
getStackTrace(Thread* t, Thread* target) = 0;
virtual void
compileThunks(Thread* t, BootImage* image, uint8_t* code, unsigned* size,
unsigned capacity) = 0;
initialize(Thread* t, BootImage* image, uint8_t* code,
unsigned capacity) = 0;
virtual void
compileMethod(Thread* t, Zone* zone, uint8_t* code, unsigned* offset,
unsigned capacity, object* constants, object* calls,
compileMethod(Thread* t, Zone* zone, object* constants, object* calls,
DelayedPromise** addresses, object method) = 0;
virtual void


@@ -97,6 +97,9 @@
(uintptr_t flags)
(object next))
(type wordArray
(array uintptr_t body))
(type array
(noassert array object body))
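
The wordArray declaration above makes Avian's type generator emit the makeWordArray, wordArrayLength, and wordArrayBody accessors used by the thunk table. Conceptually the generated object is a length-prefixed array of machine words; a hand-written equivalent, illustrative only since the real object also carries a GC header:

#include <cstdint>
#include <cstdlib>

// Rough shape of a wordArray: a word count followed by that many words.
struct WordArray {
  uintptr_t length;
  uintptr_t body[1]; // actually 'length' words, allocated inline
};

WordArray* makeWordArray(unsigned length) {
  // one word for the length, plus the body, zero-initialized
  WordArray* a = static_cast<WordArray*>
    (std::calloc(length + 1, sizeof(uintptr_t)));
  a->length = length;
  return a;
}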


@@ -523,8 +523,6 @@ class MySystem: public System {
}
virtual void* tryAllocateExecutable(unsigned sizeInBytes) {
assert(this, sizeInBytes % LikelyPageSizeInBytes == 0);
return VirtualAlloc
(0, sizeInBytes, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}