commit 20cf42c5e4
parent 6500f1eff6

more work on boot image creation
@@ -22,7 +22,7 @@ public class Method<T> extends AccessibleObject implements Member {
 private byte[] spec;
 private Class<T> class_;
 private Object code;
-private Object compiled;
+private long compiled;

 private Method() { }

@@ -80,6 +80,7 @@ class Promise {
 public:
 virtual int64_t value() = 0;
 virtual bool resolved() = 0;
+virtual bool offer(void*) { return false; }
 };

 class ResolvedPromise: public Promise {
@@ -97,6 +98,27 @@ class ResolvedPromise: public Promise {
 int64_t value_;
 };

+class OfferPromise: public Promise {
+public:
+OfferPromise(System* s): s(s), offset(0) { }
+
+virtual int64_t value() {
+abort(s);
+}
+
+virtual bool resolved() {
+return false;
+}
+
+virtual bool offer(void* offset) {
+this->offset = offset;
+return true;
+}
+
+System* s;
+void* offset;
+};
+
 class TraceHandler {
 public:
 virtual void handleTrace(Promise* address) = 0;
@@ -1,8 +1,23 @@
+/* Copyright (c) 2008, Avian Contributors
+
+Permission to use, copy, modify, and/or distribute this software
+for any purpose with or without fee is hereby granted, provided
+that the above copyright notice and this permission notice appear
+in all copies.
+
+There is NO WARRANTY for this software. See license.txt for
+details. */
+
 #include "bootimage.h"
 #include "heapwalk.h"
 #include "common.h"
 #include "machine.h"
 #include "util.h"
+#include "assembler.h"
+
+// since we aren't linking against libstdc++, we must implement this
+// ourselves:
+extern "C" void __cxa_pure_virtual(void) { abort(); }

 using namespace vm;

@@ -26,11 +41,12 @@ object
 makeCodeImage(Thread* t, BootImage* image, uint8_t* code, unsigned capacity)
 {
 unsigned size;
-compileThunks(t, code, &size, image);
+t->m->processor->compileThunks(t, image, code, &size, capacity);

-unsigned fixupCount = 0;
-object table = makeHashMap(t, 0, 0);
-PROTECT(t, table);
+object objectTable = makeHashMap(t, 0, 0);
+PROTECT(t, objectTable);
+
+Zone zone(t->m->system, t->m->heap, 64 * 1024);

 for (Finder::Iterator it(t->m->finder); it.hasMore();) {
 unsigned nameSize;
@@ -44,8 +60,8 @@ makeCodeImage(Thread* t, BootImage* image, uint8_t* code, unsigned capacity)
 for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
 object method = arrayBody(t, classMethodTable(t, c), i);
 if (methodCode(t, method)) {
-compileMethod(t, method, code, &size, capacity,
-&table, &fixupCount);
+t->m->processor->compileMethod
+(t, &zone, code, &size, capacity, objectTable, method);
 }
 }
 }
@@ -53,7 +69,7 @@ makeCodeImage(Thread* t, BootImage* image, uint8_t* code, unsigned capacity)

 image->codeSize = size;

-return table;
+return objectTable;
 }

 unsigned
@@ -69,13 +85,23 @@ objectSize(Thread* t, object o)
 return baseSize(t, o, objectClass(t, o));
 }

-HeapMap*
+void
+visitRoots(Machine* m, BootImage* image, HeapWalker* w)
+{
+image->loader = w->visitRoot(m->loader);
+image->stringMap = w->visitRoot(m->stringMap);
+image->types = w->visitRoot(m->types);
+
+m->processor->visitRoots(image, w);
+}
+
+HeapWalker*
 makeHeapImage(Thread* t, BootImage* image, uintptr_t* heap, uintptr_t* map,
 unsigned capacity)
 {
-class Walker: public HeapWalker {
+class Visitor: public HeapVisitor {
 public:
-Walker(Thread* t, uintptr_t* heap, uintptr_t* map, unsigned capacity):
+Visitor(Thread* t, uintptr_t* heap, uintptr_t* map, unsigned capacity):
 t(t), currentObject(0), currentOffset(0), heap(heap), map(map),
 position(0), capacity(capacity)
 { }
@@ -130,13 +156,14 @@ makeHeapImage(Thread* t, BootImage* image, uintptr_t* heap, uintptr_t* map,
 uintptr_t* map;
 unsigned position;
 unsigned capacity;
-} walker(t, heap, map, capacity / BytesPerWord);
+} visitor(t, heap, map, capacity / BytesPerWord);

-HeapMap* table = walk(t, &walker);
+HeapWalker* w = makeHeapWalker(t, &visitor);
+visitRoots(t->m, image, w);

-image->heapSize = walker.position * BytesPerWord;
+image->heapSize = visitor.position * BytesPerWord;

-return table;
+return w;
 }

 void
@@ -153,9 +180,13 @@ updateCodeTable(Thread* t, object codeTable, uint8_t* code, uintptr_t* codeMap,
 fixup;
 fixup = pairSecond(t, fixup))
 {
-int32_t v = intValue(t, pairFirst(t, fixup));
-memcpy(code + v, &target, BytesPerWord);
-markBit(codeMap, v);
+OfferPromise* p = static_cast<OfferPromise*>
+(pointerValue(t, pairFirst(t, fixup)));
+assert(t, p->offset);
+
+memcpy(p->offset, &target, BytesPerWord);
+markBit(codeMap, reinterpret_cast<intptr_t>(p->offset)
+- reinterpret_cast<intptr_t>(code));
 }
 }
 }
@@ -186,25 +217,15 @@ writeBootImage(Thread* t, FILE* out)
 (t->m->heap->allocate(heapMapSize(HeapCapacity)));
 memset(heapMap, 0, heapMapSize(HeapCapacity));

-HeapMap* heapTable = makeHeapImage(t, &image, heap, heapMap, HeapCapacity);
+HeapWalker* heapWalker = makeHeapImage
+(t, &image, heap, heapMap, HeapCapacity);

-updateCodeTable(t, codeTable, code, codeMap, heapTable);
+updateCodeTable(t, codeTable, code, codeMap, heapWalker->map());

+heapWalker->dispose();
+
 image.magic = BootImage::Magic;

-image.codeTable = offset(codeTable, heap);
-
-image.loader = offset(t->m->loader, heap);
-image.bootstrapClassMap = offset(t->m->bootstrapClassMap, heap);
-image.stringMap = offset(t->m->stringMap, heap);
-image.types = offset(t->m->types, heap);
-image.jniMethodTable = offset(t->m->jniMethodTable, heap);
-image.finalizers = offset(t->m->finalizers, heap);
-image.tenuredFinalizers = offset(t->m->tenuredFinalizers, heap);
-image.finalizeQueue = offset(t->m->finalizeQueue, heap);
-image.weakReferences = offset(t->m->weakReferences, heap);
-image.tenuredWeakReferences = offset(t->m->tenuredWeakReferences, heap);
-
 fwrite(&image, sizeof(BootImage), 1, out);

 fwrite(heapMap, pad(heapMapSize(image.heapSize)), 1, out);
@@ -1,3 +1,18 @@
+/* Copyright (c) 2008, Avian Contributors
+
+Permission to use, copy, modify, and/or distribute this software
+for any purpose with or without fee is hereby granted, provided
+that the above copyright notice and this permission notice appear
+in all copies.
+
+There is NO WARRANTY for this software. See license.txt for
+details. */
+
+#ifndef BOOTIMAGE_H
+#define BOOTIMAGE_H
+
+#include "common.h"
+
 namespace vm {

 class BootImage {
@@ -9,18 +24,15 @@ class BootImage {
 unsigned heapSize;
 unsigned codeSize;

-unsigned codeTable;
-
 unsigned loader;
-unsigned bootstrapClassMap;
 unsigned stringMap;
 unsigned types;
-unsigned jniMethodTable;
-unsigned finalizers;
-unsigned tenuredFinalizers;
-unsigned finalizeQueue;
-unsigned weakReferences;
-unsigned tenuredWeakReferences;
+
+uintptr_t codeBase;
+unsigned callTable;
+unsigned methodTree;
+unsigned methodTreeSentinal;
+unsigned objectPools;

 unsigned defaultThunk;
 unsigned nativeThunk;
@@ -32,3 +44,5 @@ class BootImage {
 };

 } // namespace vm
+
+#endif//BOOTIMAGE_H
@@ -293,6 +293,15 @@ difference(void* a, void* b)
 return reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b);
 }

+template <class T>
+inline void*
+voidPointer(T function)
+{
+void* p;
+memcpy(&p, &function, sizeof(void*));
+return p;
+}
+
 class Machine;
 class Thread;

src/compile.cpp | 471
@@ -113,23 +113,25 @@ methodTree(MyThread* t);
 object
 methodTreeSentinal(MyThread* t);

+unsigned
+compiledSize(intptr_t address)
+{
+return reinterpret_cast<uintptr_t*>(address)[-1];
+}
+
 intptr_t
 compareIpToMethodBounds(Thread* t, intptr_t ip, object method)
 {
-intptr_t start = reinterpret_cast<intptr_t>
-(&singletonValue(t, methodCompiled(t, method), 0));
+intptr_t start = methodCompiled(t, method);

 if (DebugMethodTree) {
 fprintf(stderr, "find 0x%"LX" in (0x%"LX",0x%"LX")\n", ip, start,
-start + (singletonCount(t, methodCompiled(t, method))
-* BytesPerWord));
+start + compiledSize(start));
 }

 if (ip < start) {
 return -1;
-} else if (ip < start + static_cast<intptr_t>
-(singletonCount(t, methodCompiled(t, method))
-* BytesPerWord))
+} else if (ip < start + compiledSize(start))
 {
 return 0;
 } else {
@@ -284,8 +286,7 @@ class MyStackWalker: public Processor::StackWalker {
 virtual int ip() {
 switch (state) {
 case Method:
-return reinterpret_cast<intptr_t>(ip_) - reinterpret_cast<intptr_t>
-(&singletonValue(t, methodCompiled(t, method_), 0));
+return reinterpret_cast<intptr_t>(ip_) - methodCompiled(t, method_);

 case NativeMethod:
 return 0;
@@ -336,14 +337,24 @@ localObject(MyThread* t, void* base, object method, unsigned index)
 (static_cast<uint8_t*>(base) + localOffset(t, index, method));
 }

-class PoolElement {
+class PoolElement: public Promise {
 public:
-PoolElement(object value, Promise* address, PoolElement* next):
-value(value), address(address), next(next)
+PoolElement(Thread* t, object target, PoolElement* next):
+t(t), target(target), address(0), next(next)
 { }

-object value;
-Promise* address;
+virtual int64_t value() {
+assert(t, resolved());
+return address;
+}
+
+virtual bool resolved() {
+return address != 0;
+}
+
+Thread* t;
+object target;
+intptr_t address;
 PoolElement* next;
 };

@@ -440,6 +451,28 @@ const unsigned ThunkCount = gcIfNecessaryThunk + 1;
 intptr_t
 getThunk(MyThread* t, Thunk thunk);

+class BootContext {
+public:
+class MyProtector: public Thread::Protector {
+public:
+MyProtector(Thread* t, BootContext* c): Protector(t), c(c) { }
+
+virtual void visit(Heap::Visitor* v) {
+v->visit(&(c->objectTable));
+}
+
+BootContext* c;
+};
+
+BootContext(Thread* t, object objectTable, Zone* zone):
+protector(t, this), objectTable(objectTable), zone(zone)
+{ }
+
+MyProtector protector;
+object objectTable;
+Zone* zone;
+};
+
 class Context {
 public:
 class MyProtector: public Thread::Protector {
@@ -450,7 +483,7 @@ class Context {
 v->visit(&(c->method));

 for (PoolElement* p = c->objectPool; p; p = p->next) {
-v->visit(&(p->value));
+v->visit(&(p->target));
 }

 for (TraceElement* p = c->traceLog; p; p = p->next) {
@@ -492,13 +525,14 @@ class Context {
 MyThread* t;
 };

-Context(MyThread* t, object method):
+Context(MyThread* t, BootContext* bootContext, object method):
 thread(t),
 zone(t->m->system, t->m->heap, 16 * 1024),
 assembler(makeAssembler(t->m->system, t->m->heap, &zone)),
 client(t),
 compiler(makeCompiler(t->m->system, assembler, &zone, &client)),
 method(method),
+bootContext(bootContext),
 objectPool(0),
 traceLog(0),
 traceLogCount(0),
@@ -515,6 +549,7 @@ class Context {
 client(t),
 compiler(0),
 method(0),
+bootContext(0),
 objectPool(0),
 traceLog(0),
 traceLogCount(0),
@@ -535,6 +570,7 @@ class Context {
 MyClient client;
 Compiler* compiler;
 object method;
+BootContext* bootContext;
 PoolElement* objectPool;
 TraceElement* traceLog;
 unsigned traceLogCount;
@@ -599,11 +635,36 @@ class Frame {
 }

 Compiler::Operand* append(object o) {
-Promise* p = c->poolAppend(0);
+if (context->bootContext) {
+BootContext* bc = context->bootContext;
+
+object node = hashMapFindNode
+(t, bc->objectTable, o, objectHash, objectEqual);
+PROTECT(t, node);
+
+Promise* p = new (bc->zone->allocate(sizeof(OfferPromise)))
+OfferPromise(t->m->system);
+
+object pointer = makePointer(t, p);
+
+if (node) {
+object fixup = makePair(t, pointer, tripleSecond(t, node));
+vm::set(t, node, TripleSecond, fixup);
+} else {
+PROTECT(t, o);
+object fixup = makePair(t, pointer, 0);
+// todo: use a hash function that compares by value
+hashMapInsert(t, bc->objectTable, o, fixup, objectHash);
+}
+
+return c->promiseConstant(p);
+} else {
 context->objectPool = new
 (context->zone.allocate(sizeof(PoolElement)))
-PoolElement(o, p, context->objectPool);
-return c->address(p);
+PoolElement(t, o, context->objectPool);
+
+return c->address(context->objectPool);
+}
 }

 unsigned localSize() {
@@ -1094,8 +1155,7 @@ findExceptionHandler(Thread* t, object method, void* ip)
 if (table) {
 object index = arrayBody(t, table, 0);

-uint8_t* compiled = reinterpret_cast<uint8_t*>
-(&singletonValue(t, methodCompiled(t, method), 0));
+uint8_t* compiled = reinterpret_cast<uint8_t*>(methodCompiled(t, method));

 for (unsigned i = 0; i < arrayLength(t, table) - 1; ++i) {
 unsigned start = intArrayBody(t, index, i * 3);
@@ -1192,9 +1252,9 @@ void* FORCE_ALIGN
 findInterfaceMethodFromInstance(MyThread* t, object method, object instance)
 {
 if (instance) {
-return &singletonValue
-(t, methodCompiled
-(t, findInterfaceMethod(t, method, objectClass(t, instance))), 0);
+return reinterpret_cast<void*>
+(methodCompiled
+(t, findInterfaceMethod(t, method, objectClass(t, instance))));
 } else {
 t->exception = makeNullPointerException(t);
 unwind(t);
@@ -1656,13 +1716,16 @@ emptyMethod(MyThread* t, object method)
 and (codeBody(t, methodCode(t, method), 0) == return_);
 }

-object
+object&
+objectPools(MyThread* t);
+
+uintptr_t
 defaultThunk(MyThread* t);

-object
+uintptr_t
 nativeThunk(MyThread* t);

-object
+uintptr_t
 aioobThunk(MyThread* t);

 void
@@ -1677,27 +1740,27 @@ compileDirectInvoke(MyThread* t, Frame* frame, object target)
 if (not emptyMethod(t, target)) {
 if (methodFlags(t, target) & ACC_NATIVE) {
 result = c->call
-(c->constant
-(reinterpret_cast<intptr_t>
-(&singletonBody(t, nativeThunk(t), 0))),
+(c->constant(nativeThunk(t)),
 0,
 frame->trace(target, false),
 rSize,
 0);
-} else if (methodCompiled(t, target) == defaultThunk(t)) {
+} else if (methodCompiled(t, target) == defaultThunk(t)
+or (frame->context->bootContext
+and methodClass(t, target)
+!= methodClass(t, frame->context->method)))
+{
+// todo: when creating a boot image, log intra-class calls for
+// later fixup
 result = c->call
-(c->constant
-(reinterpret_cast<intptr_t>
-(&singletonBody(t, defaultThunk(t), 0))),
+(c->constant(defaultThunk(t)),
 Compiler::Aligned,
 frame->trace(target, false),
 rSize,
 0);
 } else {
 result = c->call
-(c->constant
-(reinterpret_cast<intptr_t>
-(&singletonBody(t, methodCompiled(t, target), 0))),
+(c->constant(methodCompiled(t, target)),
 0,
 frame->trace(0, false),
 rSize,
@@ -1880,8 +1943,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned ip,
 Compiler::Operand* array = frame->popObject();

 if (CheckArrayBounds) {
-c->checkBounds(array, ArrayLength, index, reinterpret_cast<intptr_t>
-(&singletonValue(t, aioobThunk(t), 0)));
+c->checkBounds(array, ArrayLength, index, aioobThunk(t));
 }

 switch (instruction) {
@@ -1936,8 +1998,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned ip,
 Compiler::Operand* array = frame->popObject();

 if (CheckArrayBounds) {
-c->checkBounds(array, ArrayLength, index, reinterpret_cast<intptr_t>
-(&singletonValue(t, aioobThunk(t), 0)));
+c->checkBounds(array, ArrayLength, index, aioobThunk(t));
 }

 switch (instruction) {
@@ -3720,43 +3781,49 @@ codeSingletonSizeInBytes(MyThread*, unsigned codeSizeInBytes)
 return pad(SingletonBody + (size * BytesPerWord));
 }

-object
-allocateCode(MyThread* t, unsigned codeSizeInBytes)
+uint8_t*
+finish(MyThread* t, Allocator* allocator, Assembler* a, const char* name)
 {
-unsigned count = ceiling(codeSizeInBytes, BytesPerWord);
-unsigned size = count + singletonMaskSize(count);
-object result = allocate3
-(t, codeZone(t), Machine::ImmortalAllocation,
-SingletonBody + (size * BytesPerWord), true);
-initSingleton(t, result, size, true);
-mark(t, result, 0);
-singletonMask(t, result)[0] = 1;
-return result;
-}
-
-object
-finish(MyThread* t, Assembler* a, const char* name)
-{
-object result = allocateCode(t, a->length());
-uint8_t* start = reinterpret_cast<uint8_t*>(&singletonValue(t, result, 0));
+uint8_t* start = static_cast<uint8_t*>
+(allocator->allocate(pad(a->length())));

 a->writeTo(start);

 logCompile(t, start, a->length(), 0, name, 0);

-return result;
+return start;
 }

-object
-finish(MyThread* t, Context* context)
+uint8_t*
+finish(MyThread* t, Allocator* allocator, Context* context)
 {
 Compiler* c = context->compiler;

 unsigned codeSize = c->compile();
-object result = allocateCode(t, pad(codeSize) + c->poolSize());
-PROTECT(t, result);
+uintptr_t* code = static_cast<uintptr_t*>
+(allocator->allocate(pad(codeSize) + BytesPerWord));
+code[0] = codeSize;
+uint8_t* start = reinterpret_cast<uint8_t*>(code + 1);

-uint8_t* start = reinterpret_cast<uint8_t*>(&singletonValue(t, result, 0));
+if (context->objectPool) {
+object pool = allocate3
+(t, allocator, Machine::ImmortalAllocation,
+FixedSizeOfArray + c->poolSize() + BytesPerWord, true);
+
+initArray(t, pool, (c->poolSize() / BytesPerWord) + 1, false);
+
+set(t, pool, ArrayBody, objectPools(t));
+objectPools(t) = pool;
+
+unsigned i = 1;
+for (PoolElement* p = context->objectPool; p; p = p->next) {
+unsigned offset = ArrayBody + ((i++) * BytesPerWord);
+
+p->address = reinterpret_cast<uintptr_t>(pool) + offset;
+
+set(t, pool, offset, p->target);
+}
+}

 c->writeTo(start);

@@ -3828,14 +3895,6 @@ finish(MyThread* t, Context* context)
 set(t, methodCode(t, context->method), CodePool, map);
 }

-for (PoolElement* p = context->objectPool; p; p = p->next) {
-intptr_t offset = p->address->value() - reinterpret_cast<intptr_t>(start);
-
-singletonMarkObject(t, result, offset / BytesPerWord);
-
-set(t, result, SingletonBody + offset, p->value);
-}
-
 logCompile
 (t, start, codeSize,
 reinterpret_cast<const char*>
@@ -3859,11 +3918,11 @@ finish(MyThread* t, Context* context)
 asm("int3");
 }

-return result;
+return start;
 }

-object
-compile(MyThread* t, Context* context)
+uint8_t*
+compile(MyThread* t, Allocator* allocator, Context* context)
 {
 Compiler* c = context->compiler;

@@ -3970,11 +4029,12 @@ compile(MyThread* t, Context* context)
 calculateFrameMaps(t, context, 0, 0);
 }

-return finish(t, context);
+return finish(t, allocator, context);
 }

 void
-compile(MyThread* t, object method);
+compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
+object method);

 void*
 compileMethod2(MyThread* t)
@@ -3990,24 +4050,24 @@ compileMethod2(MyThread* t)
 }

 if (LIKELY(t->exception == 0)) {
-compile(t, target);
+compile(t, codeZone(t), 0, target);
 }

 if (UNLIKELY(t->exception)) {
 return 0;
 } else {
+void* address = reinterpret_cast<void*>(methodCompiled(t, target));
 if (callNodeVirtualCall(t, node)) {
 classVtable
 (t, objectClass
 (t, resolveThisPointer(t, t->stack, target)), methodOffset(t, target))
-= &singletonValue(t, methodCompiled(t, target), 0);
+= address;
 } else {
 Context context(t);
 context.assembler->updateCall
-(reinterpret_cast<void*>(callNodeAddress(t, node)),
-&singletonValue(t, methodCompiled(t, target), 0));
+(reinterpret_cast<void*>(callNodeAddress(t, node)), address);
 }
-return &singletonValue(t, methodCompiled(t, target), 0);
+return address;
 }
 }

@@ -4276,7 +4336,7 @@ visitStackAndLocals(MyThread* t, Heap::Visitor* v, void* base, object method,
 object map = codePool(t, methodCode(t, method));
 int index = frameMapIndex
 (t, method, difference
-(ip, &singletonValue(t, methodCompiled(t, method), 0)));
+(ip, reinterpret_cast<void*>(methodCompiled(t, method))));

 for (unsigned i = 0; i < count; ++i) {
 int j = index + i;
@@ -4510,7 +4570,7 @@ invoke(Thread* thread, object method, ArgumentList* arguments)
 }

 result = vmInvoke
-(t, &singletonValue(t, methodCompiled(t, method), 0), arguments->array,
+(t, reinterpret_cast<void*>(methodCompiled(t, method)), arguments->array,
 arguments->position, returnType);
 }

@@ -4593,8 +4653,41 @@ class SegFaultHandler: public System::SignalHandler {
 Machine* m;
 };

+class FixedAllocator: public Allocator {
+public:
+FixedAllocator(Thread* t, uint8_t* base, unsigned capacity):
+t(t), base(base), offset(0), capacity(capacity)
+{ }
+
+virtual void* tryAllocate(unsigned) {
+abort(t);
+}
+
+virtual void* allocate(unsigned size) {
+unsigned paddedSize = pad(size);
+expect(t, offset + paddedSize < capacity);
+
+void* p = base + offset;
+offset += paddedSize;
+return p;
+}
+
+virtual void free(const void*, unsigned) {
+abort(t);
+}
+
+Thread* t;
+uint8_t* base;
+unsigned offset;
+unsigned capacity;
+};
+
 class MyProcessor;

+void
+compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
+BootImage* image, uint8_t* imageBase);
+
 MyProcessor*
 processor(MyThread* t);

@@ -4690,8 +4783,8 @@ class MyProcessor: public Processor {
 virtual void
 initVtable(Thread* t, object c)
 {
-void* compiled = &singletonBody
-(t, ::defaultThunk(static_cast<MyThread*>(t)), 0);
+void* compiled = reinterpret_cast<void*>
+(::defaultThunk(static_cast<MyThread*>(t)));

 for (unsigned i = 0; i < classLength(t, c); ++i) {
 classVtable(t, c, i) = compiled;
@@ -4722,13 +4815,10 @@ class MyProcessor: public Processor {
 MyThread* t = static_cast<MyThread*>(vmt);

 if (t == t->m->rootThread) {
-v->visit(&defaultThunk);
-v->visit(&nativeThunk);
-v->visit(&aioobThunk);
-v->visit(&thunkTable);
 v->visit(&callTable);
 v->visit(&methodTree);
 v->visit(&methodTreeSentinal);
+v->visit(&objectPools);
 }

 for (MyThread::CallTrace* trace = t->trace; trace; trace = trace->next) {
@@ -4801,7 +4891,7 @@ class MyProcessor: public Processor {

 PROTECT(t, method);

-compile(static_cast<MyThread*>(t), method);
+compile(static_cast<MyThread*>(t), &codeZone, 0, method);

 if (LIKELY(t->exception == 0)) {
 return ::invoke(t, method, &list);
@@ -4832,7 +4922,7 @@ class MyProcessor: public Processor {

 PROTECT(t, method);

-compile(static_cast<MyThread*>(t), method);
+compile(static_cast<MyThread*>(t), &codeZone, 0, method);

 if (LIKELY(t->exception == 0)) {
 return ::invoke(t, method, &list);
@@ -4862,7 +4952,7 @@ class MyProcessor: public Processor {

 PROTECT(t, method);

-compile(static_cast<MyThread*>(t), method);
+compile(static_cast<MyThread*>(t), &codeZone, 0, method);

 if (LIKELY(t->exception == 0)) {
 return ::invoke(t, method, &list);
@@ -4911,8 +5001,7 @@ class MyProcessor: public Processor {
 target->base = base;
 target->stack = stack;
 } else {
-uint8_t* thunkStart = reinterpret_cast<uint8_t*>
-(&singletonValue(t, p->thunkTable, 0));
+uint8_t* thunkStart = p->thunkTable;
 uint8_t* thunkEnd = thunkStart + (p->thunkSize * ThunkCount);

 if (static_cast<uint8_t*>(ip) >= thunkStart
@@ -4952,17 +5041,49 @@ class MyProcessor: public Processor {
 return visitor.trace;
 }

+virtual void compileThunks(Thread* vmt, BootImage* image, uint8_t* code,
+unsigned* offset, unsigned capacity)
+{
+MyThread* t = static_cast<MyThread*>(vmt);
+FixedAllocator allocator(t, code + *offset, capacity);
+
+::compileThunks(t, &allocator, processor(t), image, code);
+
+*offset += allocator.offset;
+}
+
+virtual void compileMethod(Thread* vmt, Zone* zone, uint8_t* code,
+unsigned* offset, unsigned capacity, object table,
+object method)
+{
+MyThread* t = static_cast<MyThread*>(vmt);
+FixedAllocator allocator(t, code + *offset, capacity);
+BootContext bootContext(t, table, zone);
+
+compile(t, &allocator, &bootContext, method);
+
+*offset += allocator.offset;
+}
+
+virtual void visitRoots(BootImage* image, HeapWalker* w) {
+image->callTable = w->visitRoot(callTable);
+image->methodTree = w->visitRoot(methodTree);
+image->methodTreeSentinal = w->visitRoot(methodTreeSentinal);
+image->objectPools = w->visitRoot(objectPools);
+}
+
 System* s;
 Allocator* allocator;
-object defaultThunk;
-object nativeThunk;
-object aioobThunk;
-object thunkTable;
+uint8_t* defaultThunk;
+uint8_t* nativeThunk;
+uint8_t* aioobThunk;
+uint8_t* thunkTable;
 unsigned thunkSize;
 object callTable;
 unsigned callTableSize;
 object methodTree;
 object methodTreeSentinal;
+object objectPools;
 SegFaultHandler segFaultHandler;
 CodeAllocator codeAllocator;
 Zone codeZone;
@@ -4974,34 +5095,19 @@ getThunk(MyThread* t, Thunk thunk)
 MyProcessor* p = processor(t);

 return reinterpret_cast<intptr_t>
-(&singletonValue(t, p->thunkTable, (thunk * p->thunkSize) / BytesPerWord));
+(p->thunkTable + ((thunk * p->thunkSize) / BytesPerWord));
 }

 void
-compileThunks(MyThread* t, MyProcessor* p)
+compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
+BootImage* image, uint8_t* imageBase)
 {
 class ThunkContext {
 public:
-class MyPromise: public Promise {
-public:
-MyPromise(): resolved_(false) { }
-
-virtual int64_t value() {
-return value_;
-}
-
-virtual bool resolved() {
-return resolved_;
-}
-
-int64_t value_;
-bool resolved_;
-};
-
-ThunkContext(MyThread* t): context(t) { }
+ThunkContext(MyThread* t): context(t), promise(t->m->system) { }

 Context context;
-MyPromise promise;
+OfferPromise promise;
 };

 ThunkContext defaultContext(t);
@@ -5011,9 +5117,6 @@ compileThunks(MyThread* t, MyProcessor* p)
 saveStackAndBase(t, a);
 pushThread(t, a);

-defaultContext.promise.resolved_ = true;
-defaultContext.promise.value_ = reinterpret_cast<intptr_t>(compileMethod);
-
 Assembler::Constant proc(&(defaultContext.promise));
 a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);

@@ -5021,6 +5124,17 @@ compileThunks(MyThread* t, MyProcessor* p)

 Assembler::Register result(a->returnLow());
 a->apply(Jump, BytesPerWord, RegisterOperand, &result);
+
+if (image) {
+image->defaultThunk = static_cast<uint8_t*>
+(defaultContext.promise.offset) - imageBase;
+
+memset(defaultContext.promise.offset, 0, BytesPerWord);
+} else {
+memcpy(defaultContext.promise.offset,
+voidPointer(compileMethod),
+BytesPerWord);
+}
 }

 ThunkContext nativeContext(t);
@@ -5029,16 +5143,23 @@ compileThunks(MyThread* t, MyProcessor* p)

 saveStackAndBase(t, a);
 pushThread(t, a);

-nativeContext.promise.resolved_ = true;
-nativeContext.promise.value_ = reinterpret_cast<intptr_t>(invokeNative);
-
 Assembler::Constant proc(&(nativeContext.promise));
 a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);

 popThread(t, a);

 a->apply(Return);
+
+if (image) {
+image->nativeThunk = static_cast<uint8_t*>
+(nativeContext.promise.offset) - imageBase;
+
+memset(nativeContext.promise.offset, 0, BytesPerWord);
+} else {
+memcpy(nativeContext.promise.offset,
+voidPointer(invokeNative),
+BytesPerWord);
+}
 }

 ThunkContext aioobContext(t);
@@ -5048,12 +5169,19 @@ compileThunks(MyThread* t, MyProcessor* p)
 saveStackAndBase(t, a);
 pushThread(t, a);

-aioobContext.promise.resolved_ = true;
-aioobContext.promise.value_ = reinterpret_cast<intptr_t>
-(throwArrayIndexOutOfBounds);
-
 Assembler::Constant proc(&(aioobContext.promise));
 a->apply(LongCall, BytesPerWord, ConstantOperand, &proc);
+
+if (image) {
+image->aioobThunk = static_cast<uint8_t*>
+(aioobContext.promise.offset) - imageBase;
+
+memset(aioobContext.promise.offset, 0, BytesPerWord);
+} else {
+memcpy(aioobContext.promise.offset,
+voidPointer(throwArrayIndexOutOfBounds),
+BytesPerWord);
+}
 }

 ThunkContext tableContext(t);
@@ -5078,22 +5206,32 @@ compileThunks(MyThread* t, MyProcessor* p)
 + codeSingletonSizeInBytes
 (t, p->thunkSize * ThunkCount)));

-p->defaultThunk = finish(t, defaultContext.context.assembler, "default");
-p->nativeThunk = finish(t, nativeContext.context.assembler, "native");
-p->aioobThunk = finish(t, aioobContext.context.assembler, "aioob");
+p->defaultThunk = finish
+(t, allocator, defaultContext.context.assembler, "default");

-p->thunkTable = allocateCode(t, p->thunkSize * ThunkCount);
-uint8_t* start = reinterpret_cast<uint8_t*>
-(&singletonValue(t, p->thunkTable, 0));
+p->nativeThunk = finish
+(t, allocator, nativeContext.context.assembler, "native");

-logCompile(t, start, p->thunkSize * ThunkCount, 0, "thunkTable", 0);
+p->aioobThunk = finish
+(t, allocator, aioobContext.context.assembler, "aioob");

-tableContext.promise.resolved_ = true;
+p->thunkTable = static_cast<uint8_t*>
+(allocator->allocate(p->thunkSize * ThunkCount));
+
+logCompile(t, p->thunkTable, p->thunkSize * ThunkCount, 0, "thunkTable", 0);
+
+uint8_t* start = p->thunkTable;

 #define THUNK(s) \
-tableContext.promise.value_ = reinterpret_cast<intptr_t>(s); \
 tableContext.context.assembler->writeTo(start); \
-start += p->thunkSize;
+start += p->thunkSize; \
+if (image) { \
+image->s##Thunk = static_cast<uint8_t*> \
+(tableContext.promise.offset) - imageBase; \
+memset(tableContext.promise.offset, 0, BytesPerWord); \
+} else { \
+memcpy(tableContext.promise.offset, voidPointer(s), BytesPerWord); \
+}

 #include "thunks.cpp"

@@ -5114,7 +5252,7 @@ processor(MyThread* t)
 set(t, p->methodTree, TreeNodeLeft, p->methodTreeSentinal);
 set(t, p->methodTree, TreeNodeRight, p->methodTreeSentinal);

-compileThunks(t, p);
+compileThunks(t, codeZone(t), p, 0, 0);

 p->segFaultHandler.m = t->m;
 expect(t, t->m->system->success
@@ -5124,54 +5262,58 @@ processor(MyThread* t)
 return p;
 }

-object
+object&
+objectPools(MyThread* t)
+{
+return processor(t)->objectPools;
+}
+
+uintptr_t
 defaultThunk(MyThread* t)
 {
-return processor(t)->defaultThunk;
+return reinterpret_cast<uintptr_t>(processor(t)->defaultThunk);
 }

-object
+uintptr_t
 nativeThunk(MyThread* t)
 {
-return processor(t)->nativeThunk;
+return reinterpret_cast<uintptr_t>(processor(t)->nativeThunk);
 }

-object
+uintptr_t
 aioobThunk(MyThread* t)
 {
-return processor(t)->aioobThunk;
+return reinterpret_cast<uintptr_t>(processor(t)->aioobThunk);
 }

 void
-compile(MyThread* t, object method)
+compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
+object method)
 {
 MyProcessor* p = processor(t);

-if (methodCompiled(t, method) == p->defaultThunk) {
+if (methodCompiled(t, method) == defaultThunk(t)) {
 PROTECT(t, method);

 ACQUIRE(t, t->m->classLock);

-if (methodCompiled(t, method) == p->defaultThunk) {
+if (methodCompiled(t, method) == defaultThunk(t)) {
 initClass(t, methodClass(t, method));
 if (UNLIKELY(t->exception)) return;

-if (methodCompiled(t, method) == p->defaultThunk) {
+if (methodCompiled(t, method) == defaultThunk(t)) {
 object node;
-object compiled;
+uint8_t* compiled;
 if (methodFlags(t, method) & ACC_NATIVE) {
 node = 0;
 compiled = p->nativeThunk;
 } else {
-Context context(t, method);
-compiled = compile(t, &context);
+Context context(t, bootContext, method);
+compiled = compile(t, allocator, &context);
 if (UNLIKELY(t->exception)) return;

-PROTECT(t, compiled);
-
 if (DebugMethodTree) {
-fprintf(stderr, "insert method at %p\n",
-&singletonValue(t, compiled, 0));
+fprintf(stderr, "insert method at %p\n", compiled);
 }

 // We can't set the MethodCompiled field on the original
@@ -5197,7 +5339,7 @@ compile(MyThread* t, object method)
 methodSpec(t, method),
 methodClass(t, method),
 methodCode(t, method),
-compiled);
+reinterpret_cast<intptr_t>(compiled));

 node = makeTreeNode
 (t, clone, methodTreeSentinal(t), methodTreeSentinal(t));
@@ -5205,16 +5347,15 @@ compile(MyThread* t, object method)
 PROTECT(t, node);

 methodTree(t) = treeInsertNode
-(t, methodTree(t), reinterpret_cast<intptr_t>
-(&singletonValue(t, compiled, 0)), node, methodTreeSentinal(t),
-compareIpToMethodBounds);
+(t, methodTree(t), reinterpret_cast<intptr_t>(compiled), node,
+methodTreeSentinal(t), compareIpToMethodBounds);
 }

-set(t, method, MethodCompiled, compiled);
+methodCompiled(t, method) = reinterpret_cast<intptr_t>(compiled);

 if (methodVirtual(t, method)) {
 classVtable(t, methodClass(t, method), methodOffset(t, method))
-= &singletonValue(t, compiled, 0);
+= compiled;
 }

 if (node) {
@@ -56,7 +56,7 @@ class Finder {
 unsigned currentSize;
 };

-virtual IteratorImp* iterator();
+virtual IteratorImp* iterator() = 0;
 virtual System::Region* find(const char* name) = 0;
 virtual bool exists(const char* name) = 0;
 virtual const char* path() = 0;
@@ -55,9 +55,9 @@ namespace vm {
 void
 dumpHeap(Thread* t, FILE* out)
 {
-class Walker: public HeapWalker {
+class Visitor: public HeapVisitor {
 public:
-Walker(Thread* t, FILE* out): t(t), out(out), nextNumber(1) { }
+Visitor(Thread* t, FILE* out): t(t), out(out), nextNumber(1) { }

 virtual void root() {
 write1(out, Root);
@@ -102,9 +102,11 @@ dumpHeap(Thread* t, FILE* out)
 Thread* t;
 FILE* out;
 unsigned nextNumber;
-} walker(t, out);
+} visitor(t, out);

-walk(t, &walker)->dispose();
+HeapWalker* w = makeHeapWalker(t, &visitor);
+w->visitAllRoots();
+w->dispose();
 }

 } // namespace vm
@@ -1,3 +1,14 @@
+/* Copyright (c) 2008, Avian Contributors
+
+Permission to use, copy, modify, and/or distribute this software
+for any purpose with or without fee is hereby granted, provided
+that the above copyright notice and this permission notice appear
+in all copies.
+
+There is NO WARRANTY for this software. See license.txt for
+details. */
+
+#include "machine.h"
 #include "heapwalk.h"

 using namespace vm;
@@ -69,7 +80,11 @@ class Context {
 thread(thread), objects(0), stack(0)
 { }

-~Context() {
+void dispose() {
+if (objects) {
+objects->dispose();
+}
+
 while (stack) {
 Stack* dead = stack;
 stack = dead->next;
@@ -224,21 +239,22 @@ objectSize(Thread* t, object o)
 return n;
 }

-void
-walk(Context* c, HeapWalker* w, object p)
+unsigned
+walk(Context* c, HeapVisitor* v, object p)
 {
 Thread* t = c->thread;
+object root = p;
 int nextChildOffset;

-w->root();
+v->root();

 visit: {
 Set::Entry* e = find(c, p);
 if (e) {
-w->visitOld(p, e->number);
+v->visitOld(p, e->number);
 } else {
 e = add(c, p);
-e->number = w->visitNew(p);
+e->number = v->visitNew(p);

 nextChildOffset = walkNext(t, p, -1);
 if (nextChildOffset != -1) {
@@ -250,7 +266,7 @@ walk(Context* c, HeapWalker* w, object p)
 goto pop;

 children: {
-w->push(nextChildOffset);
+v->push(nextChildOffset);
 push(c, p, nextChildOffset);
 p = get(p, nextChildOffset);
 goto visit;
@@ -258,7 +274,7 @@ walk(Context* c, HeapWalker* w, object p)

 pop: {
 if (pop(c, &p, &nextChildOffset)) {
-w->pop();
+v->pop();
 nextChildOffset = walkNext(t, p, nextChildOffset);
 if (nextChildOffset >= 0) {
 goto children;
@@ -267,34 +283,59 @@ walk(Context* c, HeapWalker* w, object p)
 }
 }
 }

+return find(c, root)->number;
 }

+class MyHeapWalker: public HeapWalker {
+public:
+MyHeapWalker(Thread* t, HeapVisitor* v):
+context(t), visitor(v)
+{
+add(&context, 0)->number = v->visitNew(0);
+}
+
+virtual unsigned visitRoot(object root) {
+return walk(&context, visitor, root);
+}
+
+virtual void visitAllRoots() {
+class Visitor: public Heap::Visitor {
+public:
+Visitor(Context* c, HeapVisitor* v): c(c), v(v) { }
+
+virtual void visit(void* p) {
+walk(c, v, static_cast<object>(mask(*static_cast<void**>(p))));
+}
+
+Context* c;
+HeapVisitor* v;
+} v(&context, visitor);
+
+visitRoots(context.thread->m, &v);
+}
+
+virtual HeapMap* map() {
+return context.objects;
+}
+
+virtual void dispose() {
+context.dispose();
+context.thread->m->heap->free(this, sizeof(MyHeapWalker));
+}
+
+Context context;
+HeapVisitor* visitor;
+};
+
 } // namespace

 namespace vm {

-HeapMap*
-walk(Thread* t, HeapWalker* w)
+HeapWalker*
+makeHeapWalker(Thread* t, HeapVisitor* v)
 {
-Context context(t);
-
-class Visitor: public Heap::Visitor {
-public:
-Visitor(Context* c, HeapWalker* w): c(c), w(w) { }
-
-virtual void visit(void* p) {
-walk(c, w, static_cast<object>(mask(*static_cast<void**>(p))));
-}
-
-Context* c;
-HeapWalker* w;
-} v(&context, w);
-
-add(&context, 0)->number = w->visitNew(0);
-
-visitRoots(t->m, &v);
-
-return context.objects;
+return new (t->m->heap->allocate(sizeof(MyHeapWalker))) MyHeapWalker(t, v);
 }

 } // namespace vm
@@ -1,14 +1,29 @@
-#include "machine.h"
+/* Copyright (c) 2008, Avian Contributors
+
+Permission to use, copy, modify, and/or distribute this software
+for any purpose with or without fee is hereby granted, provided
+that the above copyright notice and this permission notice appear
+in all copies.
+
+There is NO WARRANTY for this software. See license.txt for
+details. */
+
+#ifndef HEAPWALK_H
+#define HEAPWALK_H
+
+#include "common.h"

 namespace vm {

+class Thread;
+
 class HeapMap {
 public:
 virtual int find(object value) = 0;
 virtual void dispose() = 0;
 };

-class HeapWalker {
+class HeapVisitor {
 public:
 virtual void root() = 0;
 virtual unsigned visitNew(object value) = 0;
@@ -17,7 +32,17 @@ class HeapWalker {
 virtual void pop() = 0;
 };

-HeapMap*
-walk(Thread* t, HeapWalker* w);
+class HeapWalker {
+public:
+virtual unsigned visitRoot(object root) = 0;
+virtual void visitAllRoots() = 0;
+virtual HeapMap* map() = 0;
+virtual void dispose() = 0;
+};
+
+HeapWalker*
+makeHeapWalker(Thread* t, HeapVisitor* v);

 } // namespace vm
+
+#endif//HEAPWALK_H
@@ -14,6 +14,9 @@
 #include "common.h"
 #include "system.h"
 #include "heap.h"
+#include "bootimage.h"
+#include "heapwalk.h"
+#include "zone.h"

 namespace vm {

@@ -112,6 +115,17 @@ class Processor {
 virtual object
 getStackTrace(Thread* t, Thread* target) = 0;

+virtual void
+compileThunks(Thread* t, BootImage* image, uint8_t* code, unsigned* size,
+unsigned capacity) = 0;
+
+virtual void
+compileMethod(Thread* t, Zone* zone, uint8_t* code, unsigned* offset,
+unsigned capacity, object table, object method) = 0;
+
+virtual void
+visitRoots(BootImage* image, HeapWalker* w) = 0;
+
 object
 invoke(Thread* t, object method, object this_, ...)
 {
@@ -178,8 +178,12 @@ class ImmediateTask: public Task {
 { }

 virtual void run(Context* c) {
+if (promise->resolved()) {
 intptr_t v = promise->value();
 memcpy(c->result + offset, &v, BytesPerWord);
+} else if (not promise->offer(c->result + offset)) {
+abort(c);
+}
 }

 Promise* promise;