lots of bugfixes and refactoring

Among them: Promise listeners are chained so every waiting call site is
resolved; the boot image carries class, string, and call tables that are
rebuilt at boot; systemClassLoaderMap gives way to Machine::classMap;
methodAddress() sends native methods to the native thunk; the immortal heap
becomes a simple start/end range; and updateCall can patch 64-bit
mov r10 / call r10 sequences.

Joel Dice 2008-12-01 19:38:00 -07:00
parent 4a1dd3a8f7
commit 25ade1484a
13 changed files with 814 additions and 599 deletions

View File

@ -14,8 +14,6 @@ import java.net.URL;
import java.net.MalformedURLException;
public class SystemClassLoader extends ClassLoader {
private Object map;
protected native Class findClass(String name) throws ClassNotFoundException;
protected native Class findLoadedClass(String name);

View File

@ -81,6 +81,8 @@ class Promise {
class Listener {
public:
virtual void* resolve(int64_t value) = 0;
Listener* next;
};
virtual int64_t value() = 0;
@ -118,7 +120,10 @@ class ListenPromise: public Promise {
}
virtual Listener* listen(unsigned sizeInBytes) {
return listener = static_cast<Listener*>(allocator->allocate(sizeInBytes));
Listener* l = static_cast<Listener*>(allocator->allocate(sizeInBytes));
l->next = listener;
listener = l;
return l;
}
System* s;
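
Note: the hunk above turns ListenPromise's single listener into a linked list, so every call site waiting on the same promise gets patched when it resolves (updateConstants below walks the chain). The following minimal standalone sketch shows the chained-listener pattern; names are simplified and nodes are caller-supplied instead of coming from an Allocator, so this is not the Avian Promise class itself.

// Standalone sketch of the chained-listener pattern: listen() pushes a node
// onto a singly linked list and resolveAll() walks every node, so each call
// site waiting on the promise sees the final value.
#include <cstdint>
#include <cstdio>

struct Listener {
  Listener* next;
  int64_t* target;               // e.g. the call-site word to patch

  void resolve(int64_t value) {
    *target = value;
  }
};

struct ListenPromise {
  Listener* listener = nullptr;  // head of the chain

  Listener* listen(Listener* l) {
    l->next = listener;          // push onto the front of the list
    listener = l;
    return l;
  }

  void resolveAll(int64_t value) {
    for (Listener* l = listener; l; l = l->next) {
      l->resolve(value);
    }
  }
};

int main() {
  int64_t a = 0, b = 0;
  Listener la = { nullptr, &a };
  Listener lb = { nullptr, &b };

  ListenPromise p;
  p.listen(&la);
  p.listen(&lb);
  p.resolveAll(42);              // both waiting sites see the value

  std::printf("%d %d\n", static_cast<int>(a), static_cast<int>(b));
}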

View File

@ -67,8 +67,16 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
}
for (; calls; calls = tripleThird(t, calls)) {
object method = tripleFirst(t, calls);
uintptr_t address;
if (methodFlags(t, method) & ACC_NATIVE) {
address = reinterpret_cast<uintptr_t>(code + image->nativeThunk);
} else {
address = methodCompiled(t, method);
}
static_cast<ListenPromise*>(pointerValue(t, tripleSecond(t, calls)))
->listener->resolve(methodCompiled(t, tripleFirst(t, calls)));
->listener->resolve(address);
}
image->codeSize = size;
@ -80,6 +88,7 @@ unsigned
objectSize(Thread* t, object o)
{
assert(t, not objectExtended(t, o));
return baseSize(t, o, objectClass(t, o));
}
@ -88,8 +97,11 @@ visitRoots(Thread* t, BootImage* image, HeapWalker* w, object constants)
{
Machine* m = t->m;
for (HashMapIterator it(t, m->classMap); it.hasMore();) {
w->visitRoot(tripleSecond(t, it.next()));
}
image->loader = w->visitRoot(m->loader);
image->stringMap = w->visitRoot(m->stringMap);
image->types = w->visitRoot(m->types);
m->processor->visitRoots(image, w);
@ -99,21 +111,6 @@ visitRoots(Thread* t, BootImage* image, HeapWalker* w, object constants)
}
}
void
visitReference(Thread* t, HeapWalker* w, uintptr_t* heap, uintptr_t* map,
object r)
{
int target = w->map()->find(jreferenceTarget(t, r));
assert(t, target > 0);
int reference = w->map()->find(r);
assert(t, reference > 0);
unsigned index = reference - 1 + (JreferenceTarget / BytesPerWord);
markBit(map, index);
heap[index] = target;
}
HeapWalker*
makeHeapImage(Thread* t, BootImage* image, uintptr_t* heap, uintptr_t* map,
unsigned capacity, object constants)
@ -121,29 +118,56 @@ makeHeapImage(Thread* t, BootImage* image, uintptr_t* heap, uintptr_t* map,
class Visitor: public HeapVisitor {
public:
Visitor(Thread* t, uintptr_t* heap, uintptr_t* map, unsigned capacity):
t(t), current(0), heap(heap), map(map), position(0), capacity(capacity)
t(t), currentObject(0), currentNumber(0), currentOffset(0), heap(heap),
map(map), position(0), capacity(capacity)
{ }
void visit(unsigned number) {
if (current) {
if (number) markBit(map, current - 1);
heap[current - 1] = number;
if (currentObject) {
unsigned offset = currentNumber - 1 + currentOffset;
unsigned mark = heap[offset] & (~PointerMask);
unsigned value = number | (mark << BootShift);
if (value) markBit(map, offset);
heap[offset] = value;
}
}
virtual void root() {
current = 0;
currentObject = 0;
}
virtual unsigned visitNew(object p) {
if (p) {
unsigned size = objectSize(t, p);
assert(t, position + size < capacity);
memcpy(heap + position, p, size * BytesPerWord);
unsigned number;
if (currentObject
and (currentOffset * BytesPerWord) == ClassStaticTable)
{
FixedAllocator allocator
(t, reinterpret_cast<uint8_t*>(heap + position),
(capacity - position) * BytesPerWord);
unsigned number = position + 1;
position += size;
unsigned totalInBytes;
uintptr_t* dst = static_cast<uintptr_t*>
(t->m->heap->allocateImmortalFixed
(&allocator, size, true, &totalInBytes));
memcpy(dst, p, size * BytesPerWord);
dst[0] |= FixedMark;
number = (dst - heap) + 1;
position += ceiling(totalInBytes, BytesPerWord);
} else {
assert(t, position + size < capacity);
memcpy(heap + position, p, size * BytesPerWord);
number = position + 1;
position += size;
}
visit(number);
@ -157,16 +181,20 @@ makeHeapImage(Thread* t, BootImage* image, uintptr_t* heap, uintptr_t* map,
visit(number);
}
virtual void push(object, unsigned number, unsigned offset) {
current = number + offset;
virtual void push(object object, unsigned number, unsigned offset) {
currentObject = object;
currentNumber = number;
currentOffset = offset;
}
virtual void pop() {
current = 0;
currentObject = 0;
}
Thread* t;
unsigned current;
object currentObject;
unsigned currentNumber;
unsigned currentOffset;
uintptr_t* heap;
uintptr_t* map;
unsigned position;
@ -176,14 +204,6 @@ makeHeapImage(Thread* t, BootImage* image, uintptr_t* heap, uintptr_t* map,
HeapWalker* w = makeHeapWalker(t, &visitor);
visitRoots(t, image, w, constants);
for (object r = t->m->weakReferences; r; r = jreferenceVmNext(t, r)) {
visitReference(t, w, heap, map, r);
}
for (object r = t->m->tenuredWeakReferences; r; r = jreferenceVmNext(t, r)) {
visitReference(t, w, heap, map, r);
}
image->heapSize = visitor.position * BytesPerWord;
return w;
@ -197,14 +217,18 @@ updateConstants(Thread* t, object constants, uint8_t* code, uintptr_t* codeMap,
unsigned target = heapTable->find(tripleFirst(t, constants));
assert(t, target > 0);
void* dst = static_cast<ListenPromise*>
(pointerValue(t, tripleSecond(t, constants)))->listener->resolve(target);
for (Promise::Listener* pl = static_cast<ListenPromise*>
(pointerValue(t, tripleSecond(t, constants)))->listener;
pl; pl = pl->next)
{
void* dst = pl->resolve(target);
assert(t, reinterpret_cast<intptr_t>(dst)
>= reinterpret_cast<intptr_t>(code));
assert(t, reinterpret_cast<intptr_t>(dst)
>= reinterpret_cast<intptr_t>(code));
markBit(codeMap, reinterpret_cast<intptr_t>(dst)
- reinterpret_cast<intptr_t>(code));
markBit(codeMap, reinterpret_cast<intptr_t>(dst)
- reinterpret_cast<intptr_t>(code));
}
}
}
@ -227,6 +251,7 @@ writeBootImage(Thread* t, FILE* out)
memset(codeMap, 0, codeMapSize(CodeCapacity));
object constants = makeCodeImage(t, &zone, &image, code, CodeCapacity);
PROTECT(t, constants);
const unsigned HeapCapacity = 32 * 1024 * 1024;
uintptr_t* heap = static_cast<uintptr_t*>
@ -235,7 +260,6 @@ writeBootImage(Thread* t, FILE* out)
(t->m->heap->allocate(heapMapSize(HeapCapacity)));
memset(heapMap, 0, heapMapSize(HeapCapacity));
PROTECT(t, constants);
collect(t, Heap::MajorCollection);
HeapWalker* heapWalker = makeHeapImage
@ -243,17 +267,57 @@ writeBootImage(Thread* t, FILE* out)
updateConstants(t, constants, code, codeMap, heapWalker->map());
image.classCount = hashMapSize(t, t->m->classMap);
unsigned* classTable = static_cast<unsigned*>
(t->m->heap->allocate(image.classCount * sizeof(unsigned)));
{ unsigned i = 0;
for (HashMapIterator it(t, t->m->classMap); it.hasMore();) {
classTable[i++] = heapWalker->map()->find(tripleSecond(t, it.next()));
}
}
image.stringCount = hashMapSize(t, t->m->stringMap);
unsigned* stringTable = static_cast<unsigned*>
(t->m->heap->allocate(image.stringCount * sizeof(unsigned)));
{ unsigned i = 0;
for (HashMapIterator it(t, t->m->stringMap); it.hasMore();) {
stringTable[i++] = heapWalker->map()->find
(jreferenceTarget(t, tripleFirst(t, it.next())));
}
}
unsigned* callTable = t->m->processor->makeCallTable
(t, &image, heapWalker, code);
heapWalker->dispose();
image.magic = BootImage::Magic;
image.codeBase = reinterpret_cast<uintptr_t>(code);
fprintf(stderr, "heap size %d code size %d\n",
image.heapSize, image.codeSize);
fprintf(stderr, "class count %d string count %d call count %d\n"
"heap size %d code size %d\n",
image.classCount, image.stringCount, image.callCount, image.heapSize,
image.codeSize);
if (true) {
fwrite(&image, sizeof(BootImage), 1, out);
fwrite(classTable, image.classCount * sizeof(unsigned), 1, out);
fwrite(stringTable, image.stringCount * sizeof(unsigned), 1, out);
fwrite(callTable, image.callCount * sizeof(unsigned) * 2, 1, out);
unsigned offset = (image.classCount * sizeof(unsigned))
+ (image.stringCount * sizeof(unsigned))
+ (image.callCount * sizeof(unsigned) * 2);
while (offset % BytesPerWord) {
uint8_t c = 0;
fwrite(&c, 1, 1, out);
++ offset;
}
fwrite(heapMap, pad(heapMapSize(image.heapSize)), 1, out);
fwrite(heap, pad(image.heapSize), 1, out);

View File

@ -15,6 +15,10 @@
namespace vm {
const unsigned BootMask = (~static_cast<unsigned>(0)) / BytesPerWord;
const unsigned BootShift = 32 - log(BytesPerWord);
class BootImage {
public:
static const unsigned Magic = 0x22377322;
@ -24,11 +28,12 @@ class BootImage {
unsigned heapSize;
unsigned codeSize;
unsigned loader;
unsigned stringMap;
unsigned types;
unsigned classCount;
unsigned stringCount;
unsigned callCount;
unsigned callTable;
unsigned loader;
unsigned types;
unsigned methodTree;
unsigned methodTreeSentinal;

View File

@ -1244,12 +1244,34 @@ tryInitClass(MyThread* t, object class_)
if (UNLIKELY(t->exception)) unwind(t);
}
object&
objectPools(MyThread* t);
uintptr_t
defaultThunk(MyThread* t);
uintptr_t
nativeThunk(MyThread* t);
uintptr_t
aioobThunk(MyThread* t);
uintptr_t
methodAddress(Thread* t, object method)
{
if (methodFlags(t, method) & ACC_NATIVE) {
return nativeThunk(static_cast<MyThread*>(t));
} else {
return methodCompiled(t, method);
}
}
void* FORCE_ALIGN
findInterfaceMethodFromInstance(MyThread* t, object method, object instance)
{
if (instance) {
return reinterpret_cast<void*>
(methodCompiled
(methodAddress
(t, findInterfaceMethod(t, method, objectClass(t, instance))));
} else {
t->exception = makeNullPointerException(t);
@ -1748,18 +1770,6 @@ emptyMethod(MyThread* t, object method)
and (codeBody(t, methodCode(t, method), 0) == return_);
}
object&
objectPools(MyThread* t);
uintptr_t
defaultThunk(MyThread* t);
uintptr_t
nativeThunk(MyThread* t);
uintptr_t
aioobThunk(MyThread* t);
void
compileDirectInvoke(MyThread* t, Frame* frame, object target)
{
@ -1782,10 +1792,13 @@ compileDirectInvoke(MyThread* t, Frame* frame, object target)
object pointer = makePointer(t, p);
bc->calls = makeTriple(t, target, pointer, bc->calls);
object traceTarget
= (methodFlags(t, target) & ACC_NATIVE) ? target : 0;
result = c->call
(c->promiseConstant(p),
0,
frame->trace(0, false),
frame->trace(traceTarget, false),
rSize,
0);
} else {
@ -1796,14 +1809,7 @@ compileDirectInvoke(MyThread* t, Frame* frame, object target)
rSize,
0);
}
} else if (methodFlags(t, target) & ACC_NATIVE) {
result = c->call
(c->constant(nativeThunk(t)),
0,
frame->trace(target, false),
rSize,
0);
} else if (methodCompiled(t, target) == defaultThunk(t)
} else if (methodAddress(t, target) == defaultThunk(t)
or classNeedsInit(t, methodClass(t, target)))
{
result = c->call
@ -1813,10 +1819,13 @@ compileDirectInvoke(MyThread* t, Frame* frame, object target)
rSize,
0);
} else {
object traceTarget
= (methodFlags(t, target) & ACC_NATIVE) ? target : 0;
result = c->call
(c->constant(methodCompiled(t, target)),
(c->constant(methodAddress(t, target)),
0,
frame->trace(0, false),
frame->trace(traceTarget, false),
rSize,
0);
}
@ -2460,7 +2469,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned ip,
if (instruction == getstatic) {
if (fieldClass(t, field) != methodClass(t, context->method)
and classNeedsInit(t, fieldClass(t, field)));
and classNeedsInit(t, fieldClass(t, field)))
{
c->call
(c->constant(getThunk(t, tryInitClassThunk)),
@ -4082,7 +4091,7 @@ compileMethod2(MyThread* t)
if (UNLIKELY(t->exception)) {
return 0;
} else {
void* address = reinterpret_cast<void*>(methodCompiled(t, target));
void* address = reinterpret_cast<void*>(methodAddress(t, target));
if (callNodeVirtualCall(t, node)) {
classVtable
(t, objectClass
@ -4090,7 +4099,7 @@ compileMethod2(MyThread* t)
= address;
} else {
updateCall
(t, LongCall, true, reinterpret_cast<void*>(callNodeAddress(t, node)),
(t, Call, true, reinterpret_cast<void*>(callNodeAddress(t, node)),
address);
}
return address;
@ -4119,7 +4128,7 @@ invokeNative2(MyThread* t, object method)
initClass(t, methodClass(t, method));
if (UNLIKELY(t->exception)) return 0;
if (methodCode(t, method) == 0) {
if (methodCompiled(t, method) == defaultThunk(t)) {
void* function = resolveNativeMethod(t, method);
if (UNLIKELY(function == 0)) {
object message = makeString
@ -4131,8 +4140,7 @@ invokeNative2(MyThread* t, object method)
return 0;
}
object p = makePointer(t, function);
set(t, method, MethodCode, p);
methodCompiled(t, method) = reinterpret_cast<uintptr_t>(function);
}
object class_ = methodClass(t, method);
@ -4198,7 +4206,7 @@ invokeNative2(MyThread* t, object method)
}
}
void* function = pointerValue(t, methodCode(t, method));
void* function = reinterpret_cast<void*>(methodCompiled(t, method));
unsigned returnCode = methodReturnCode(t, method);
unsigned returnType = fieldType(t, returnCode);
uint64_t result;
@ -4362,7 +4370,7 @@ visitStackAndLocals(MyThread* t, Heap::Visitor* v, void* base, object method,
object map = codePool(t, methodCode(t, method));
int index = frameMapIndex
(t, method, difference
(ip, reinterpret_cast<void*>(methodCompiled(t, method))));
(ip, reinterpret_cast<void*>(methodAddress(t, method))));
for (unsigned i = 0; i < count; ++i) {
int j = index + i;
@ -4596,8 +4604,8 @@ invoke(Thread* thread, object method, ArgumentList* arguments)
}
result = vmInvoke
(t, reinterpret_cast<void*>(methodCompiled(t, method)), arguments->array,
arguments->position, returnType);
(t, reinterpret_cast<void*>(methodAddress(t, method)),
arguments->array, arguments->position, returnType);
}
if (t->exception) {
@ -4679,60 +4687,8 @@ class SegFaultHandler: public System::SignalHandler {
Machine* m;
};
object
fixupCallTable(MyThread* t, object oldTable, uintptr_t oldBase,
uintptr_t newBase)
{
PROTECT(t, oldTable);
object newTable = makeArray(t, arrayLength(t, oldTable), true);
for (unsigned i = 0; i < arrayLength(t, oldTable); ++i) {
object next;
for (object p = arrayBody(t, oldTable, i); p; p = next) {
next = callNodeNext(t, p);
intptr_t k = (callNodeAddress(t, p) - oldBase) + newBase;
callNodeAddress(t, p) = k;
unsigned index = k & (arrayLength(t, newTable) - 1);
set(t, p, CallNodeNext, arrayBody(t, newTable, index));
set(t, newTable, ArrayBody + (index * BytesPerWord), p);
}
}
return newTable;
}
class FixedAllocator: public Allocator {
public:
FixedAllocator(Thread* t, uint8_t* base, unsigned capacity):
t(t), base(base), offset(0), capacity(capacity)
{ }
virtual void* tryAllocate(unsigned) {
abort(t);
}
virtual void* allocate(unsigned size) {
unsigned paddedSize = pad(size);
expect(t, offset + paddedSize < capacity);
void* p = base + offset;
offset += paddedSize;
return p;
}
virtual void free(const void*, unsigned) {
abort(t);
}
Thread* t;
uint8_t* base;
unsigned offset;
unsigned capacity;
};
void
boot(MyThread* t, BootImage* image);
class MyProcessor;
@ -4777,6 +4733,7 @@ class MyProcessor: public Processor {
methodTree(0),
methodTreeSentinal(0),
objectPools(0),
staticTableArray(0),
codeAllocator(s),
codeZone(s, &codeAllocator, 64 * 1024)
{ }
@ -4870,6 +4827,7 @@ class MyProcessor: public Processor {
v->visit(&methodTree);
v->visit(&methodTreeSentinal);
v->visit(&objectPools);
v->visit(&staticTableArray);
}
for (MyThread::CallTrace* trace = t->trace; trace; trace = trace->next) {
@ -5034,7 +4992,7 @@ class MyProcessor: public Processor {
virtual object getStackTrace(Thread* vmt, Thread* vmTarget) {
MyThread* t = static_cast<MyThread*>(vmt);
MyThread* target = static_cast<MyThread*>(vmTarget);
MyProcessor* p = processor(t);
MyProcessor* p = this;
class Visitor: public System::ThreadVisitor {
public:
@ -5098,7 +5056,7 @@ class MyProcessor: public Processor {
MyThread* t = static_cast<MyThread*>(vmt);
FixedAllocator allocator(t, code + *offset, capacity);
::compileThunks(t, &allocator, processor(t), image, code);
::compileThunks(t, &allocator, this, image, code);
*offset += allocator.offset;
}
@ -5119,47 +5077,47 @@ class MyProcessor: public Processor {
}
virtual void visitRoots(BootImage* image, HeapWalker* w) {
image->callTable = w->visitRoot(callTable);
image->methodTree = w->visitRoot(methodTree);
image->methodTreeSentinal = w->visitRoot(methodTreeSentinal);
}
virtual void boot(Thread* vmt, BootImage* image, uintptr_t* heap,
uint8_t* code)
virtual unsigned* makeCallTable(Thread* t, BootImage* image, HeapWalker* w,
uint8_t* code)
{
MyThread* t = static_cast<MyThread*>(vmt);
methodTree = bootObject(heap, image->methodTree);
methodTreeSentinal = bootObject(heap, image->methodTreeSentinal);
image->callCount = callTableSize;
callTable = fixupCallTable
(t, bootObject(heap, image->callTable), image->codeBase,
reinterpret_cast<uintptr_t>(code));
unsigned* table = static_cast<unsigned*>
(t->m->heap->allocate(callTableSize * sizeof(unsigned) * 2));
defaultThunk = code + image->defaultThunk;
unsigned index = 0;
for (unsigned i = 0; i < arrayLength(t, callTable); ++i) {
for (object p = arrayBody(t, callTable, i); p; p = callNodeNext(t, p)) {
table[index++] = callNodeAddress(t, p)
- reinterpret_cast<uintptr_t>(code);
table[index++] = w->map()->find(callNodeTarget(t, p))
| (static_cast<unsigned>(callNodeVirtualCall(t, p)) << BootShift);
}
}
updateCall(t, LongCall, false, code + image->compileMethodCall,
voidPointer(::compileMethod));
return table;
}
nativeThunk = code + image->nativeThunk;
virtual void boot(Thread* t, BootImage* image) {
if (image) {
::boot(static_cast<MyThread*>(t), image);
} else {
callTable = makeArray(t, 128, true);
updateCall(t, LongCall, false, code + image->invokeNativeCall,
voidPointer(invokeNative));
methodTree = methodTreeSentinal = makeTreeNode(t, 0, 0, 0);
set(t, methodTree, TreeNodeLeft, methodTreeSentinal);
set(t, methodTree, TreeNodeRight, methodTreeSentinal);
aioobThunk = code + image->aioobThunk;
::compileThunks(static_cast<MyThread*>(t), &codeZone, this, 0, 0);
}
updateCall(t, LongCall, false,
code + image->throwArrayIndexOutOfBoundsCall,
voidPointer(throwArrayIndexOutOfBounds));
thunkTable = code + image->thunkTable;
thunkSize = image->thunkSize;
#define THUNK(s) \
updateCall(t, LongJump, false, code + image->s##Call, voidPointer(s));
#include "thunks.cpp"
#undef THUNK
segFaultHandler.m = t->m;
expect(t, t->m->system->success
(t->m->system->handleSegFault(&segFaultHandler)));
}
System* s;
@ -5174,11 +5132,344 @@ class MyProcessor: public Processor {
object methodTree;
object methodTreeSentinal;
object objectPools;
object staticTableArray;
SegFaultHandler segFaultHandler;
CodeAllocator codeAllocator;
Zone codeZone;
};
object
findCallNode(MyThread* t, void* address)
{
if (DebugCallTable) {
fprintf(stderr, "find call node %p\n", address);
}
MyProcessor* p = processor(t);
object table = p->callTable;
intptr_t key = reinterpret_cast<intptr_t>(address);
unsigned index = static_cast<uintptr_t>(key)
& (arrayLength(t, table) - 1);
for (object n = arrayBody(t, table, index);
n; n = callNodeNext(t, n))
{
intptr_t k = callNodeAddress(t, n);
if (k == key) {
return n;
}
}
return 0;
}
object
resizeTable(MyThread* t, object oldTable, unsigned newLength)
{
PROTECT(t, oldTable);
object oldNode = 0;
PROTECT(t, oldNode);
object newTable = makeArray(t, newLength, true);
PROTECT(t, newTable);
for (unsigned i = 0; i < arrayLength(t, oldTable); ++i) {
for (oldNode = arrayBody(t, oldTable, i);
oldNode;
oldNode = callNodeNext(t, oldNode))
{
intptr_t k = callNodeAddress(t, oldNode);
unsigned index = k & (newLength - 1);
object newNode = makeCallNode
(t, callNodeAddress(t, oldNode),
callNodeTarget(t, oldNode),
callNodeVirtualCall(t, oldNode),
arrayBody(t, newTable, index));
set(t, newTable, ArrayBody + (index * BytesPerWord), newNode);
}
}
return newTable;
}
object
insertCallNode(MyThread* t, object table, unsigned* size, object node)
{
if (DebugCallTable) {
fprintf(stderr, "insert call node %p\n",
reinterpret_cast<void*>(callNodeAddress(t, node)));
}
PROTECT(t, table);
PROTECT(t, node);
++ (*size);
if (*size >= arrayLength(t, table) * 2) {
table = resizeTable(t, table, arrayLength(t, table) * 2);
}
intptr_t key = callNodeAddress(t, node);
unsigned index = static_cast<uintptr_t>(key) & (arrayLength(t, table) - 1);
set(t, node, CallNodeNext, arrayBody(t, table, index));
set(t, table, ArrayBody + (index * BytesPerWord), node);
return table;
}
void
insertCallNode(MyThread* t, object node)
{
MyProcessor* p = processor(t);
p->callTable = insertCallNode(t, p->callTable, &(p->callTableSize), node);
}
object
makeClassMap(Thread* t, unsigned* table, unsigned count, uintptr_t* heap)
{
object array = makeArray(t, nextPowerOfTwo(count), true);
object map = makeHashMap(t, 0, array);
PROTECT(t, map);
for (unsigned i = 0; i < count; ++i) {
object c = bootObject(heap, table[i]);
hashMapInsert(t, map, className(t, c), c, byteArrayHash);
}
return map;
}
object
makeStaticTableArray(Thread* t, unsigned* table, unsigned count,
uintptr_t* heap)
{
object array = makeArray(t, count, false);
for (unsigned i = 0; i < count; ++i) {
set(t, array, ArrayBody + (i * BytesPerWord),
classStaticTable(t, bootObject(heap, table[i])));
}
return array;
}
object
makeStringMap(Thread* t, unsigned* table, unsigned count, uintptr_t* heap)
{
object array = makeArray(t, nextPowerOfTwo(count), true);
object map = makeWeakHashMap(t, 0, array);
PROTECT(t, map);
for (unsigned i = 0; i < count; ++i) {
object s = bootObject(heap, table[i]);
hashMapInsert(t, map, s, 0, stringHash);
}
return map;
}
object
makeCallTable(MyThread* t, uintptr_t* heap, unsigned* calls, unsigned count,
uintptr_t base)
{
object table = makeArray(t, nextPowerOfTwo(count), true);
PROTECT(t, table);
unsigned size = 0;
for (unsigned i = 0; i < count; ++i) {
unsigned address = calls[i * 2];
unsigned target = calls[(i * 2) + 1];
object node = makeCallNode
(t, base + address, bootObject(heap, target & BootMask),
target >> BootShift, 0);
table = insertCallNode(t, table, &size, node);
}
return table;
}
void
fixupHeap(MyThread* t, uintptr_t* map, unsigned size, uintptr_t* heap)
{
for (unsigned word = 0; word < size; ++word) {
uintptr_t w = map[word];
if (w) {
for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
if (w & (static_cast<uintptr_t>(1) << bit)) {
unsigned index = indexOf(word, bit);
uintptr_t* p = heap + index;
assert(t, *p);
uintptr_t number = *p & BootMask;
uintptr_t mark = *p >> BootShift;
if (number) {
*p = reinterpret_cast<uintptr_t>(heap + (number - 1)) | mark;
} else {
*p = mark;
}
}
}
}
}
}
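
Note: fixupHeap undoes the number/mark packing that makeHeapImage applies in bootimage.cpp, using the BootMask/BootShift constants added to bootimage.h above. A tiny standalone round-trip of that packing; Avian's log() helper is written inline here as log2of, and the rest follows the definitions shown in the diff.

// An object reference in the heap image is stored as its 1-based object
// number in the low bits plus any mark bits shifted up by BootShift
// (makeHeapImage side); fixupHeap recovers both with BootMask/BootShift.
#include <cstdint>
#include <cassert>

const unsigned BytesPerWord = sizeof(uintptr_t);

unsigned log2of(unsigned v) { unsigned r = 0; while (v >>= 1) ++r; return r; }

const unsigned BootMask = (~static_cast<unsigned>(0)) / BytesPerWord;
const unsigned BootShift = 32 - log2of(BytesPerWord);

int main() {
  unsigned number = 12345;       // 1-based object number within the heap image
  unsigned mark = 3;             // mark bits to preserve across the image

  uintptr_t word = number | (static_cast<uintptr_t>(mark) << BootShift);

  assert((word & BootMask) == number);   // what fixupHeap recovers as 'number'
  assert((word >> BootShift) == mark);   // what fixupHeap recovers as 'mark'
  return 0;
}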
void
fixupCode(Thread*, uintptr_t* map, unsigned size, uint8_t* code,
uintptr_t* heap)
{
for (unsigned word = 0; word < size; ++word) {
uintptr_t w = map[word];
if (w) {
for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
if (w & (static_cast<uintptr_t>(1) << bit)) {
unsigned index = indexOf(word, bit);
uintptr_t v; memcpy(&v, code + index, BytesPerWord);
v = reinterpret_cast<uintptr_t>(heap + v - 1);
memcpy(code + index, &v, BytesPerWord);
}
}
}
}
}
void
fixupMethods(Thread* t, BootImage* image, uint8_t* code)
{
for (HashMapIterator it(t, t->m->classMap); it.hasMore();) {
object c = tripleSecond(t, it.next());
if (classMethodTable(t, c)) {
for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
object method = arrayBody(t, classMethodTable(t, c), i);
if (methodCode(t, method) or (methodFlags(t, method) & ACC_NATIVE)) {
assert(t, (methodCompiled(t, method) - image->codeBase)
<= image->codeSize);
methodCompiled(t, method)
= (methodCompiled(t, method) - image->codeBase)
+ reinterpret_cast<uintptr_t>(code);
if (DebugCompile and (methodFlags(t, method) & ACC_NATIVE) == 0) {
logCompile
(static_cast<MyThread*>(t),
reinterpret_cast<uint8_t*>(methodCompiled(t, method)),
reinterpret_cast<uintptr_t*>
(methodCompiled(t, method))[-1],
reinterpret_cast<char*>
(&byteArrayBody(t, className(t, methodClass(t, method)), 0)),
reinterpret_cast<char*>
(&byteArrayBody(t, methodName(t, method), 0)),
reinterpret_cast<char*>
(&byteArrayBody(t, methodSpec(t, method), 0)));
}
}
}
}
t->m->processor->initVtable(t, c);
}
}
void
fixupThunks(MyThread* t, BootImage* image, uint8_t* code)
{
MyProcessor* p = processor(t);
p->defaultThunk = code + image->defaultThunk;
updateCall(t, LongCall, false, code + image->compileMethodCall,
voidPointer(::compileMethod));
p->nativeThunk = code + image->nativeThunk;
updateCall(t, LongCall, false, code + image->invokeNativeCall,
voidPointer(invokeNative));
p->aioobThunk = code + image->aioobThunk;
updateCall(t, LongCall, false,
code + image->throwArrayIndexOutOfBoundsCall,
voidPointer(throwArrayIndexOutOfBounds));
p->thunkTable = code + image->thunkTable;
p->thunkSize = image->thunkSize;
#define THUNK(s) \
updateCall(t, LongJump, false, code + image->s##Call, voidPointer(s));
#include "thunks.cpp"
#undef THUNK
}
void
boot(MyThread* t, BootImage* image)
{
assert(t, image->magic == BootImage::Magic);
unsigned* classTable = reinterpret_cast<unsigned*>(image + 1);
unsigned* stringTable = classTable + image->classCount;
unsigned* callTable = stringTable + image->stringCount;
uintptr_t* heapMap = reinterpret_cast<uintptr_t*>
(pad(reinterpret_cast<uintptr_t>(callTable + (image->callCount * 2))));
unsigned heapMapSizeInWords = ceiling
(heapMapSize(image->heapSize), BytesPerWord);
uintptr_t* heap = heapMap + heapMapSizeInWords;
// fprintf(stderr, "heap from %p to %p\n",
// heap, heap + ceiling(image->heapSize, BytesPerWord));
uintptr_t* codeMap = heap + ceiling(image->heapSize, BytesPerWord);
unsigned codeMapSizeInWords = ceiling
(codeMapSize(image->codeSize), BytesPerWord);
uint8_t* code = reinterpret_cast<uint8_t*>(codeMap + codeMapSizeInWords);
fprintf(stderr, "code from %p to %p\n",
code, code + image->codeSize);
fixupHeap(t, heapMap, heapMapSizeInWords, heap);
t->m->heap->setImmortalHeap(heap, image->heapSize / BytesPerWord);
t->m->loader = bootObject(heap, image->loader);
t->m->types = bootObject(heap, image->types);
MyProcessor* p = static_cast<MyProcessor*>(t->m->processor);
p->methodTree = bootObject(heap, image->methodTree);
p->methodTreeSentinal = bootObject(heap, image->methodTreeSentinal);
fixupCode(t, codeMap, codeMapSizeInWords, code, heap);
t->m->classMap = makeClassMap(t, classTable, image->classCount, heap);
t->m->stringMap = makeStringMap(t, stringTable, image->stringCount, heap);
p->callTableSize = image->callCount;
p->callTable = makeCallTable
(t, heap, callTable, image->callCount,
reinterpret_cast<uintptr_t>(code));
p->staticTableArray = makeStaticTableArray
(t, classTable, image->classCount, heap);
fixupThunks(t, image, code);
fixupMethods(t, image, code);
t->m->bootstrapClassMap = makeHashMap(t, 0, 0);
}
intptr_t
getThunk(MyThread* t, Thunk thunk)
{
@ -5271,8 +5562,7 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
{ uint8_t* call = static_cast<uint8_t*>
(defaultContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(compileMethod))))
+ BytesPerWord;
(reinterpret_cast<intptr_t>(voidPointer(compileMethod))));
if (image) {
image->defaultThunk = p->defaultThunk - imageBase;
@ -5285,8 +5575,7 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
{ uint8_t* call = static_cast<uint8_t*>
(nativeContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(invokeNative))))
+ BytesPerWord;
(reinterpret_cast<intptr_t>(voidPointer(invokeNative))));
if (image) {
image->nativeThunk = p->nativeThunk - imageBase;
@ -5299,8 +5588,7 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
{ uint8_t* call = static_cast<uint8_t*>
(aioobContext.promise.listener->resolve
(reinterpret_cast<intptr_t>(voidPointer(throwArrayIndexOutOfBounds))))
+ BytesPerWord;
(reinterpret_cast<intptr_t>(voidPointer(throwArrayIndexOutOfBounds))));
if (image) {
image->aioobThunk = p->aioobThunk - imageBase;
@ -5325,7 +5613,7 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
start += p->thunkSize; \
{ uint8_t* call = static_cast<uint8_t*> \
(tableContext.promise.listener->resolve \
(reinterpret_cast<intptr_t>(voidPointer(s)))) + BytesPerWord; \
(reinterpret_cast<intptr_t>(voidPointer(s)))); \
if (image) { \
image->s##Call = call - imageBase; \
} \
@ -5339,25 +5627,7 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p,
MyProcessor*
processor(MyThread* t)
{
MyProcessor* p = static_cast<MyProcessor*>(t->m->processor);
if (p->callTable == 0) {
ACQUIRE(t, t->m->classLock);
if (p->callTable == 0) {
p->callTable = makeArray(t, 128, true);
p->methodTree = p->methodTreeSentinal = makeTreeNode(t, 0, 0, 0);
set(t, p->methodTree, TreeNodeLeft, p->methodTreeSentinal);
set(t, p->methodTree, TreeNodeRight, p->methodTreeSentinal);
compileThunks(t, codeZone(t), p, 0, 0);
p->segFaultHandler.m = t->m;
expect(t, t->m->system->success
(t->m->system->handleSegFault(&(p->segFaultHandler))));
}
}
return p;
return static_cast<MyProcessor*>(t->m->processor);
}
object&
@ -5395,164 +5665,62 @@ compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
if (UNLIKELY(t->exception)) return;
}
MyProcessor* p = processor(t);
if (methodCompiled(t, method) == defaultThunk(t)) {
if (methodAddress(t, method) == defaultThunk(t)) {
ACQUIRE(t, t->m->classLock);
if (methodCompiled(t, method) == defaultThunk(t)) {
object node;
uint8_t* compiled;
if (methodFlags(t, method) & ACC_NATIVE) {
node = 0;
compiled = p->nativeThunk;
} else {
Context context(t, bootContext, method);
compiled = compile(t, allocator, &context);
if (UNLIKELY(t->exception)) return;
if (methodAddress(t, method) == defaultThunk(t)) {
assert(t, (methodFlags(t, method) & ACC_NATIVE) == 0);
if (DebugMethodTree) {
fprintf(stderr, "insert method at %p\n", compiled);
}
Context context(t, bootContext, method);
uint8_t* compiled = compile(t, allocator, &context);
if (UNLIKELY(t->exception)) return;
// We can't set the MethodCompiled field on the original
// method before it is placed into the method tree, since
// another thread might call the method, from which stack
// unwinding would fail (since there is not yet an entry in
// the method tree). However, we can't insert the original
// method into the tree before setting the MethodCompiled
// field on it since we rely on that field to determine its
// position in the tree. Therefore, we insert a clone in
// its place. Later, we'll replace the clone with the
// original to save memory.
object clone = makeMethod
(t, methodVmFlags(t, method),
methodReturnCode(t, method),
methodParameterCount(t, method),
methodParameterFootprint(t, method),
methodFlags(t, method),
methodOffset(t, method),
methodNativeID(t, method),
methodName(t, method),
methodSpec(t, method),
methodClass(t, method),
methodCode(t, method),
reinterpret_cast<intptr_t>(compiled));
node = makeTreeNode
(t, clone, methodTreeSentinal(t), methodTreeSentinal(t));
PROTECT(t, node);
methodTree(t) = treeInsertNode
(t, &(context.zone), methodTree(t),
reinterpret_cast<intptr_t>(compiled), node, methodTreeSentinal(t),
compareIpToMethodBounds);
}
methodCompiled(t, method) = reinterpret_cast<intptr_t>(compiled);
if (methodVirtual(t, method)) {
classVtable(t, methodClass(t, method), methodOffset(t, method))
= compiled;
}
if (node) {
set(t, node, TreeNodeValue, method);
}
if (DebugMethodTree) {
fprintf(stderr, "insert method at %p\n", compiled);
}
// We can't set the MethodCompiled field on the original method
// before it is placed into the method tree, since another
// thread might call the method, from which stack unwinding
// would fail (since there is not yet an entry in the method
// tree). However, we can't insert the original method into the
// tree before setting the MethodCompiled field on it since we
// rely on that field to determine its position in the tree.
// Therefore, we insert a clone in its place. Later, we'll
// replace the clone with the original to save memory.
object clone = makeMethod
(t, methodVmFlags(t, method),
methodReturnCode(t, method),
methodParameterCount(t, method),
methodParameterFootprint(t, method),
methodFlags(t, method),
methodOffset(t, method),
methodNativeID(t, method),
methodName(t, method),
methodSpec(t, method),
methodClass(t, method),
methodCode(t, method),
reinterpret_cast<intptr_t>(compiled));
methodTree(t) = treeInsert
(t, &(context.zone), methodTree(t),
reinterpret_cast<intptr_t>(compiled), clone, methodTreeSentinal(t),
compareIpToMethodBounds);
methodCompiled(t, method) = reinterpret_cast<intptr_t>(compiled);
if (methodVirtual(t, method)) {
classVtable(t, methodClass(t, method), methodOffset(t, method))
= compiled;
}
treeUpdate(t, methodTree(t), reinterpret_cast<intptr_t>(compiled),
method, methodTreeSentinal(t), compareIpToMethodBounds);
}
}
}
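
Note: the comment in compile() above explains why a clone is inserted into the method tree before the original method is marked compiled, and why the clone is replaced afterwards. A minimal standalone sketch of that publish-then-swap idea, with std::map and std::mutex standing in for Avian's red-black tree and class lock; purely illustrative, not the VM's actual data structures.

// Publish-then-swap: make the freshly compiled code findable under its
// address (via a placeholder) before the real method object is marked
// compiled, then swap the real object in.
#include <cstdint>
#include <map>
#include <mutex>

struct Method {
  uintptr_t compiled = 0;        // address of generated code, once available
};

std::map<uintptr_t, Method*> methodTree;   // keyed by compiled-code address
std::mutex classLock;

void publish(Method* method, uintptr_t compiled) {
  std::lock_guard<std::mutex> guard(classLock);

  // 1. Insert a clone so lookups by address succeed from this point on,
  //    without yet touching the original method.
  Method* clone = new Method(*method);
  clone->compiled = compiled;
  methodTree[compiled] = clone;

  // 2. Only now is it safe to mark the original as compiled...
  method->compiled = compiled;

  // 3. ...and replace the clone with the original to save memory.
  methodTree[compiled] = method;
  delete clone;
}

int main() {
  Method m;
  publish(&m, 0x1000);
  return methodTree[0x1000] == &m ? 0 : 1;
}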
object
findCallNode(MyThread* t, void* address)
{
if (DebugCallTable) {
fprintf(stderr, "find call node %p\n", address);
}
MyProcessor* p = processor(t);
object table = p->callTable;
intptr_t key = reinterpret_cast<intptr_t>(address);
unsigned index = static_cast<uintptr_t>(key)
& (arrayLength(t, table) - 1);
for (object n = arrayBody(t, table, index);
n; n = callNodeNext(t, n))
{
intptr_t k = callNodeAddress(t, n);
if (k == key) {
return n;
}
}
return 0;
}
object
resizeTable(MyThread* t, object oldTable, unsigned newLength)
{
PROTECT(t, oldTable);
object oldNode = 0;
PROTECT(t, oldNode);
object newTable = makeArray(t, newLength, true);
PROTECT(t, newTable);
for (unsigned i = 0; i < arrayLength(t, oldTable); ++i) {
for (oldNode = arrayBody(t, oldTable, i);
oldNode;
oldNode = callNodeNext(t, oldNode))
{
intptr_t k = callNodeAddress(t, oldNode);
unsigned index = k & (newLength - 1);
object newNode = makeCallNode
(t, callNodeAddress(t, oldNode),
callNodeTarget(t, oldNode),
callNodeVirtualCall(t, oldNode),
arrayBody(t, newTable, index));
set(t, newTable, ArrayBody + (index * BytesPerWord), newNode);
}
}
return newTable;
}
void
insertCallNode(MyThread* t, object node)
{
if (DebugCallTable) {
fprintf(stderr, "insert call node %p\n",
reinterpret_cast<void*>(callNodeAddress(t, node)));
}
MyProcessor* p = processor(t);
PROTECT(t, node);
++ p->callTableSize;
if (p->callTableSize >= arrayLength(t, p->callTable) * 2) {
p->callTable = resizeTable
(t, p->callTable, arrayLength(t, p->callTable) * 2);
}
intptr_t key = callNodeAddress(t, node);
unsigned index = static_cast<uintptr_t>(key)
& (arrayLength(t, p->callTable) - 1);
set(t, node, CallNodeNext, arrayBody(t, p->callTable, index));
set(t, p->callTable, ArrayBody + (index * BytesPerWord), node);
}
object&
methodTree(MyThread* t)
{

View File

@ -528,12 +528,8 @@ class Context {
lowMemoryThreshold(limit / 2),
lock(0),
immortalPointerMap(&immortalHeap, 1, 1, 0, true),
immortalPageMap(&immortalHeap, 1, LikelyPageSizeInBytes / BytesPerWord,
&immortalPointerMap, true),
immortalHeapMap(&immortalHeap, 1, immortalPageMap.scale * 1024,
&immortalPageMap, true),
immortalHeap(this, &immortalHeapMap, 0, 0),
immortalHeapStart(0),
immortalHeapEnd(0),
ageMap(&gen1, max(1, log(TenureThreshold)), 1, 0, false),
gen1(this, &ageMap, 0, 0),
@ -586,15 +582,6 @@ class Context {
nextGen1.dispose();
gen2.dispose();
nextGen2.dispose();
if (immortalHeapMap.data) {
free(this, immortalHeapMap.data, immortalHeapMap.size() * BytesPerWord);
}
if (immortalPageMap.data) {
free(this, immortalPageMap.data, immortalPageMap.size() * BytesPerWord);
}
lock->dispose();
}
@ -613,10 +600,8 @@ class Context {
System::Mutex* lock;
Segment::Map immortalPointerMap;
Segment::Map immortalPageMap;
Segment::Map immortalHeapMap;
Segment immortalHeap;
uintptr_t* immortalHeapStart;
uintptr_t* immortalHeapEnd;
Segment::Map ageMap;
Segment gen1;
@ -821,6 +806,7 @@ free(Context* c, Fixie** fixies)
{
for (Fixie** p = fixies; *p;) {
Fixie* f = *p;
if (f->immortal()) {
p = &(f->next);
} else {
@ -959,6 +945,12 @@ copy(Context* c, void* o)
return r;
}
bool
immortalHeapContains(Context* c, void* p)
{
return p < c->immortalHeapEnd and p >= c->immortalHeapStart;
}
void*
update3(Context* c, void* o, bool* needsVisit)
{
@ -976,6 +968,9 @@ update3(Context* c, void* o, bool* needsVisit)
}
*needsVisit = false;
return o;
} else if (immortalHeapContains(c, o)) {
*needsVisit = false;
return o;
} else if (wasCollected(c, o)) {
*needsVisit = false;
return follow(c, o);
@ -988,9 +983,7 @@ update3(Context* c, void* o, bool* needsVisit)
void*
update2(Context* c, void* o, bool* needsVisit)
{
if (c->immortalHeap.contains(o)
or (c->mode == Heap::MinorCollection and c->gen2.contains(o)))
{
if (c->mode == Heap::MinorCollection and c->gen2.contains(o)) {
*needsVisit = false;
return o;
}
@ -998,17 +991,6 @@ update2(Context* c, void* o, bool* needsVisit)
return update3(c, o, needsVisit);
}
void*
update(Context* c, void** p, bool* needsVisit)
{
if (mask(*p) == 0) {
*needsVisit = false;
return 0;
}
return update2(c, mask(*p), needsVisit);
}
void
markDirty(Context* c, Fixie* f)
{
@ -1023,7 +1005,11 @@ markClean(Context* c, Fixie* f)
{
if (f->dirty) {
f->dirty = false;
f->move(c, &(c->tenuredFixies));
if (f->immortal()) {
f->remove(c);
} else {
f->move(c, &(c->tenuredFixies));
}
}
}
@ -1041,9 +1027,10 @@ updateHeapMap(Context* c, void* p, void* target, unsigned offset, void* result)
map = &(c->nextHeapMap);
}
if (not (c->client->isFixed(result)
and fixie(result)->age >= FixieTenureThreshold)
and not seg->contains(result))
if (not (immortalHeapContains(c, result)
or (c->client->isFixed(result)
and fixie(result)->age >= FixieTenureThreshold)
or seg->contains(result)))
{
if (target and c->client->isFixed(target)) {
Fixie* f = fixie(target);
@ -1051,8 +1038,8 @@ updateHeapMap(Context* c, void* p, void* target, unsigned offset, void* result)
if (static_cast<unsigned>(f->age + 1) >= FixieTenureThreshold) {
if (DebugFixies) {
fprintf(stderr, "dirty fixie %p at %d (%p)\n",
f, offset, f->body() + offset);
fprintf(stderr, "dirty fixie %p at %d (%p): %p\n",
f, offset, f->body() + offset, result);
}
f->dirty = true;
@ -1711,34 +1698,9 @@ class MyHeap: public Heap {
c.client = client;
}
virtual void setImmortalHeap(uintptr_t* start, unsigned sizeInWords,
uintptr_t* map)
{
new (&(c.immortalPointerMap)) Segment::Map
(&(c.immortalHeap), map, 1, 1, 0, false);
unsigned pageMapScale = LikelyPageSizeInBytes / BytesPerWord;
unsigned pageMapSize = Segment::Map::calculateSize
(&c, sizeInWords, pageMapScale, 1);
uintptr_t* pageMap = static_cast<uintptr_t*>
(allocate(pageMapSize * BytesPerWord));
new (&(c.immortalPageMap)) Segment::Map
(&(c.immortalHeap), pageMap, 1, pageMapScale, &(c.immortalPointerMap),
true);
unsigned heapMapScale = pageMapScale * 1024;
unsigned heapMapSize = Segment::Map::calculateSize
(&c, sizeInWords, heapMapScale, 1);
uintptr_t* heapMap = static_cast<uintptr_t*>
(allocate(heapMapSize * BytesPerWord));
new (&(c.immortalHeapMap)) Segment::Map
(&(c.immortalHeap), heapMap, 1, heapMapScale, &(c.immortalPageMap),
true);
new (&(c.immortalHeap)) Segment
(&c, &(c.immortalHeapMap), start, sizeInWords, sizeInWords);
virtual void setImmortalHeap(uintptr_t* start, unsigned sizeInWords) {
c.immortalHeapStart = start;
c.immortalHeapEnd = start + sizeInWords;
}
virtual void* tryAllocate(unsigned size) {
@ -1776,10 +1738,12 @@ class MyHeap: public Heap {
{
*totalInBytes = Fixie::totalSize(sizeInWords, objectMask);
return (new (allocator->allocate(*totalInBytes))
Fixie(sizeInWords, objectMask, &(c.tenuredFixies), true))->body();
Fixie(sizeInWords, objectMask, 0, true))->body();
}
virtual bool needsMark(void* p) {
assert(&c, c.client->isFixed(p) or (not immortalHeapContains(&c, p)));
if (c.client->isFixed(p)) {
return fixie(p)->age >= FixieTenureThreshold;
} else {
@ -1809,8 +1773,8 @@ class MyHeap: public Heap {
void** target = static_cast<void**>(p) + offset + i;
if (targetNeedsMark(mask(*target))) {
if (DebugFixies) {
fprintf(stderr, "dirty fixie %p at %d (%p)\n",
f, offset, f->body() + offset);
fprintf(stderr, "dirty fixie %p at %d (%p): %p\n",
f, offset, f->body() + offset, mask(*target));
}
dirty = true;

View File

@ -52,8 +52,7 @@ class Heap: public Allocator {
};
virtual void setClient(Client* client) = 0;
virtual void setImmortalHeap(uintptr_t* start, unsigned sizeInWords,
uintptr_t* map) = 0;
virtual void setImmortalHeap(uintptr_t* start, unsigned sizeInWords) = 0;
virtual void collect(CollectionType type, unsigned footprint) = 0;
virtual void* allocateFixed(Allocator* allocator, unsigned sizeInWords,
bool objectMask, unsigned* totalInBytes) = 0;

View File

@ -1502,93 +1502,6 @@ bootJavaClass(Thread* t, Machine::Type type, int superType, const char* name,
hashMapInsert(t, t->m->bootstrapClassMap, n, class_, byteArrayHash);
}
void
boot(Thread* t, BootImage* image)
{
assert(t, image->magic == BootImage::Magic);
uintptr_t* heapMap = reinterpret_cast<uintptr_t*>(image + 1);
unsigned heapMapSizeInWords = ceiling
(heapMapSize(image->heapSize), BytesPerWord);
uintptr_t* heap = heapMap + heapMapSizeInWords;
for (unsigned word = 0; word < heapMapSizeInWords; ++word) {
uintptr_t w = heapMap[word];
if (w) {
for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
if (w & (static_cast<uintptr_t>(1) << bit)) {
uintptr_t* p = heap + indexOf(word, bit);
*p = reinterpret_cast<uintptr_t>(heap + (*p - 1));
}
}
heapMap[word] = 0;
}
}
t->m->heap->setImmortalHeap(heap, image->heapSize, heapMap);
t->m->loader = bootObject(heap, image->loader);
t->m->stringMap = bootObject(heap, image->stringMap);
t->m->types = bootObject(heap, image->types);
uintptr_t* codeMap = heap + ceiling(image->heapSize, BytesPerWord);
unsigned codeMapSizeInWords = ceiling
(codeMapSize(image->codeSize), BytesPerWord);
uint8_t* code = reinterpret_cast<uint8_t*>(codeMap + codeMapSizeInWords);
for (unsigned word = 0; word < codeMapSizeInWords; ++word) {
uintptr_t w = codeMap[word];
if (w) {
for (unsigned bit = 0; bit < BitsPerWord; ++bit) {
if (w & (static_cast<uintptr_t>(1) << bit)) {
unsigned index = indexOf(word, bit);
uintptr_t v; memcpy(&v, code + index, BytesPerWord);
v = reinterpret_cast<uintptr_t>(heap + v - 1);
memcpy(code + index, &v, BytesPerWord);
}
}
}
}
t->m->processor->boot(t, image, heap, code);
for (HashMapIterator it(t, systemClassLoaderMap(t, t->m->loader));
it.hasMore();)
{
object c = tripleSecond(t, it.next());
if (classMethodTable(t, c)) {
for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
object method = arrayBody(t, classMethodTable(t, c), i);
if (methodCode(t, method) or (methodFlags(t, method) & ACC_NATIVE)) {
assert(t, (methodCompiled(t, method) - image->codeBase)
<= image->codeSize);
methodCompiled(t, method)
= (methodCompiled(t, method) - image->codeBase)
+ reinterpret_cast<uintptr_t>(code);
if (false and (methodFlags(t, method) & ACC_NATIVE) == 0) {
fprintf(stderr, "%p %p %s.%s%s\n",
reinterpret_cast<uint8_t*>(methodCompiled(t, method)),
reinterpret_cast<uint8_t*>(methodCompiled(t, method)) +
reinterpret_cast<uintptr_t*>
(methodCompiled(t, method))[-1],
&byteArrayBody
(t, className(t, methodClass(t, method)), 0),
&byteArrayBody(t, methodName(t, method), 0),
&byteArrayBody(t, methodSpec(t, method), 0));
}
}
}
}
t->m->processor->initVtable(t, c);
}
t->m->bootstrapClassMap = makeHashMap(t, 0, 0);
}
void
boot(Thread* t)
{
@ -1655,9 +1568,7 @@ boot(Thread* t)
m->bootstrapClassMap = makeHashMap(t, 0, 0);
{ object loaderMap = makeHashMap(t, 0, 0);
set(t, m->loader, SystemClassLoaderMap, loaderMap);
}
m->classMap = makeHashMap(t, 0, 0);
m->stringMap = makeWeakHashMap(t, 0, 0);
@ -1785,6 +1696,7 @@ Machine::Machine(System* system, Heap* heap, Finder* finder,
referenceLock(0),
libraries(0),
loader(0),
classMap(0),
bootstrapClassMap(0),
monitorMap(0),
stringMap(0),
@ -1901,9 +1813,10 @@ Thread::init()
m->unsafe = false;
if (image) {
boot(this, image);
m->processor->boot(this, image);
} else {
boot(this);
m->processor->boot(this, 0);
}
m->monitorMap = makeWeakHashMap(this, 0, 0);
@ -2412,8 +2325,7 @@ findLoadedClass(Thread* t, object spec)
PROTECT(t, spec);
ACQUIRE(t, t->m->classLock);
return hashMapFind(t, systemClassLoaderMap(t, t->m->loader),
spec, byteArrayHash, byteArrayEqual);
return hashMapFind(t, t->m->classMap, spec, byteArrayHash, byteArrayEqual);
}
object
@ -2517,8 +2429,9 @@ resolveClass(Thread* t, object spec)
PROTECT(t, spec);
ACQUIRE(t, t->m->classLock);
object class_ = hashMapFind(t, systemClassLoaderMap(t, t->m->loader),
spec, byteArrayHash, byteArrayEqual);
object class_ = hashMapFind
(t, t->m->classMap, spec, byteArrayHash, byteArrayEqual);
if (class_ == 0) {
if (byteArrayBody(t, spec, 0) == '[') {
class_ = hashMapFind
@ -2570,8 +2483,7 @@ resolveClass(Thread* t, object spec)
if (class_) {
PROTECT(t, class_);
hashMapInsert(t, systemClassLoaderMap(t, t->m->loader),
spec, class_, byteArrayHash);
hashMapInsert(t, t->m->classMap, spec, class_, byteArrayHash);
} else if (t->exception == 0) {
object message = makeString(t, "%s", &byteArrayBody(t, spec, 0));
t->exception = makeClassNotFoundException(t, message);
@ -2908,6 +2820,7 @@ void
visitRoots(Machine* m, Heap::Visitor* v)
{
v->visit(&(m->loader));
v->visit(&(m->classMap));
v->visit(&(m->bootstrapClassMap));
v->visit(&(m->monitorMap));
v->visit(&(m->stringMap));

View File

@ -1159,6 +1159,7 @@ class Machine {
System::Monitor* referenceLock;
System::Library* libraries;
object loader;
object classMap;
object bootstrapClassMap;
object monitorMap;
object stringMap;
@ -1433,6 +1434,35 @@ expect(Thread* t, bool v)
expect(t->m->system, v);
}
class FixedAllocator: public Allocator {
public:
FixedAllocator(Thread* t, uint8_t* base, unsigned capacity):
t(t), base(base), offset(0), capacity(capacity)
{ }
virtual void* tryAllocate(unsigned) {
abort(t);
}
virtual void* allocate(unsigned size) {
unsigned paddedSize = pad(size);
expect(t, offset + paddedSize < capacity);
void* p = base + offset;
offset += paddedSize;
return p;
}
virtual void free(const void*, unsigned) {
abort(t);
}
Thread* t;
uint8_t* base;
unsigned offset;
unsigned capacity;
};
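
Note: FixedAllocator, now shared via machine.h, is a plain bump allocator over a caller-supplied buffer; the boot-image code above uses it to carve immortal objects and thunks out of preallocated regions. A standalone sketch of the same behavior, with pad() assumed to round up to BytesPerWord and Avian's Thread/expect plumbing replaced by assert.

// Bump allocation from a fixed buffer: each request is padded to a word
// boundary and carved off in order; running out of room is fatal.
#include <cassert>
#include <cstddef>
#include <cstdint>

const unsigned BytesPerWord = sizeof(uintptr_t);

unsigned pad(unsigned size) {
  return (size + BytesPerWord - 1) & ~(BytesPerWord - 1);
}

struct FixedBuffer {
  uint8_t* base;
  unsigned offset;
  unsigned capacity;

  void* allocate(unsigned size) {
    unsigned paddedSize = pad(size);
    assert(offset + paddedSize < capacity);  // same invariant expect() enforces
    void* p = base + offset;
    offset += paddedSize;
    return p;
  }
};

int main() {
  static uint8_t storage[256];
  FixedBuffer a = { storage, 0, sizeof(storage) };

  void* first = a.allocate(3);   // rounded up to one word
  void* second = a.allocate(10);

  assert(static_cast<uint8_t*>(second) - static_cast<uint8_t*>(first)
         == static_cast<ptrdiff_t>(pad(3)));
  return first != second ? 0 : 1;
}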
inline void
ensure(Thread* t, unsigned sizeInBytes)
{

View File

@ -127,8 +127,11 @@ class Processor {
virtual void
visitRoots(BootImage* image, HeapWalker* w) = 0;
virtual unsigned*
makeCallTable(Thread* t, BootImage* image, HeapWalker* w, uint8_t* code) = 0;
virtual void
boot(Thread* t, BootImage* image, uintptr_t* heap, uint8_t* code) = 0;
boot(Thread* t, BootImage* image) = 0;
object
invoke(Thread* t, object method, object this_, ...)

View File

@ -106,6 +106,25 @@ cloneTreeNode(Thread* t, object n)
return newNode;
}
object
treeFind(Thread* t, object tree, intptr_t key, object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b))
{
object node = tree;
while (node != sentinal) {
intptr_t difference = compare(t, key, getTreeNodeValue(t, node));
if (difference < 0) {
node = treeNodeLeft(t, node);
} else if (difference > 0) {
node = treeNodeRight(t, node);
} else {
return node;
}
}
return 0;
}
void
treeFind(Thread* t, TreeContext* c, object old, intptr_t key, object node,
object sentinal,
@ -531,29 +550,20 @@ object
treeQuery(Thread* t, object tree, intptr_t key, object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b))
{
object node = tree;
while (node != sentinal) {
intptr_t difference = compare(t, key, getTreeNodeValue(t, node));
if (difference < 0) {
node = treeNodeLeft(t, node);
} else if (difference > 0) {
node = treeNodeRight(t, node);
} else {
return getTreeNodeValue(t, node);
}
}
return 0;
object node = treeFind(t, tree, key, sentinal, compare);
return (node ? getTreeNodeValue(t, node) : 0);
}
object
treeInsertNode(Thread* t, Zone* zone, object tree, intptr_t key, object node,
object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b))
treeInsert(Thread* t, Zone* zone, object tree, intptr_t key, object value,
object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b))
{
PROTECT(t, tree);
PROTECT(t, sentinal);
object node = makeTreeNode(t, value, sentinal, sentinal);
TreeContext c(t, zone);
treeFind(t, &c, tree, key, node, sentinal, compare);
expect(t, c.fresh);
@ -561,4 +571,11 @@ treeInsertNode(Thread* t, Zone* zone, object tree, intptr_t key, object node,
return treeAdd(t, &c);
}
void
treeUpdate(Thread* t, object tree, intptr_t key, object value, object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b))
{
setTreeNodeValue(t, treeFind(t, tree, key, sentinal, compare), value);
}
} // namespace vm

View File

@ -89,9 +89,13 @@ treeQuery(Thread* t, object tree, intptr_t key, object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b));
object
treeInsertNode(Thread* t, Zone* zone, object tree, intptr_t key, object node,
object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b));
treeInsert(Thread* t, Zone* zone, object tree, intptr_t key, object value,
object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b));
void
treeUpdate(Thread* t, object tree, intptr_t key, object value, object sentinal,
intptr_t (*compare)(Thread* t, intptr_t key, object b));
class HashMapIterator: public Thread::Protector {
public:

View File

@ -146,7 +146,7 @@ resolveOffset(System* s, uint8_t* instruction, unsigned instructionSize,
int32_t v4 = v;
memcpy(instruction + instructionSize - 4, &v4, 4);
return instruction + instructionSize - 4;
return instruction + instructionSize;
}
class OffsetListener: public Promise::Listener {
@ -221,27 +221,30 @@ copy(System* s, void* dst, int64_t src, unsigned size)
class ImmediateListener: public Promise::Listener {
public:
ImmediateListener(System* s, void* dst, unsigned size):
s(s), dst(dst), size(size)
ImmediateListener(System* s, void* dst, unsigned size, unsigned offset):
s(s), dst(dst), size(size), offset(offset)
{ }
virtual void* resolve(int64_t value) {
copy(s, dst, value, size);
return dst;
return static_cast<uint8_t*>(dst) + offset;
}
System* s;
void* dst;
unsigned size;
unsigned offset;
};
class ImmediateTask: public Task {
public:
ImmediateTask(Task* next, Promise* promise, unsigned offset, unsigned size):
ImmediateTask(Task* next, Promise* promise, unsigned offset, unsigned size,
unsigned promiseOffset):
Task(next),
promise(promise),
offset(offset),
size(size)
size(size),
promiseOffset(promiseOffset)
{ }
virtual void run(Context* c) {
@ -249,21 +252,22 @@ class ImmediateTask: public Task {
copy(c->s, c->result + offset, promise->value(), size);
} else {
new (promise->listen(sizeof(ImmediateListener)))
ImmediateListener(c->s, c->result + offset, size);
ImmediateListener(c->s, c->result + offset, size, promiseOffset);
}
}
Promise* promise;
unsigned offset;
unsigned size;
unsigned promiseOffset;
};
void
appendImmediateTask(Context* c, Promise* promise, unsigned offset,
unsigned size)
unsigned size, unsigned promiseOffset = 0)
{
c->tasks = new (c->zone->allocate(sizeof(ImmediateTask))) ImmediateTask
(c->tasks, promise, offset, size);
(c->tasks, promise, offset, size, promiseOffset);
}
void
@ -378,6 +382,10 @@ conditional(Context* c, unsigned condition, Assembler::Constant* a)
void
moveCR(Context*, unsigned, Assembler::Constant*, Assembler::Register*);
void
moveCR2(Context*, unsigned, Assembler::Constant*, Assembler::Register*,
unsigned promiseOffset);
void
callR(Context*, unsigned, Assembler::Register*);
@ -396,7 +404,7 @@ longCallC(Context* c, unsigned size, Assembler::Constant* a)
if (BytesPerWord == 8) {
Assembler::Register r(r10);
moveCR(c, size, a, &r);
moveCR2(c, size, a, &r, 11);
callR(c, size, &r);
} else {
callC(c, size, a);
@ -455,7 +463,7 @@ longJumpC(Context* c, unsigned size, Assembler::Constant* a)
if (BytesPerWord == 8) {
Assembler::Register r(r10);
moveCR(c, size, a, &r);
moveCR2(c, size, a, &r, 11);
jumpR(c, size, &r);
} else {
jumpC(c, size, a);
@ -700,8 +708,8 @@ leaMR(Context* c, unsigned size, Assembler::Memory* b, Assembler::Register* a)
}
void
moveCR(Context* c, unsigned size, Assembler::Constant* a,
Assembler::Register* b)
moveCR2(Context* c, unsigned size, Assembler::Constant* a,
Assembler::Register* b, unsigned promiseOffset)
{
if (BytesPerWord == 4 and size == 8) {
int64_t v = a->value->value();
@ -722,54 +730,18 @@ moveCR(Context* c, unsigned size, Assembler::Constant* a,
if (a->value->resolved()) {
c->code.appendAddress(a->value->value());
} else {
appendImmediateTask(c, a->value, c->code.length(), BytesPerWord);
appendImmediateTask
(c, a->value, c->code.length(), BytesPerWord, promiseOffset);
c->code.appendAddress(static_cast<uintptr_t>(0));
}
}
}
void
moveCM(Context* c, unsigned size, Assembler::Constant* a,
Assembler::Memory* b)
moveCR(Context* c, unsigned size, Assembler::Constant* a,
Assembler::Register* b)
{
switch (size) {
case 1:
encode(c, 0xc6, 0, b, false);
c->code.append(a->value->value());
break;
case 2:
encode2(c, 0x66c7, 0, b, false);
c->code.append2(a->value->value());
break;
case 4:
encode(c, 0xc7, 0, b, false);
if (a->value->resolved()) {
c->code.append4(a->value->value());
} else {
appendImmediateTask(c, a->value, c->code.length(), 4);
c->code.append4(0);
}
break;
case 8: {
int64_t v = a->value->value();
ResolvedPromise high((v >> 32) & 0xFFFFFFFF);
Assembler::Constant ah(&high);
ResolvedPromise low(v & 0xFFFFFFFF);
Assembler::Constant al(&low);
Assembler::Memory bh(b->base, b->offset + 4, b->index, b->scale);
moveCM(c, 4, &al, b);
moveCM(c, 4, &ah, &bh);
} break;
default: abort(c);
}
moveCR2(c, size, a, b, 0);
}
void
@ -857,6 +829,62 @@ moveRM(Context* c, unsigned size, Assembler::Register* a, Assembler::Memory* b)
}
}
void
moveCM(Context* c, unsigned size, Assembler::Constant* a,
Assembler::Memory* b)
{
switch (size) {
case 1:
encode(c, 0xc6, 0, b, false);
c->code.append(a->value->value());
break;
case 2:
encode2(c, 0x66c7, 0, b, false);
c->code.append2(a->value->value());
break;
case 4:
encode(c, 0xc7, 0, b, false);
if (a->value->resolved()) {
c->code.append4(a->value->value());
} else {
appendImmediateTask(c, a->value, c->code.length(), 4);
c->code.append4(0);
}
break;
case 8: {
if (BytesPerWord == 8) {
if(a->value->resolved() and isInt32(a->value->value())) {
encode(c, 0xc7, 0, b, true);
c->code.append4(a->value->value());
} else {
Assembler::Register tmp(c->client->acquireTemporary());
moveCR(c, 8, a, &tmp);
moveRM(c, 8, &tmp, b);
c->client->releaseTemporary(tmp.low);
}
} else {
int64_t v = a->value->value();
ResolvedPromise high((v >> 32) & 0xFFFFFFFF);
Assembler::Constant ah(&high);
ResolvedPromise low(v & 0xFFFFFFFF);
Assembler::Constant al(&low);
Assembler::Memory bh(b->base, b->offset + 4, b->index, b->scale);
moveCM(c, 4, &al, b);
moveCM(c, 4, &ah, &bh);
}
} break;
default: abort(c);
}
}
void
move4To8CR(Context* c, unsigned, Assembler::Constant* a,
Assembler::Register* b)
@ -2274,15 +2302,32 @@ class MyAssembler: public Assembler {
bool assertAlignment UNUSED, void* returnAddress,
void* newTarget)
{
uint8_t* instruction = static_cast<uint8_t*>(returnAddress) - 5;
assert(&c, (op == LongCall and *instruction == 0xE8)
or (op == LongJump and *instruction == 0xE9));
assert(&c, (not assertAlignment)
or reinterpret_cast<uintptr_t>(instruction + 1) % 4 == 0);
if (BytesPerWord == 4 or op == Call or op == Jump) {
uint8_t* instruction = static_cast<uint8_t*>(returnAddress) - 5;
int32_t v = static_cast<uint8_t*>(newTarget)
- static_cast<uint8_t*>(returnAddress);
memcpy(instruction + 1, &v, 4);
assert(&c, ((op == Call or op == LongCall) and *instruction == 0xE8)
or ((op == Jump or op == LongJump) and *instruction == 0xE9));
assert(&c, (not assertAlignment)
or reinterpret_cast<uintptr_t>(instruction + 1) % 4 == 0);
int32_t v = static_cast<uint8_t*>(newTarget)
- static_cast<uint8_t*>(returnAddress);
memcpy(instruction + 1, &v, 4);
} else {
uint8_t* instruction = static_cast<uint8_t*>(returnAddress) - 13;
assert(&c, instruction[0] == 0x49 and instruction[1] == 0xBA);
assert(&c, instruction[10] == 0x41 and instruction[11] == 0xFF);
assert(&c, (op == LongCall and instruction[12] == 0xD2)
or (op == LongJump and instruction[12] == 0xE2));
assert(&c, (not assertAlignment)
or reinterpret_cast<uintptr_t>(instruction + 2) % 8 == 0);
memcpy(instruction + 2, &newTarget, 8);
}
}
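
Note: the new 64-bit branch of updateCall patches the 13-byte sequence emitted by longCallC/longJumpC: 49 BA imm64 is mov r10, imm64 (REX.W+B, opcode B8+r10), and 41 FF D2 / 41 FF E2 are call r10 / jmp r10. returnAddress points just past the call, so the sequence starts at returnAddress - 13 and the 8-byte immediate sits at offset 2. A standalone sketch that patches such a sequence in a plain byte buffer; it assumes exactly this encoding and the buffer is data only, never executed.

// Patch the imm64 of a "mov r10, imm64; call/jmp r10" call site, mirroring
// the asserts in the 64-bit branch of updateCall above.
#include <cassert>
#include <cstdint>
#include <cstring>

void patchLongCall(uint8_t* returnAddress, uint64_t newTarget) {
  uint8_t* instruction = returnAddress - 13;
  assert(instruction[0] == 0x49 and instruction[1] == 0xBA);    // mov r10, imm64
  assert(instruction[10] == 0x41 and instruction[11] == 0xFF);  // call/jmp r10
  std::memcpy(instruction + 2, &newTarget, 8);                  // overwrite the target
}

int main() {
  uint8_t buffer[13] = { 0x49, 0xBA, 0, 0, 0, 0, 0, 0, 0, 0, 0x41, 0xFF, 0xD2 };
  uint64_t target = 0x1122334455667788ull;

  patchLongCall(buffer + 13, target);

  uint64_t patched;
  std::memcpy(&patched, buffer + 2, 8);
  assert(patched == target);
  return 0;
}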
virtual void dispose() {