Merge branch 'ios' into armvfp

JET 2011-09-28 15:10:02 -06:00
commit 253b2c264d
9 changed files with 63 additions and 35 deletions

View File

@@ -1847,6 +1847,10 @@ class MyArchitecture: public Assembler::Architecture {
     return 0;
   }
 
+  virtual int scratch() {
+    return 5;
+  }
+
   virtual int stack() {
     return StackRegister;
   }

View File

@@ -329,6 +329,7 @@ class Assembler {
    virtual uint32_t generalRegisterMask() = 0;
    virtual uint32_t floatRegisterMask() = 0;
+   virtual int scratch() = 0;
    virtual int stack() = 0;
    virtual int thread() = 0;
    virtual int returnLow() = 0;

View File

@@ -496,8 +496,10 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
         }
 
         if (fieldFlags(t, field) & ACC_STATIC) {
-          while (targetStaticOffset % targetSize) {
-            ++ targetStaticOffset;
+          unsigned excess = (targetStaticOffset % targetSize)
+            % TargetBytesPerWord;
+          if (excess) {
+            targetStaticOffset += TargetBytesPerWord - excess;
           }
 
           buildStaticOffset = fieldOffset(t, field);
@@ -672,7 +674,7 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
       expect(t, value >= code);
 
       addresses->listener->resolve
-        (targetVW(static_cast<target_intptr_t>(value - code)), 0);
+        (static_cast<target_intptr_t>(value - code), 0);
     }
 
   for (; methods; methods = pairSecond(t, methods)) {
@@ -987,7 +989,7 @@ copy(Thread* t, object typeMaps, object p, uint8_t* dst)
       if (field->type == Type_object) {
         unsigned offset = field->targetOffset / TargetBytesPerWord;
         reinterpret_cast<uint32_t*>(mask)[offset / 32]
-          |= static_cast<uint32_t>(1) << (offset % 32);
+          |= targetV4(static_cast<uint32_t>(1) << (offset % 32));
       }
     }
@@ -1027,14 +1029,15 @@ copy(Thread* t, object typeMaps, object p, uint8_t* dst)
       switch (field->type) {
       case Type_object:
         reinterpret_cast<uint32_t*>(objectMask)[i / 32]
-          |= static_cast<uint32_t>(1) << (i % 32);
+          |= targetV4(static_cast<uint32_t>(1) << (i % 32));
         break;
 
       case Type_float:
       case Type_double:
         reinterpret_cast<target_uintptr_t*>(poolMask)
           [i / TargetBitsPerWord]
-          |= static_cast<target_uintptr_t>(1) << (i % TargetBitsPerWord);
+          |= targetVW
+            (static_cast<target_uintptr_t>(1) << (i % TargetBitsPerWord));
         break;
 
       default:
@@ -1083,7 +1086,7 @@ copy(Thread* t, object typeMaps, object referer, unsigned refererOffset,
       if (field->type == Type_object) {
         unsigned offset = field->targetOffset / TargetBytesPerWord;
         reinterpret_cast<uint32_t*>(dst + (TargetBytesPerWord * 2))
-          [offset / 32] |= static_cast<uint32_t>(1) << (offset % 32);
+          [offset / 32] |= targetV4(static_cast<uint32_t>(1) << (offset % 32));
       }
     }
@@ -1092,7 +1095,7 @@ copy(Thread* t, object typeMaps, object referer, unsigned refererOffset,
     {
       unsigned offset = map->targetFixedSizeInWords;
       reinterpret_cast<uint32_t*>(dst + (TargetBytesPerWord * 2))
-        [offset / 32] |= static_cast<uint32_t>(1) << (offset % 32);
+        [offset / 32] |= targetV4(static_cast<uint32_t>(1) << (offset % 32));
     }
   } else {
     copy(t, typeMaps, p, dst);
@@ -1241,7 +1244,7 @@ makeHeapImage(Thread* t, BootImage* image, target_uintptr_t* heap,
   HeapWalker* w = makeHeapWalker(t, &visitor);
   visitRoots(t, image, w, constants);
 
-  image->heapSize = visitor.position * BytesPerWord;
+  image->heapSize = visitor.position * TargetBytesPerWord;
 
   return w;
 }
@@ -1642,7 +1645,7 @@ main(int ac, const char** av)
 {
   if (ac < 4 or ac > 7) {
     fprintf(stderr, "usage: %s <classpath> <bootimage file> <code file>"
-            "[<class name> [<method name> [<method spec>]]]\n", av[0]);
+            " [<class name> [<method name> [<method spec>]]]\n", av[0]);
     return -1;
   }
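The targetV4/targetVW conversions introduced above (and in the compiler change further down) are there so the boot-image writer emits values in the target's byte order when cross-compiling, e.g. from an x86 host to a big-endian PowerPC target. A minimal sketch of the idea, assuming a TARGET_OPPOSITE_ENDIAN-style switch rather than Avian's exact definitions:

#include <stdint.h>

// Sketch only: return a 32-bit value in target byte order.  When host and
// target endianness differ the bytes are swapped; otherwise the value is
// passed through unchanged.  targetVW would do the same for a
// target-word-sized value.
inline uint32_t targetV4(uint32_t v) {
#ifdef TARGET_OPPOSITE_ENDIAN
  return ((v >> 24) & 0x000000FF)
       | ((v >>  8) & 0x0000FF00)
       | ((v <<  8) & 0x00FF0000)
       | ((v << 24) & 0xFF000000);
#else
  return v;
#endif
}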

View File

@@ -380,7 +380,7 @@ compareIpToMethodBounds(Thread* t, intptr_t ip, object method)
   if (ip < start) {
     return -1;
   } else if (ip < start + static_cast<intptr_t>
-             (compiledSize(start) + BytesPerWord))
+             (compiledSize(start) + TargetBytesPerWord))
   {
     return 0;
   } else {
@@ -6554,10 +6554,11 @@ simpleFrameMapTableSize(MyThread* t, object method, object map)
 }
 
 uint8_t*
-finish(MyThread* t, Allocator* allocator, Assembler* a, const char* name,
+finish(MyThread* t, FixedAllocator* allocator, Assembler* a, const char* name,
        unsigned length)
 {
-  uint8_t* start = static_cast<uint8_t*>(allocator->allocate(pad(length)));
+  uint8_t* start = static_cast<uint8_t*>
+    (allocator->allocate(length, TargetBytesPerWord));
 
   a->setDestination(start);
   a->write();
@@ -6872,10 +6873,11 @@ finish(MyThread* t, FixedAllocator* allocator, Context* context)
   unsigned codeSize = c->resolve
     (allocator->base + allocator->offset + TargetBytesPerWord);
 
-  unsigned total = pad(codeSize) + pad(c->poolSize()) + TargetBytesPerWord;
+  unsigned total = pad(codeSize, TargetBytesPerWord)
+    + pad(c->poolSize(), TargetBytesPerWord) + TargetBytesPerWord;
 
   target_uintptr_t* code = static_cast<target_uintptr_t*>
-    (allocator->allocate(total));
+    (allocator->allocate(total, TargetBytesPerWord));
   code[0] = codeSize;
   uint8_t* start = reinterpret_cast<uint8_t*>(code + 1);
@@ -8291,7 +8293,7 @@ MyProcessor*
 processor(MyThread* t);
 
 void
-compileThunks(MyThread* t, Allocator* allocator);
+compileThunks(MyThread* t, FixedAllocator* allocator);
 
 class MyProcessor: public Processor {
  public:
@@ -8347,8 +8349,14 @@ class MyProcessor: public Processor {
 #define THUNK(s) thunkTable[s##Index] = voidPointer(s);
 #include "thunks.cpp"
 #undef THUNK
 
+    // Set the dummyIndex entry to a constant which should require the
+    // maximum number of bytes to represent in assembly code
+    // (i.e. can't be represented by a smaller number of bytes and
+    // implicitly sign- or zero-extended).  We'll use this property
+    // later to determine the maximum size of a thunk in the thunk
+    // table.
     thunkTable[dummyIndex] = reinterpret_cast<void*>
-      (~static_cast<uintptr_t>(0));
+      (static_cast<uintptr_t>(UINT64_C(0x5555555555555555)));
   }
 
   virtual Thread*
@@ -9396,10 +9404,7 @@ compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
   if (processor(t)->bootImage) {
     Assembler::Memory table(t->arch->thread(), TargetThreadThunkTable);
-    // use Architecture::virtualCallTarget register here as a scratch
-    // register; any register that isn't used to pass arguments would
-    // be acceptable:
-    Assembler::Register scratch(t->arch->virtualCallTarget());
+    Assembler::Register scratch(t->arch->scratch());
     a->apply(Move, TargetBytesPerWord, MemoryOperand, &table,
              TargetBytesPerWord, RegisterOperand, &scratch);
     Assembler::Memory proc(scratch.low, index * TargetBytesPerWord);
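For boot-image builds the emitted call now reads the thunk table out of the Thread structure through the architecture's dedicated scratch register (register 5 on ARM, 31 on PowerPC, rax on x86, per the scratch() hunks in this commit) and calls through the indexed slot. Roughly the equivalent of the following C++, where ThreadStub and its thunkTable field are illustrative stand-ins for the real Thread layout and the TargetThreadThunkTable offset:

// Illustrative stand-in for the VM's Thread layout; the real code reads the
// table pointer at offset TargetThreadThunkTable from the thread register.
struct ThreadStub {
  void** thunkTable;  // table of thunk entry points, one per ThunkIndex
};

typedef void (*Thunk)();

inline void callThunk(ThreadStub* thread, unsigned index) {
  void** table = thread->thunkTable;                    // Move [thread + offset] -> scratch
  Thunk thunk = reinterpret_cast<Thunk>(table[index]);  // load the indexed slot
  thunk();                                              // call through the scratch register
}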
@@ -9418,7 +9423,7 @@ compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
 }
 
 void
-compileThunks(MyThread* t, Allocator* allocator)
+compileThunks(MyThread* t, FixedAllocator* allocator)
 {
   MyProcessor* p = processor(t);
@@ -9559,7 +9564,8 @@ compileThunks(MyThread* t, FixedAllocator* allocator)
     p->thunks.table.length = a->endBlock(false)->resolve(0, 0);
 
     p->thunks.table.start = static_cast<uint8_t*>
-      (allocator->allocate(p->thunks.table.length * ThunkCount));
+      (allocator->allocate
+       (p->thunks.table.length * ThunkCount, TargetBytesPerWord));
   }
 
   uint8_t* start = p->thunks.table.start;
@@ -9678,7 +9684,8 @@ compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
   *size = a->endBlock(false)->resolve(0, 0);
 
-  uint8_t* start = static_cast<uint8_t*>(codeAllocator(t)->allocate(*size));
+  uint8_t* start = static_cast<uint8_t*>
+    (codeAllocator(t)->allocate(*size, TargetBytesPerWord));
 
   a->setDestination(start);
   a->write();

View File

@@ -6953,14 +6953,14 @@ class MyCompiler: public Compiler {
         (c.machineCode + pad(c.machineCodeSize, TargetBytesPerWord) + i);
 
       if (n->promise->resolved()) {
-        *target = n->promise->value();
+        *target = targetVW(n->promise->value());
       } else {
         class Listener: public Promise::Listener {
          public:
           Listener(target_intptr_t* target): target(target){ }
 
           virtual bool resolve(int64_t value, void** location) {
-            *target = value;
+            *target = targetVW(value);
             if (location) *location = target;
             return true;
           }

View File

@@ -1145,8 +1145,9 @@ parseFieldTable(Thread* t, Stream& s, object class_, object pool)
       unsigned size = fieldSize(t, code);
 
       if (flags & ACC_STATIC) {
-        while (staticOffset % size) {
-          ++ staticOffset;
+        unsigned excess = (staticOffset % size) % BytesPerWord;
+        if (excess) {
+          staticOffset += BytesPerWord - excess;
         }
 
        fieldOffset(t, field) = staticOffset;
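The replacement logic here (and in its bootimage.cpp counterpart above) rounds a misaligned static-field offset up to the next machine-word boundary instead of stepping byte-by-byte to the field's own size, which in particular leaves 8-byte statics word-aligned on 32-bit targets. A minimal standalone sketch mirroring the lines above:

// Sketch of the padding rule above: round `offset` up only as far as the
// machine word requires for a field of `fieldSize` bytes.
unsigned alignStaticOffset(unsigned offset, unsigned fieldSize,
                           unsigned bytesPerWord) {
  unsigned excess = (offset % fieldSize) % bytesPerWord;
  if (excess) {
    offset += bytesPerWord - excess;
  }
  return offset;
}
// e.g. with bytesPerWord == 4: alignStaticOffset(6, 8, 4) == 8, the same
// result the old while-loop produced, but alignStaticOffset(4, 8, 4) == 4,
// where the old loop would have advanced the offset all the way to 8.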

View File

@@ -1796,8 +1796,8 @@ class FixedAllocator: public Allocator {
     abort(s);
   }
 
-  virtual void* allocate(unsigned size) {
-    unsigned paddedSize = pad(size);
+  void* allocate(unsigned size, unsigned padAlignment) {
+    unsigned paddedSize = pad(size, padAlignment);
     expect(s, offset + paddedSize < capacity);
 
     void* p = base + offset;
@@ -1805,6 +1805,10 @@ class FixedAllocator: public Allocator {
     return p;
   }
 
+  virtual void* allocate(unsigned size) {
+    return allocate(size, BytesPerWord);
+  }
+
   virtual void free(const void* p, unsigned size) {
     if (p >= base and static_cast<const uint8_t*>(p) + size == base + offset) {
       offset -= size;
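The allocator now pads each request to an explicit alignment (callers in compile.cpp pass TargetBytesPerWord) before bumping its offset, while the original one-argument virtual overload keeps its old behaviour by delegating with BytesPerWord. The pad() it relies on is the usual round-up-to-a-power-of-two-multiple helper; a minimal sketch of that rounding, given as an assumption rather than Avian's exact definition:

// Sketch: round `size` up to the next multiple of `alignment`, where
// `alignment` is assumed to be a power of two such as BytesPerWord.
inline unsigned pad(unsigned size, unsigned alignment) {
  return (size + (alignment - 1)) & ~(alignment - 1);
}
// pad(13, 4) == 16, pad(16, 4) == 16, pad(17, 8) == 24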

View File

@@ -789,14 +789,14 @@ updateImmediate(System* s, void* dst, int32_t src, unsigned size, bool address)
   switch (size) {
   case 4: {
     int32_t* p = static_cast<int32_t*>(dst);
-    int r = (p[1] >> 21) & 31;
+    int r = (targetV4(p[1]) >> 21) & 31;
 
     if (address) {
-      p[0] = lis(r, ha16(src));
-      p[1] |= (src & 0xFFFF);
+      p[0] = targetV4(lis(r, ha16(src)));
+      p[1] |= targetV4(src & 0xFFFF);
     } else {
-      p[0] = lis(r, src >> 16);
-      p[1] = ori(r, r, src);
+      p[0] = targetV4(lis(r, src >> 16));
+      p[1] = targetV4(ori(r, r, src));
     }
   } break;
@@ -2076,6 +2076,10 @@ class MyArchitecture: public Assembler::Architecture {
     return 0;
   }
 
+  virtual int scratch() {
+    return 31;
+  }
+
   virtual int stack() {
     return StackRegister;
   }

View File

@@ -2763,6 +2763,10 @@ class MyArchitecture: public Assembler::Architecture {
     return useSSE(&c) ? FloatRegisterMask : 0;
   }
 
+  virtual int scratch() {
+    return rax;
+  }
+
   virtual int stack() {
     return rsp;
   }