Merge branch 'ios' into armvfp
@@ -1847,6 +1847,10 @@ class MyArchitecture: public Assembler::Architecture {
     return 0;
   }

+  virtual int scratch() {
+    return 5;
+  }
+
   virtual int stack() {
     return StackRegister;
   }
@@ -329,6 +329,7 @@ class Assembler {
   virtual uint32_t generalRegisterMask() = 0;
   virtual uint32_t floatRegisterMask() = 0;

+  virtual int scratch() = 0;
   virtual int stack() = 0;
   virtual int thread() = 0;
   virtual int returnLow() = 0;
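Note: the new Architecture::scratch() accessor is implemented later in this merge as r5 on ARM, r31 on PowerPC, and rax on x86. A minimal sketch of the pattern, using simplified stand-in class names; the stack() value shown for ARM is an assumption for the example, not taken from this diff:

    #include <cstdio>

    // Stand-ins for Assembler::Architecture and one backend; only the
    // scratch()/stack() accessors touched by this merge are modeled.
    struct ArchitectureSketch {
      virtual ~ArchitectureSketch() { }
      virtual int scratch() = 0;  // a register the JIT may clobber freely
      virtual int stack() = 0;    // the stack pointer register
    };

    struct ArmArchitectureSketch: public ArchitectureSketch {
      virtual int scratch() { return 5; }  // r5, matching the ARM hunk above
      virtual int stack() { return 13; }   // assumed: r13 is the ARM sp
    };

    int main() {
      ArmArchitectureSketch arch;
      printf("scratch register: r%d\n", arch.scratch());
      return 0;
    }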
@@ -496,8 +496,10 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
       }

       if (fieldFlags(t, field) & ACC_STATIC) {
-        while (targetStaticOffset % targetSize) {
-          ++ targetStaticOffset;
+        unsigned excess = (targetStaticOffset % targetSize)
+          % TargetBytesPerWord;
+        if (excess) {
+          targetStaticOffset += TargetBytesPerWord - excess;
         }

         buildStaticOffset = fieldOffset(t, field);
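Note: the byte-at-a-time alignment loop is replaced by a closed-form adjustment that only ever rounds up to the next target word boundary. A small sketch of the arithmetic with hypothetical standalone names (the real code adjusts targetStaticOffset in place):

    #include <cassert>

    // Mirror of the excess-based adjustment above: round `offset` up to the
    // next multiple of `bytesPerWord` whenever offset % size leaves a
    // partial word.
    unsigned alignStaticOffset(unsigned offset, unsigned size,
                               unsigned bytesPerWord)
    {
      unsigned excess = (offset % size) % bytesPerWord;
      if (excess) {
        offset += bytesPerWord - excess;
      }
      return offset;
    }

    int main() {
      // an 8-byte field at offset 6 on a 4-byte-word target moves to 8:
      assert(alignStaticOffset(6, 8, 4) == 8);
      // an offset already on a word boundary is left alone:
      assert(alignStaticOffset(4, 8, 4) == 4);
      return 0;
    }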
@@ -672,7 +674,7 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
       expect(t, value >= code);

       addresses->listener->resolve
-        (targetVW(static_cast<target_intptr_t>(value - code)), 0);
+        (static_cast<target_intptr_t>(value - code), 0);
     }

     for (; methods; methods = pairSecond(t, methods)) {
@@ -987,7 +989,7 @@ copy(Thread* t, object typeMaps, object p, uint8_t* dst)
       if (field->type == Type_object) {
         unsigned offset = field->targetOffset / TargetBytesPerWord;
         reinterpret_cast<uint32_t*>(mask)[offset / 32]
-          |= static_cast<uint32_t>(1) << (offset % 32);
+          |= targetV4(static_cast<uint32_t>(1) << (offset % 32));
       }
     }

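Note: throughout these copy() hunks the pattern is the same: bit `offset` of a flat bitmap lives in 32-bit word offset / 32 at bit position offset % 32, and the merge wraps the bit value in targetV4 so the stored word uses the target's byte order. A sketch of the indexing alone (host byte order, no targetV4):

    #include <cassert>
    #include <cstdint>

    // Set bit `offset` in a bitmap stored as an array of 32-bit words.
    void setMaskBit(uint32_t* mask, unsigned offset) {
      mask[offset / 32] |= static_cast<uint32_t>(1) << (offset % 32);
    }

    int main() {
      uint32_t mask[2] = { 0, 0 };
      setMaskBit(mask, 37);            // lands in word 1, bit 5
      assert(mask[1] == (1u << 5));
      return 0;
    }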
@@ -1027,14 +1029,15 @@ copy(Thread* t, object typeMaps, object p, uint8_t* dst)
         switch (field->type) {
         case Type_object:
           reinterpret_cast<uint32_t*>(objectMask)[i / 32]
-            |= static_cast<uint32_t>(1) << (i % 32);
+            |= targetV4(static_cast<uint32_t>(1) << (i % 32));
           break;

         case Type_float:
         case Type_double:
           reinterpret_cast<target_uintptr_t*>(poolMask)
             [i / TargetBitsPerWord]
-            |= static_cast<target_uintptr_t>(1) << (i % TargetBitsPerWord);
+            |= targetVW
+            (static_cast<target_uintptr_t>(1) << (i % TargetBitsPerWord));
           break;

         default:
@@ -1083,7 +1086,7 @@ copy(Thread* t, object typeMaps, object referer, unsigned refererOffset,
       if (field->type == Type_object) {
         unsigned offset = field->targetOffset / TargetBytesPerWord;
         reinterpret_cast<uint32_t*>(dst + (TargetBytesPerWord * 2))
-          [offset / 32] |= static_cast<uint32_t>(1) << (offset % 32);
+          [offset / 32] |= targetV4(static_cast<uint32_t>(1) << (offset % 32));
       }
     }

@@ -1092,7 +1095,7 @@ copy(Thread* t, object typeMaps, object referer, unsigned refererOffset,
     {
       unsigned offset = map->targetFixedSizeInWords;
       reinterpret_cast<uint32_t*>(dst + (TargetBytesPerWord * 2))
-        [offset / 32] |= static_cast<uint32_t>(1) << (offset % 32);
+        [offset / 32] |= targetV4(static_cast<uint32_t>(1) << (offset % 32));
     }
   } else {
     copy(t, typeMaps, p, dst);
@@ -1241,7 +1244,7 @@ makeHeapImage(Thread* t, BootImage* image, target_uintptr_t* heap,
   HeapWalker* w = makeHeapWalker(t, &visitor);
   visitRoots(t, image, w, constants);

-  image->heapSize = visitor.position * BytesPerWord;
+  image->heapSize = visitor.position * TargetBytesPerWord;

   return w;
 }
@@ -380,7 +380,7 @@ compareIpToMethodBounds(Thread* t, intptr_t ip, object method)
   if (ip < start) {
     return -1;
   } else if (ip < start + static_cast<intptr_t>
-             (compiledSize(start) + BytesPerWord))
+             (compiledSize(start) + TargetBytesPerWord))
   {
     return 0;
   } else {
@@ -6554,10 +6554,11 @@ simpleFrameMapTableSize(MyThread* t, object method, object map)
 }

 uint8_t*
-finish(MyThread* t, Allocator* allocator, Assembler* a, const char* name,
+finish(MyThread* t, FixedAllocator* allocator, Assembler* a, const char* name,
        unsigned length)
 {
-  uint8_t* start = static_cast<uint8_t*>(allocator->allocate(pad(length)));
+  uint8_t* start = static_cast<uint8_t*>
+    (allocator->allocate(length, TargetBytesPerWord));

   a->setDestination(start);
   a->write();
@@ -6872,10 +6873,11 @@ finish(MyThread* t, FixedAllocator* allocator, Context* context)
   unsigned codeSize = c->resolve
     (allocator->base + allocator->offset + TargetBytesPerWord);

-  unsigned total = pad(codeSize) + pad(c->poolSize()) + TargetBytesPerWord;
+  unsigned total = pad(codeSize, TargetBytesPerWord)
+    + pad(c->poolSize(), TargetBytesPerWord) + TargetBytesPerWord;

   target_uintptr_t* code = static_cast<target_uintptr_t*>
-    (allocator->allocate(total));
+    (allocator->allocate(total, TargetBytesPerWord));
   code[0] = codeSize;
   uint8_t* start = reinterpret_cast<uint8_t*>(code + 1);

@@ -8291,7 +8293,7 @@ MyProcessor*
 processor(MyThread* t);

 void
-compileThunks(MyThread* t, Allocator* allocator);
+compileThunks(MyThread* t, FixedAllocator* allocator);

 class MyProcessor: public Processor {
  public:
@@ -8347,8 +8349,14 @@ class MyProcessor: public Processor {
 #define THUNK(s) thunkTable[s##Index] = voidPointer(s);
 #include "thunks.cpp"
 #undef THUNK
+    // Set the dummyIndex entry to a constant which should require the
+    // maximum number of bytes to represent in assembly code
+    // (i.e. can't be represented by a smaller number of bytes and
+    // implicitly sign- or zero-extended). We'll use this property
+    // later to determine the maximum size of a thunk in the thunk
+    // table.
     thunkTable[dummyIndex] = reinterpret_cast<void*>
-      (~static_cast<uintptr_t>(0));
+      (static_cast<uintptr_t>(UINT64_C(0x5555555555555555)));
   }

   virtual Thread*
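Note: the new comment documents why ~0 is replaced by 0x5555555555555555. The old value is all ones, i.e. -1, which an assembler can emit via a short sign-extended immediate, so the dummy thunk could end up smaller than the true worst case; the alternating bit pattern cannot be shortened by sign or zero extension. A standalone check of that property (fitsSignExtended32 is an illustrative helper, not part of the codebase):

    #include <cstdint>
    #include <cstdio>

    // True when a 64-bit value survives a round trip through a sign-extended
    // 32-bit immediate, i.e. when a shorter encoding may be available.
    bool fitsSignExtended32(uint64_t v) {
      return static_cast<uint64_t>
        (static_cast<int64_t>(static_cast<int32_t>(v))) == v;
    }

    int main() {
      printf("~0:          %d\n", fitsSignExtended32(~UINT64_C(0)));         // 1
      printf("0x5555...55: %d\n",
             fitsSignExtended32(UINT64_C(0x5555555555555555)));              // 0
      return 0;
    }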
@@ -9396,10 +9404,7 @@ compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)

   if (processor(t)->bootImage) {
     Assembler::Memory table(t->arch->thread(), TargetThreadThunkTable);
-    // use Architecture::virtualCallTarget register here as a scratch
-    // register; any register that isn't used to pass arguments would
-    // be acceptable:
-    Assembler::Register scratch(t->arch->virtualCallTarget());
+    Assembler::Register scratch(t->arch->scratch());
     a->apply(Move, TargetBytesPerWord, MemoryOperand, &table,
              TargetBytesPerWord, RegisterOperand, &scratch);
     Assembler::Memory proc(scratch.low, index * TargetBytesPerWord);
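Note: the deleted comment had justified borrowing Architecture::virtualCallTarget() as a temporary; with the scratch() accessor added earlier in this merge, the backend now names its scratch register explicitly.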
@@ -9418,7 +9423,7 @@ compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
 }

 void
-compileThunks(MyThread* t, Allocator* allocator)
+compileThunks(MyThread* t, FixedAllocator* allocator)
 {
   MyProcessor* p = processor(t);

@@ -9559,7 +9564,8 @@ compileThunks(MyThread* t, Allocator* allocator)
     p->thunks.table.length = a->endBlock(false)->resolve(0, 0);

     p->thunks.table.start = static_cast<uint8_t*>
-      (allocator->allocate(p->thunks.table.length * ThunkCount));
+      (allocator->allocate
+       (p->thunks.table.length * ThunkCount, TargetBytesPerWord));
   }

   uint8_t* start = p->thunks.table.start;
@@ -9678,7 +9684,8 @@ compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)

   *size = a->endBlock(false)->resolve(0, 0);

-  uint8_t* start = static_cast<uint8_t*>(codeAllocator(t)->allocate(*size));
+  uint8_t* start = static_cast<uint8_t*>
+    (codeAllocator(t)->allocate(*size, TargetBytesPerWord));

   a->setDestination(start);
   a->write();
@@ -6953,14 +6953,14 @@ class MyCompiler: public Compiler {
         (c.machineCode + pad(c.machineCodeSize, TargetBytesPerWord) + i);

       if (n->promise->resolved()) {
-        *target = n->promise->value();
+        *target = targetVW(n->promise->value());
       } else {
         class Listener: public Promise::Listener {
          public:
           Listener(target_intptr_t* target): target(target){ }

           virtual bool resolve(int64_t value, void** location) {
-            *target = value;
+            *target = targetVW(value);
             if (location) *location = target;
             return true;
           }
@@ -1145,8 +1145,9 @@ parseFieldTable(Thread* t, Stream& s, object class_, object pool)

       unsigned size = fieldSize(t, code);
       if (flags & ACC_STATIC) {
-        while (staticOffset % size) {
-          ++ staticOffset;
+        unsigned excess = (staticOffset % size) % BytesPerWord;
+        if (excess) {
+          staticOffset += BytesPerWord - excess;
         }

         fieldOffset(t, field) = staticOffset;
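Note: this is the build-time counterpart of the makeCodeImage change above, using BytesPerWord instead of TargetBytesPerWord. For example, with BytesPerWord == 4, a field of size 8 at staticOffset 6 gives excess = (6 % 8) % 4 == 2, so staticOffset jumps straight to 8 instead of being incremented twice; only word alignment is enforced, so an offset of 4 would be left as-is even though it is not 8-aligned.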
@@ -1796,8 +1796,8 @@ class FixedAllocator: public Allocator {
     abort(s);
   }

-  virtual void* allocate(unsigned size) {
-    unsigned paddedSize = pad(size);
+  void* allocate(unsigned size, unsigned padAlignment) {
+    unsigned paddedSize = pad(size, padAlignment);
     expect(s, offset + paddedSize < capacity);

     void* p = base + offset;
@@ -1805,6 +1805,10 @@ class FixedAllocator: public Allocator {
     return p;
   }

+  virtual void* allocate(unsigned size) {
+    return allocate(size, BytesPerWord);
+  }
+
   virtual void free(const void* p, unsigned size) {
     if (p >= base and static_cast<const uint8_t*>(p) + size == base + offset) {
       offset -= size;
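Note: these two hunks give FixedAllocator an alignment-aware allocate(size, padAlignment), with the original virtual allocate(size) delegating to it; that is what lets the boot-image code above pad to TargetBytesPerWord while running on the host. A stripped-down sketch of that shape (no System, abort() or expect(); pad() here is a local stand-in):

    #include <cassert>
    #include <cstdint>

    struct FixedAllocatorSketch {
      uint8_t* base;
      unsigned capacity;
      unsigned offset;

      FixedAllocatorSketch(uint8_t* base, unsigned capacity):
        base(base), capacity(capacity), offset(0)
      { }

      virtual ~FixedAllocatorSketch() { }

      // Local stand-in for pad(): round size up to a multiple of alignment.
      static unsigned pad(unsigned size, unsigned alignment) {
        unsigned rem = size % alignment;
        return rem ? size + alignment - rem : size;
      }

      // Alignment-aware bump allocation, as in the first hunk above.
      void* allocate(unsigned size, unsigned padAlignment) {
        unsigned paddedSize = pad(size, padAlignment);
        assert(offset + paddedSize < capacity);
        void* p = base + offset;
        offset += paddedSize;
        return p;
      }

      // The original virtual interface, now delegating with a word-sized
      // default (sizeof(void*) stands in for BytesPerWord).
      virtual void* allocate(unsigned size) {
        return allocate(size, sizeof(void*));
      }
    };

    int main() {
      uint8_t buffer[64];
      FixedAllocatorSketch a(buffer, sizeof(buffer));
      a.allocate(3, 8);           // padded to 8 bytes
      assert(a.offset == 8);
      return 0;
    }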
@@ -789,14 +789,14 @@ updateImmediate(System* s, void* dst, int32_t src, unsigned size, bool address)
   switch (size) {
   case 4: {
     int32_t* p = static_cast<int32_t*>(dst);
-    int r = (p[1] >> 21) & 31;
+    int r = (targetV4(p[1]) >> 21) & 31;

     if (address) {
-      p[0] = lis(r, ha16(src));
-      p[1] |= (src & 0xFFFF);
+      p[0] = targetV4(lis(r, ha16(src)));
+      p[1] |= targetV4(src & 0xFFFF);
     } else {
-      p[0] = lis(r, src >> 16);
-      p[1] = ori(r, r, src);
+      p[0] = targetV4(lis(r, src >> 16));
+      p[1] = targetV4(ori(r, r, src));
     }
   } break;

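Note: targetV4 appears on both sides of the accesses here, wrapping instruction words as they are stored and the value read back when the r field is extracted above. A sketch of what such a helper plausibly does, assuming it swaps only when host and target byte orders differ; the two constants are assumptions for the example, not Avian's actual configuration:

    #include <cstdint>
    #include <cstdio>

    // Assumed for the example: little-endian host producing a big-endian
    // PowerPC image.
    const bool HostBigEndian = false;
    const bool TargetBigEndian = true;

    uint32_t swap32(uint32_t v) {
      return (v << 24)
        | ((v & 0x0000FF00) << 8)
        | ((v >> 8) & 0x0000FF00)
        | (v >> 24);
    }

    // In the spirit of targetV4: a byte-order conversion that is a no-op when
    // host and target already agree.
    uint32_t targetV4Sketch(uint32_t v) {
      return HostBigEndian == TargetBigEndian ? v : swap32(v);
    }

    int main() {
      printf("0x%08x -> 0x%08x\n", 0x12345678u, targetV4Sketch(0x12345678u));
      return 0;
    }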
@@ -2076,6 +2076,10 @@ class MyArchitecture: public Assembler::Architecture {
     return 0;
   }

+  virtual int scratch() {
+    return 31;
+  }
+
   virtual int stack() {
     return StackRegister;
   }
@@ -2763,6 +2763,10 @@ class MyArchitecture: public Assembler::Architecture {
     return useSSE(&c) ? FloatRegisterMask : 0;
   }

+  virtual int scratch() {
+    return rax;
+  }
+
   virtual int stack() {
     return rsp;
   }