progress towards cross-endian bootimage builds

This fixes a number of bugs concerning cross-architecture bootimage
builds involving different endiannesses.  More work remains before such
builds actually work.
Joel Dice 2011-09-16 20:53:08 -06:00
parent 178dd7af34
commit 349d381d95
9 changed files with 271 additions and 194 deletions
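
The recurring pattern in this change set: every value that ends up in the boot image, its bitmaps, or emitted target code is converted to the target's byte order (via the new targetV2/targetV4/targetV8/targetVW helpers and targetMarkBit) before being memcpy'd into place. Below is a minimal, self-contained sketch of that pattern; the swap32 and writeTargetWord helpers are illustrative stand-ins invented for this example, not part of the Avian sources.

    // Sketch: byte-swap a host value into target order, then copy the raw
    // bytes into the image buffer.  swap32/writeTargetWord are hypothetical;
    // the commit itself uses targetV4/targetVW from target.h.
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static uint32_t swap32(uint32_t v) {
      return ((v >> 24) & 0x000000FF) |
             ((v >> 8)  & 0x0000FF00) |
             ((v << 8)  & 0x00FF0000) |
             ((v << 24) & 0xFF000000);
    }

    // Write a host value into an image buffer in the target's byte order.
    static void writeTargetWord(uint8_t* dst, uint32_t hostValue,
                                bool oppositeEndian) {
      uint32_t v = oppositeEndian ? swap32(hostValue) : hostValue;
      memcpy(dst, &v, sizeof v);  // copy bytes; never reinterpret in place
    }

    int main() {
      uint8_t image[4];
      writeTargetWord(image, 0x11223344, true);
      // On a little-endian host this prints "11 22 33 44", i.e. the
      // big-endian encoding of 0x11223344.
      for (int i = 0; i < 4; ++i) printf("%02x ", image[i]);
      printf("\n");
      return 0;
    }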


@@ -161,7 +161,7 @@ inline bool isInt24(target_intptr_t v) { return v == (v & 0xffffff); }
 inline bool isInt32(target_intptr_t v) { return v == static_cast<int32_t>(v); }
 inline int carry16(target_intptr_t v) { return static_cast<int16_t>(v) < 0 ? 1 : 0; }
-inline bool isOfWidth(long long i, int size) { return static_cast<unsigned long long>(i) >> size == 0; }
+inline bool isOfWidth(int64_t i, int size) { return static_cast<uint64_t>(i) >> size == 0; }
 inline bool isOfWidth(int i, int size) { return static_cast<unsigned>(i) >> size == 0; }
 const unsigned FrameHeaderSize = 1;


@@ -265,7 +265,7 @@ targetFieldOffset(Thread* t, object typeMaps, object field)
 object
 makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
-              uintptr_t* codeMap, const char* className,
+              target_uintptr_t* codeMap, const char* className,
               const char* methodName, const char* methodSpec, object typeMaps)
 {
   PROTECT(t, typeMaps);
@@ -678,16 +678,21 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
       if (flat) {
         offset |= TargetBootFlatConstant;
       }
+      offset = targetVW(offset);
       memcpy(location, &offset, TargetBytesPerWord);
       expect(t, reinterpret_cast<intptr_t>(location)
              >= reinterpret_cast<intptr_t>(code));
-      markBit(codeMap, reinterpret_cast<intptr_t>(location)
+      targetMarkBit(codeMap, reinterpret_cast<intptr_t>(location)
              - reinterpret_cast<intptr_t>(code));
     }
     for (; methods; methods = pairSecond(t, methods)) {
+      intptr_t address = codeCompiled(t, methodCode(t, pairFirst(t, methods)));
+      reinterpret_cast<target_uintptr_t*>(address)[-1]
+        = targetVW(reinterpret_cast<target_uintptr_t*>(address)[-1]);
       codeCompiled(t, methodCode(t, pairFirst(t, methods)))
         -= reinterpret_cast<uintptr_t>(code);
     }
@@ -815,21 +820,21 @@ copy(Thread* t, uint8_t* src, uint8_t* dst, Type type)
   case Type_int16_t: {
     int16_t s; memcpy(&s, src, 2);
-    int16_t d = TARGET_V2(s);
+    int16_t d = targetV2(s);
     memcpy(dst, &d, 2);
   } break;
   case Type_int32_t:
   case Type_float: {
     int32_t s; memcpy(&s, src, 4);
-    int32_t d = TARGET_V4(s);
+    int32_t d = targetV4(s);
     memcpy(dst, &d, 4);
   } break;
   case Type_int64_t:
   case Type_double: {
     int64_t s; memcpy(&s, src, 8);
-    int64_t d = TARGET_V8(s);
+    int64_t d = targetV8(s);
     memcpy(dst, &d, 8);
   } break;
@@ -839,7 +844,7 @@ copy(Thread* t, uint8_t* src, uint8_t* dst, Type type)
   case Type_intptr_t: {
     intptr_t s; memcpy(&s, src, BytesPerWord);
-    target_intptr_t d = TARGET_VW(s);
+    target_intptr_t d = targetVW(s);
     memcpy(dst, &d, TargetBytesPerWord);
   } break;
@@ -946,14 +951,14 @@ copy(Thread* t, object typeMaps, object p, uint8_t* dst)
       TypeMap* classMap = reinterpret_cast<TypeMap*>
         (&byteArrayBody(t, array, 0));
-      fixedSize = TARGET_V2
+      fixedSize = targetV2
         (classMap->targetFixedSizeInWords * TargetBytesPerWord);
       arrayElementSize = classMap->targetArrayElementSizeInBytes;
     } else if (classFixedSize(t, p) == BytesPerWord * 2
                and classArrayElementSize(t, p) == BytesPerWord)
     {
-      fixedSize = TARGET_V2(TargetBytesPerWord * 2);
+      fixedSize = targetV2(TargetBytesPerWord * 2);
       arrayElementSize = TargetBytesPerWord;
     } else {
@@ -974,7 +979,7 @@ copy(Thread* t, object typeMaps, object p, uint8_t* dst)
     switch (map->kind) {
     case TypeMap::NormalKind:
       if (objectClass(t, p) == type(t, Machine::FieldType)) {
-        uint16_t offset = TARGET_V2(targetFieldOffset(t, typeMaps, p));
+        uint16_t offset = targetV2(targetFieldOffset(t, typeMaps, p));
         memcpy(dst + TargetFieldOffset, &offset, 2);
       }
       break;
@@ -983,7 +988,7 @@ copy(Thread* t, object typeMaps, object p, uint8_t* dst)
       unsigned maskSize = singletonMaskSize
         (map->targetFixedSizeInWords - 2, TargetBitsPerWord);
-      target_uintptr_t targetLength = TARGET_VW
+      target_uintptr_t targetLength = targetVW
         (map->targetFixedSizeInWords - 2 + maskSize);
       memcpy(dst + TargetBytesPerWord, &targetLength, TargetBytesPerWord);
@@ -1016,7 +1021,7 @@ copy(Thread* t, object typeMaps, object p, uint8_t* dst)
       unsigned objectMaskSize = singletonMaskSize
         (map->targetFixedSizeInWords - 2 + poolMaskSize, TargetBitsPerWord);
-      target_uintptr_t targetLength = TARGET_VW
+      target_uintptr_t targetLength = targetVW
        (map->targetFixedSizeInWords - 2 + poolMaskSize + objectMaskSize);
      memcpy(dst + TargetBytesPerWord, &targetLength, TargetBytesPerWord);
@@ -1080,7 +1085,7 @@ copy(Thread* t, object typeMaps, object referer, unsigned refererOffset,
      unsigned length = ceiling(objectMaskCount(map), 32);
-      target_uintptr_t targetLength = TARGET_VW(length);
+      target_uintptr_t targetLength = targetVW(length);
      memcpy(dst + TargetBytesPerWord, &targetLength, TargetBytesPerWord);
@@ -1143,9 +1148,9 @@ makeHeapImage(Thread* t, BootImage* image, target_uintptr_t* heap,
      unsigned mark = heap[offset] & (~TargetPointerMask);
      unsigned value = number | (mark << TargetBootShift);
-      if (value) markBit(map, offset);
-      heap[offset] = value;
+      if (value) targetMarkBit(map, offset);
+      heap[offset] = targetVW(value);
    }
  }
@@ -1189,7 +1194,7 @@ makeHeapImage(Thread* t, BootImage* image, target_uintptr_t* heap,
      memcpy(reinterpret_cast<uint8_t*>(heap + position)
             + TargetFixieHasMask, &hasMask, 1);
-      uint32_t targetSize = TARGET_V4(size);
+      uint32_t targetSize = targetV4(size);
      memcpy(reinterpret_cast<uint8_t*>(heap + position)
             + TargetFixieSize, &targetSize, 4);
@@ -1255,8 +1260,8 @@ makeHeapImage(Thread* t, BootImage* image, target_uintptr_t* heap,
 }
 void
-updateConstants(Thread* t, object constants, uint8_t* code, uintptr_t* codeMap,
-                HeapMap* heapTable)
+updateConstants(Thread* t, object constants, uint8_t* code,
+                target_uintptr_t* codeMap, HeapMap* heapTable)
 {
   for (; constants; constants = tripleThird(t, constants)) {
     unsigned target = heapTable->find(tripleFirst(t, constants));
@@ -1272,6 +1277,7 @@ updateConstants(Thread* t, object constants, uint8_t* code, uintptr_t* codeMap,
      if (flat) {
        offset |= TargetBootFlatConstant;
      }
+      offset = targetVW(offset);
      memcpy(location, &offset, TargetBytesPerWord);
      expect(t, reinterpret_cast<intptr_t>(location)
@@ -1283,8 +1289,8 @@ updateConstants(Thread* t, object constants, uint8_t* code, uintptr_t* codeMap,
      //          - reinterpret_cast<intptr_t>(code)),
      //         static_cast<unsigned>(offset));
-      markBit(codeMap, reinterpret_cast<intptr_t>(location)
+      targetMarkBit(codeMap, reinterpret_cast<intptr_t>(location)
              - reinterpret_cast<intptr_t>(code));
    }
  }
 }
@@ -1295,6 +1301,13 @@ offset(object a, uintptr_t* b)
   return reinterpret_cast<uintptr_t>(b) - reinterpret_cast<uintptr_t>(a);
 }
+BootImage::Thunk
+targetThunk(BootImage::Thunk t)
+{
+  return BootImage::Thunk
+    (targetV4(t.start), targetV4(t.frameSavedOffset), targetV4(t.length));
+}
 void
 writeBootImage2(Thread* t, FILE* out, BootImage* image, uint8_t* code,
                 unsigned codeCapacity, const char* className,
@@ -1302,7 +1315,7 @@ writeBootImage2(Thread* t, FILE* out, BootImage* image, uint8_t* code,
 {
   Zone zone(t->m->system, t->m->heap, 64 * 1024);
-  uintptr_t* codeMap = static_cast<uintptr_t*>
+  target_uintptr_t* codeMap = static_cast<target_uintptr_t*>
     (t->m->heap->allocate(codeMapSize(codeCapacity)));
   memset(codeMap, 0, codeMapSize(codeCapacity));
@@ -1549,8 +1562,8 @@ writeBootImage2(Thread* t, FILE* out, BootImage* image, uint8_t* code,
           (t, classLoaderMap(t, root(t, Machine::BootLoader)));
         it.hasMore();)
    {
-      bootClassTable[i++] = heapWalker->map()->find
-        (tripleSecond(t, it.next()));
+      bootClassTable[i++] = targetVW
+        (heapWalker->map()->find(tripleSecond(t, it.next())));
    }
  }
@@ -1565,7 +1578,8 @@ writeBootImage2(Thread* t, FILE* out, BootImage* image, uint8_t* code,
           (t, classLoaderMap(t, root(t, Machine::AppLoader)));
         it.hasMore();)
    {
-      appClassTable[i++] = heapWalker->map()->find(tripleSecond(t, it.next()));
+      appClassTable[i++] = targetVW
+        (heapWalker->map()->find(tripleSecond(t, it.next())));
    }
  }
@@ -1575,8 +1589,9 @@ writeBootImage2(Thread* t, FILE* out, BootImage* image, uint8_t* code,
  { unsigned i = 0;
    for (HashMapIterator it(t, root(t, Machine::StringMap)); it.hasMore();) {
-      stringTable[i++] = heapWalker->map()->find
-        (jreferenceTarget(t, tripleFirst(t, it.next())));
+      stringTable[i++] = targetVW
+        (heapWalker->map()->find
+         (jreferenceTarget(t, tripleFirst(t, it.next()))));
    }
  }
@@ -1592,7 +1607,19 @@ writeBootImage2(Thread* t, FILE* out, BootImage* image, uint8_t* code,
          image->heapSize, image->codeSize);
  if (true) {
-    fwrite(image, sizeof(BootImage), 1, out);
+    { BootImage targetImage;
+#define FIELD(name) targetImage.name = targetV4(image->name);
+#include "bootimage-fields.cpp"
+#undef FIELD
+#define THUNK_FIELD(name) \
+      targetImage.thunks.name = targetThunk(image->thunks.name);
+#include "bootimage-fields.cpp"
+#undef THUNK_FIELD
+      fwrite(&targetImage, sizeof(BootImage), 1, out);
+    }
    fwrite(bootClassTable, image->bootClassCount * sizeof(unsigned), 1, out);
    fwrite(appClassTable, image->appClassCount * sizeof(unsigned), 1, out);


@@ -36,42 +36,16 @@ class BootImage {
   class ThunkCollection {
    public:
-    Thunk default_;
-    Thunk defaultVirtual;
-    Thunk native;
-    Thunk aioob;
-    Thunk stackOverflow;
-    Thunk table;
+#define THUNK_FIELD(name) Thunk name;
+#include "bootimage-fields.cpp"
+#undef THUNK_FIELD
   } PACKED;
   static const uint32_t Magic = 0x22377322;
-  uint32_t magic;
-  uint32_t heapSize;
-  uint32_t codeSize;
-  uint32_t bootClassCount;
-  uint32_t appClassCount;
-  uint32_t stringCount;
-  uint32_t callCount;
-  uint32_t bootLoader;
-  uint32_t appLoader;
-  uint32_t types;
-  uint32_t methodTree;
-  uint32_t methodTreeSentinal;
-  uint32_t virtualThunks;
-  uint32_t compileMethodCall;
-  uint32_t compileVirtualMethodCall;
-  uint32_t invokeNativeCall;
-  uint32_t throwArrayIndexOutOfBoundsCall;
-  uint32_t throwStackOverflowCall;
-#define THUNK(s) uint32_t s##Call;
-#include "thunks.cpp"
-#undef THUNK
+#define FIELD(name) uint32_t name;
+#include "bootimage-fields.cpp"
+#undef FIELD
   ThunkCollection thunks;
 } PACKED;
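
The thunk and field lists above are now generated from bootimage-fields.cpp via the THUNK_FIELD and FIELD macros, so one field list can drive both the struct layout here and the per-field byte swapping in writeBootImage2. The contents of bootimage-fields.cpp are not shown in this commit; the fragment below is a hypothetical, self-contained demo of the X-macro idea (it uses a list macro instead of a separately included file, and only a few of the field names that appear above), not the actual Avian file.

    // Hypothetical X-macro demo (not Avian's bootimage-fields.cpp): one field
    // list expanded twice -- once into struct members, once into byte swaps.
    #include <stdint.h>
    #include <stdio.h>

    #define BOOT_IMAGE_FIELDS(X) \
      X(magic)                   \
      X(heapSize)                \
      X(codeSize)

    struct Image {
    #define FIELD(name) uint32_t name;
      BOOT_IMAGE_FIELDS(FIELD)
    #undef FIELD
    };

    static uint32_t swap32(uint32_t v) {
      return ((v >> 24) & 0xFF) | ((v >> 8) & 0xFF00) |
             ((v << 8) & 0xFF0000) | (v << 24);
    }

    // Produce a copy of the image with every field in target byte order.
    static Image toTargetOrder(const Image& in) {
      Image out;
    #define FIELD(name) out.name = swap32(in.name);
      BOOT_IMAGE_FIELDS(FIELD)
    #undef FIELD
      return out;
    }

    int main() {
      Image img = { 0x22377322, 1024, 2048 };
      Image t = toTargetOrder(img);
      printf("%08x %08x %08x\n", (unsigned) t.magic,
             (unsigned) t.heapSize, (unsigned) t.codeSize);
      return 0;
    }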


@@ -11,6 +11,10 @@
 #ifndef COMMON_H
 #define COMMON_H
+#ifndef __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS
+#endif
 #include "stdlib.h"
 #include "stdarg.h"
 #include "stddef.h"


@@ -6500,14 +6500,6 @@ simpleFrameMapTableSize(MyThread* t, object method, object map)
   return ceiling(intArrayLength(t, map) * size, 32 + size);
 }
-unsigned
-codeSingletonSizeInBytes(MyThread*, unsigned codeSizeInBytes)
-{
-  unsigned count = ceiling(codeSizeInBytes, BytesPerWord);
-  unsigned size = count + singletonMaskSize(count);
-  return pad(SingletonBody + (size * BytesPerWord));
-}
 uint8_t*
 finish(MyThread* t, Allocator* allocator, Assembler* a, const char* name,
        unsigned length)
@@ -8751,10 +8743,12 @@ class MyProcessor: public Processor {
      for (object p = arrayBody(t, root(t, CallTable), i);
           p; p = callNodeNext(t, p))
      {
-        table[index++] = callNodeAddress(t, p)
-          - reinterpret_cast<uintptr_t>(codeAllocator.base);
-        table[index++] = w->map()->find(callNodeTarget(t, p))
-          | (static_cast<unsigned>(callNodeFlags(t, p)) << TargetBootShift);
+        table[index++] = targetVW
+          (callNodeAddress(t, p)
+           - reinterpret_cast<uintptr_t>(codeAllocator.base));
+        table[index++] = targetVW
+          (w->map()->find(callNodeTarget(t, p))
+           | (static_cast<unsigned>(callNodeFlags(t, p)) << TargetBootShift));
      }
    }
@@ -9128,6 +9122,7 @@ fixupHeap(MyThread* t UNUSED, uintptr_t* map, unsigned size, uintptr_t* heap)
      if (number) {
        *p = reinterpret_cast<uintptr_t>(heap + (number - 1)) | mark;
+        // fprintf(stderr, "fixup %d: %d 0x%x\n", index, static_cast<unsigned>(number), static_cast<unsigned>(*p));
      } else {
        *p = mark;
      }


@@ -8,7 +8,9 @@
    There is NO WARRANTY for this software.  See license.txt for
    details. */
-#define __STDC_CONSTANT_MACROS
+#ifndef __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS
+#endif
 #ifdef __APPLE__
 # include "CoreFoundation/CoreFoundation.h"


@@ -151,13 +151,13 @@ inline int unha16(int32_t high, int32_t low) {
 }
 inline bool
-isInt16(intptr_t v)
+isInt16(target_intptr_t v)
 {
   return v == static_cast<int16_t>(v);
 }
 inline int
-carry16(intptr_t v)
+carry16(target_intptr_t v)
 {
   return static_cast<int16_t>(v) < 0 ? 1 : 0;
 }
@@ -173,7 +173,8 @@ const unsigned AlignArguments = true;
 #endif
 const unsigned StackAlignmentInBytes = 16;
-const unsigned StackAlignmentInWords = StackAlignmentInBytes / BytesPerWord;
+const unsigned StackAlignmentInWords
+  = StackAlignmentInBytes / TargetBytesPerWord;
 const int StackRegister = 1;
 const int ThreadRegister = 13;
@@ -380,7 +381,7 @@ updateOffset(System* s, uint8_t* instruction, bool conditional, int64_t value,
   }
   int32_t* p = reinterpret_cast<int32_t*>(instruction);
-  *p = (v & mask) | ((~mask) & *p);
+  *p = targetV4((v & mask) | ((~mask) & targetV4(*p)));
   return instruction + 4;
 }
@@ -512,11 +513,11 @@ padding(MyBlock* b, unsigned offset)
   for (JumpEvent* e = b->jumpEventHead; e; e = e->next) {
     if (e->offset <= offset) {
       for (JumpOffset* o = e->jumpOffsetHead; o; o = o->next) {
-        total += BytesPerWord;
+        total += TargetBytesPerWord;
       }
       if (needJump(b)) {
-        total += BytesPerWord;
+        total += TargetBytesPerWord;
       }
     } else {
       break;
@@ -571,7 +572,7 @@ resolve(MyBlock* b)
     if (b->next == 0 or b->next->jumpEventHead) {
       append = true;
     } else {
-      int32_t v = (b->start + b->size + b->next->size + BytesPerWord)
+      int32_t v = (b->start + b->size + b->next->size + TargetBytesPerWord)
         - (c->jumpOffsetHead->offset + c->jumpOffsetHead->block->start);
       append = not bounded(2, 16, v);
@@ -661,7 +662,7 @@ branchIndex(ArchitectureContext* c UNUSED, OperandType operand1,
 using namespace isa;
-inline void emit(Context* con, int code) { con->code.append4(code); }
+inline void emit(Context* con, int code) { con->code.append4(targetV4(code)); }
 inline int newTemp(Context* con) { return con->client->acquireTemporary(); }
 inline void freeTemp(Context* con, int r) { con->client->releaseTemporary(r); }
 inline int64_t getValue(Assembler::Constant* c) { return c->value->value(); }
@@ -895,7 +896,7 @@ appendConstantPoolEntry(Context* c, Promise* constant)
 void
 jumpR(Context* c, unsigned size UNUSED, Assembler::Register* target)
 {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   emit(c, mtctr(target->low));
   emit(c, bctr());
@@ -905,8 +906,8 @@ void
 swapRR(Context* c, unsigned aSize, Assembler::Register* a,
        unsigned bSize, Assembler::Register* b)
 {
-  assert(c, aSize == BytesPerWord);
-  assert(c, bSize == BytesPerWord);
+  assert(c, aSize == TargetBytesPerWord);
+  assert(c, bSize == TargetBytesPerWord);
   Assembler::Register tmp(c->client->acquireTemporary());
   moveRR(c, aSize, a, bSize, &tmp);
@@ -985,7 +986,7 @@ moveCR2(Context* c, unsigned, Assembler::Constant* src,
    }
  } else {
    appendImmediateTask
-      (c, src->value, offset(c), BytesPerWord, promiseOffset, false);
+      (c, src->value, offset(c), TargetBytesPerWord, promiseOffset, false);
    emit(c, lis(dst->low, 0));
    emit(c, ori(dst->low, dst->low, 0));
  }
@@ -1011,7 +1012,7 @@ void addR(Context* con, unsigned size, Assembler::Register* a, Assembler::Regist
 }
 void addC(Context* con, unsigned size, Assembler::Constant* a, Assembler::Register* b, Assembler::Register* t) {
-  assert(con, size == BytesPerWord);
+  assert(con, size == TargetBytesPerWord);
   int32_t i = getValue(a);
   if(i) {
@@ -1033,7 +1034,7 @@ void subR(Context* con, unsigned size, Assembler::Register* a, Assembler::Regist
 }
 void subC(Context* c, unsigned size, Assembler::Constant* a, Assembler::Register* b, Assembler::Register* t) {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   ResolvedPromise promise(- a->value->value());
   Assembler::Constant constant(&promise);
@@ -1113,7 +1114,7 @@ normalize(Context* c, int offset, int index, unsigned scale,
      ResolvedPromise scalePromise(log(scale));
      Assembler::Constant scaleConstant(&scalePromise);
-      shiftLeftC(c, BytesPerWord, &scaleConstant,
+      shiftLeftC(c, TargetBytesPerWord, &scaleConstant,
                 &unscaledIndex, &normalizedIndex);
      scaled = normalizedIndex.low;
@@ -1127,7 +1128,7 @@ normalize(Context* c, int offset, int index, unsigned scale,
      ResolvedPromise offsetPromise(offset);
      Assembler::Constant offsetConstant(&offsetPromise);
-      addC(c, BytesPerWord, &offsetConstant,
+      addC(c, TargetBytesPerWord, &offsetConstant,
          &untranslatedIndex, &normalizedIndex);
    }
@@ -1208,8 +1209,8 @@ void
 moveAndUpdateRM(Context* c, unsigned srcSize UNUSED, Assembler::Register* src,
                 unsigned dstSize UNUSED, Assembler::Memory* dst)
 {
-  assert(c, srcSize == BytesPerWord);
-  assert(c, dstSize == BytesPerWord);
+  assert(c, srcSize == TargetBytesPerWord);
+  assert(c, dstSize == TargetBytesPerWord);
   if (dst->index == NoRegister) {
     emit(c, stwu(src->low, dst->base, dst->offset));
@@ -1512,7 +1513,7 @@ moveAR2(Context* c, unsigned srcSize UNUSED, Assembler::Address* src,
    Assembler::Memory memory(dst->low, 0, -1, 0);
    appendImmediateTask
-      (c, src->address, offset(c), BytesPerWord, promiseOffset, true);
+      (c, src->address, offset(c), TargetBytesPerWord, promiseOffset, true);
    emit(c, lis(dst->low, 0));
    moveMR(c, dstSize, &memory, dstSize, dst);
@@ -1722,7 +1723,7 @@ branchRR(Context* c, TernaryOperation op, unsigned size,
          Assembler::Register* a, Assembler::Register* b,
          Assembler::Constant* target)
 {
-  if (size > BytesPerWord) {
+  if (size > TargetBytesPerWord) {
     Assembler::Register ah(a->high);
     Assembler::Register bh(b->high);
@@ -1739,13 +1740,13 @@ branchCR(Context* c, TernaryOperation op, unsigned size,
          Assembler::Constant* a, Assembler::Register* b,
          Assembler::Constant* target)
 {
-  if (size > BytesPerWord) {
+  if (size > TargetBytesPerWord) {
     int64_t v = a->value->value();
-    ResolvedPromise low(v & ~static_cast<uintptr_t>(0));
+    ResolvedPromise low(v & ~static_cast<target_uintptr_t>(0));
     Assembler::Constant al(&low);
-    ResolvedPromise high((v >> 32) & ~static_cast<uintptr_t>(0));
+    ResolvedPromise high((v >> 32) & ~static_cast<target_uintptr_t>(0));
     Assembler::Constant ah(&high);
     Assembler::Register bh(b->high);
@@ -1763,7 +1764,7 @@ branchRM(Context* c, TernaryOperation op, unsigned size,
          Assembler::Register* a, Assembler::Memory* b,
          Assembler::Constant* target)
 {
-  assert(c, size <= BytesPerWord);
+  assert(c, size <= TargetBytesPerWord);
   compareRM(c, size, a, size, b);
   branch(c, op, target);
@@ -1774,7 +1775,7 @@ branchCM(Context* c, TernaryOperation op, unsigned size,
          Assembler::Constant* a, Assembler::Memory* b,
          Assembler::Constant* target)
 {
-  assert(c, size <= BytesPerWord);
+  assert(c, size <= TargetBytesPerWord);
   compareCM(c, size, a, size, b);
   branch(c, op, target);
@@ -1832,7 +1833,7 @@ negateRR(Context* c, unsigned srcSize, Assembler::Register* src,
 void
 callR(Context* c, unsigned size UNUSED, Assembler::Register* target)
 {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   emit(c, mtctr(target->low));
   emit(c, bctrl());
@@ -1841,7 +1842,7 @@ callR(Context* c, unsigned size UNUSED, Assembler::Register* target)
 void
 callC(Context* c, unsigned size UNUSED, Assembler::Constant* target)
 {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   appendOffsetTask(c, target->value, offset(c), false);
   emit(c, bl(0));
@@ -1850,51 +1851,51 @@ callC(Context* c, unsigned size UNUSED, Assembler::Constant* target)
 void
 longCallC(Context* c, unsigned size UNUSED, Assembler::Constant* target)
 {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   Assembler::Register tmp(0);
-  moveCR2(c, BytesPerWord, target, BytesPerWord, &tmp, 12);
-  callR(c, BytesPerWord, &tmp);
+  moveCR2(c, TargetBytesPerWord, target, TargetBytesPerWord, &tmp, 12);
+  callR(c, TargetBytesPerWord, &tmp);
 }
 void
 alignedLongCallC(Context* c, unsigned size UNUSED, Assembler::Constant* target)
 {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   Assembler::Register tmp(c->client->acquireTemporary());
   Assembler::Address address(appendConstantPoolEntry(c, target->value));
-  moveAR2(c, BytesPerWord, &address, BytesPerWord, &tmp, 12);
-  callR(c, BytesPerWord, &tmp);
+  moveAR2(c, TargetBytesPerWord, &address, TargetBytesPerWord, &tmp, 12);
+  callR(c, TargetBytesPerWord, &tmp);
   c->client->releaseTemporary(tmp.low);
 }
 void
 longJumpC(Context* c, unsigned size UNUSED, Assembler::Constant* target)
 {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   Assembler::Register tmp(0);
-  moveCR2(c, BytesPerWord, target, BytesPerWord, &tmp, 12);
-  jumpR(c, BytesPerWord, &tmp);
+  moveCR2(c, TargetBytesPerWord, target, TargetBytesPerWord, &tmp, 12);
+  jumpR(c, TargetBytesPerWord, &tmp);
 }
 void
 alignedLongJumpC(Context* c, unsigned size UNUSED, Assembler::Constant* target)
 {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   Assembler::Register tmp(c->client->acquireTemporary());
   Assembler::Address address(appendConstantPoolEntry(c, target->value));
-  moveAR2(c, BytesPerWord, &address, BytesPerWord, &tmp, 12);
-  jumpR(c, BytesPerWord, &tmp);
+  moveAR2(c, TargetBytesPerWord, &address, TargetBytesPerWord, &tmp, 12);
+  jumpR(c, TargetBytesPerWord, &tmp);
   c->client->releaseTemporary(tmp.low);
 }
 void
 jumpC(Context* c, unsigned size UNUSED, Assembler::Constant* target)
 {
-  assert(c, size == BytesPerWord);
+  assert(c, size == TargetBytesPerWord);
   appendOffsetTask(c, target->value, offset(c), false);
   emit(c, b(0));
@@ -2088,7 +2089,7 @@ class MyArchitecture: public Assembler::Architecture {
   }
   virtual int returnHigh() {
-    return (BytesPerWord == 4 ? 3 : NoRegister);
+    return (TargetBytesPerWord == 4 ? 3 : NoRegister);
   }
   virtual int virtualCallTarget() {
@@ -2180,9 +2181,9 @@ class MyArchitecture: public Assembler::Architecture {
    case LongCall:
    case LongJump: {
-      updateImmediate(c.s, static_cast<uint8_t*>(returnAddress) - 12,
-                      reinterpret_cast<intptr_t>(newTarget), BytesPerWord,
-                      false);
+      updateImmediate
+        (c.s, static_cast<uint8_t*>(returnAddress) - 12,
+         reinterpret_cast<intptr_t>(newTarget), TargetBytesPerWord, false);
    } break;
    case AlignedLongCall:
@@ -2200,12 +2201,12 @@ class MyArchitecture: public Assembler::Architecture {
    return 4;
  }
-  virtual void setConstant(void* dst, uintptr_t constant) {
-    updateImmediate(c.s, dst, constant, BytesPerWord, false);
+  virtual void setConstant(void* dst, uint64_t constant) {
+    updateImmediate(c.s, dst, constant, TargetBytesPerWord, false);
  }
  virtual unsigned alignFrameSize(unsigned sizeInWords) {
-    const unsigned alignment = StackAlignmentInBytes / BytesPerWord;
+    const unsigned alignment = StackAlignmentInWords;
    return (ceiling(sizeInWords + FrameFooterSize, alignment) * alignment);
  }
@@ -2369,7 +2370,7 @@ class MyArchitecture: public Assembler::Architecture {
    // need to do the checks ourselves.  Using an inline check
    // should be faster than calling an out-of-line thunk, but the
    // thunk is easier, so they's what we do for now.
-    if (true) {//if (BytesPerWord == 4 and aSize == 8) {
+    if (true) {//if (TargetBytesPerWord == 4 and aSize == 8) {
      *thunk = true;
    } else {
      *aTypeMask = (1 << RegisterOperand);
@@ -2451,7 +2452,7 @@ class MyAssembler: public Assembler {
    Constant handlerConstant
      (new (c.zone->allocate(sizeof(ResolvedPromise)))
       ResolvedPromise(handler));
-    branchRM(&c, JumpIfGreaterOrEqual, BytesPerWord, &stack, &stackLimit,
+    branchRM(&c, JumpIfGreaterOrEqual, TargetBytesPerWord, &stack, &stackLimit,
             &handlerConstant);
  }
@@ -2459,12 +2460,14 @@ class MyAssembler: public Assembler {
    Register returnAddress(0);
    emit(&c, mflr(returnAddress.low));
-    Memory returnAddressDst(StackRegister, ReturnAddressOffset * BytesPerWord);
-    moveRM(&c, BytesPerWord, &returnAddress, BytesPerWord, &returnAddressDst);
+    Memory returnAddressDst
+      (StackRegister, ReturnAddressOffset * TargetBytesPerWord);
+    moveRM(&c, TargetBytesPerWord, &returnAddress, TargetBytesPerWord,
+           &returnAddressDst);
    Register stack(StackRegister);
    Memory stackDst(ThreadRegister, stackOffset);
-    moveRM(&c, BytesPerWord, &stack, BytesPerWord, &stackDst);
+    moveRM(&c, TargetBytesPerWord, &stack, TargetBytesPerWord, &stackDst);
  }
  virtual void pushFrame(unsigned argumentCount, ...) {
@@ -2480,7 +2483,7 @@ class MyAssembler: public Assembler {
      arguments[i].size = va_arg(a, unsigned);
      arguments[i].type = static_cast<OperandType>(va_arg(a, int));
      arguments[i].operand = va_arg(a, Operand*);
-      footprint += ceiling(arguments[i].size, BytesPerWord);
+      footprint += ceiling(arguments[i].size, TargetBytesPerWord);
    }
    va_end(a);
@@ -2493,17 +2496,19 @@ class MyAssembler: public Assembler {
        apply(Move,
              arguments[i].size, arguments[i].type, arguments[i].operand,
-              pad(arguments[i].size), RegisterOperand, &dst);
+              pad(arguments[i].size, TargetBytesPerWord), RegisterOperand,
+              &dst);
-        offset += ceiling(arguments[i].size, BytesPerWord);
+        offset += ceiling(arguments[i].size, TargetBytesPerWord);
      } else {
-        Memory dst(ThreadRegister, (offset + FrameFooterSize) * BytesPerWord);
+        Memory dst
+          (ThreadRegister, (offset + FrameFooterSize) * TargetBytesPerWord);
        apply(Move,
              arguments[i].size, arguments[i].type, arguments[i].operand,
-              pad(arguments[i].size), MemoryOperand, &dst);
+              pad(arguments[i].size, TargetBytesPerWord), MemoryOperand, &dst);
-        offset += ceiling(arguments[i].size, BytesPerWord);
+        offset += ceiling(arguments[i].size, TargetBytesPerWord);
      }
    }
  }
@@ -2512,31 +2517,37 @@ class MyAssembler: public Assembler {
    Register returnAddress(0);
    emit(&c, mflr(returnAddress.low));
-    Memory returnAddressDst(StackRegister, ReturnAddressOffset * BytesPerWord);
-    moveRM(&c, BytesPerWord, &returnAddress, BytesPerWord, &returnAddressDst);
+    Memory returnAddressDst
+      (StackRegister, ReturnAddressOffset * TargetBytesPerWord);
+    moveRM(&c, TargetBytesPerWord, &returnAddress, TargetBytesPerWord,
+           &returnAddressDst);
    Register stack(StackRegister);
-    Memory stackDst(StackRegister, -footprint * BytesPerWord);
-    moveAndUpdateRM(&c, BytesPerWord, &stack, BytesPerWord, &stackDst);
+    Memory stackDst(StackRegister, -footprint * TargetBytesPerWord);
+    moveAndUpdateRM
+      (&c, TargetBytesPerWord, &stack, TargetBytesPerWord, &stackDst);
  }
  virtual void adjustFrame(unsigned difference) {
    Register nextStack(0);
    Memory stackSrc(StackRegister, 0);
-    moveMR(&c, BytesPerWord, &stackSrc, BytesPerWord, &nextStack);
+    moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &nextStack);
-    Memory stackDst(StackRegister, -difference * BytesPerWord);
-    moveAndUpdateRM(&c, BytesPerWord, &nextStack, BytesPerWord, &stackDst);
+    Memory stackDst(StackRegister, -difference * TargetBytesPerWord);
+    moveAndUpdateRM
+      (&c, TargetBytesPerWord, &nextStack, TargetBytesPerWord, &stackDst);
  }
  virtual void popFrame(unsigned) {
    Register stack(StackRegister);
    Memory stackSrc(StackRegister, 0);
-    moveMR(&c, BytesPerWord, &stackSrc, BytesPerWord, &stack);
+    moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &stack);
    Register returnAddress(0);
-    Memory returnAddressSrc(StackRegister, ReturnAddressOffset * BytesPerWord);
-    moveMR(&c, BytesPerWord, &returnAddressSrc, BytesPerWord, &returnAddress);
+    Memory returnAddressSrc
+      (StackRegister, ReturnAddressOffset * TargetBytesPerWord);
+    moveMR(&c, TargetBytesPerWord, &returnAddressSrc, TargetBytesPerWord,
+           &returnAddress);
    emit(&c, mtlr(returnAddress.low));
  }
@@ -2550,32 +2561,37 @@ class MyAssembler: public Assembler {
    if (offset) {
      Register tmp(0);
      Memory returnAddressSrc
-        (StackRegister, (ReturnAddressOffset + footprint) * BytesPerWord);
-      moveMR(&c, BytesPerWord, &returnAddressSrc, BytesPerWord, &tmp);
+        (StackRegister, (ReturnAddressOffset + footprint)
+         * TargetBytesPerWord);
+      moveMR(&c, TargetBytesPerWord, &returnAddressSrc, TargetBytesPerWord,
+             &tmp);
      emit(&c, mtlr(tmp.low));
-      Memory stackSrc(StackRegister, footprint * BytesPerWord);
-      moveMR(&c, BytesPerWord, &stackSrc, BytesPerWord, &tmp);
+      Memory stackSrc(StackRegister, footprint * TargetBytesPerWord);
+      moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &tmp);
-      Memory stackDst(StackRegister, (footprint - offset) * BytesPerWord);
-      moveAndUpdateRM(&c, BytesPerWord, &tmp, BytesPerWord, &stackDst);
+      Memory stackDst
+        (StackRegister, (footprint - offset) * TargetBytesPerWord);
+      moveAndUpdateRM
+        (&c, TargetBytesPerWord, &tmp, TargetBytesPerWord, &stackDst);
      if (returnAddressSurrogate != NoRegister) {
        assert(&c, offset > 0);
        Register ras(returnAddressSurrogate);
        Memory dst
-          (StackRegister, (ReturnAddressOffset + offset) * BytesPerWord);
-        moveRM(&c, BytesPerWord, &ras, BytesPerWord, &dst);
+          (StackRegister, (ReturnAddressOffset + offset)
+           * TargetBytesPerWord);
+        moveRM(&c, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
      }
      if (framePointerSurrogate != NoRegister) {
        assert(&c, offset > 0);
        Register fps(framePointerSurrogate);
-        Memory dst(StackRegister, offset * BytesPerWord);
-        moveRM(&c, BytesPerWord, &fps, BytesPerWord, &dst);
+        Memory dst(StackRegister, offset * TargetBytesPerWord);
+        moveRM(&c, TargetBytesPerWord, &fps, TargetBytesPerWord, &dst);
      }
    } else {
      popFrame(footprint);
@@ -2596,12 +2612,13 @@ class MyAssembler: public Assembler {
    if (TailCalls and argumentFootprint > StackAlignmentInWords) {
      Register tmp(0);
      Memory stackSrc(StackRegister, 0);
-      moveMR(&c, BytesPerWord, &stackSrc, BytesPerWord, &tmp);
+      moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &tmp);
      Memory stackDst(StackRegister,
                      (argumentFootprint - StackAlignmentInWords)
-                      * BytesPerWord);
-      moveAndUpdateRM(&c, BytesPerWord, &tmp, BytesPerWord, &stackDst);
+                      * TargetBytesPerWord);
+      moveAndUpdateRM
+        (&c, TargetBytesPerWord, &tmp, TargetBytesPerWord, &stackDst);
    }
    return_(&c);
@@ -2614,17 +2631,18 @@ class MyAssembler: public Assembler {
    Register tmp1(0);
    Memory stackSrc(StackRegister, 0);
-    moveMR(&c, BytesPerWord, &stackSrc, BytesPerWord, &tmp1);
+    moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &tmp1);
    Register tmp2(5);
    Memory newStackSrc(ThreadRegister, stackOffsetFromThread);
-    moveMR(&c, BytesPerWord, &newStackSrc, BytesPerWord, &tmp2);
+    moveMR(&c, TargetBytesPerWord, &newStackSrc, TargetBytesPerWord, &tmp2);
    Register stack(StackRegister);
-    subR(&c, BytesPerWord, &stack, &tmp2, &tmp2);
+    subR(&c, TargetBytesPerWord, &stack, &tmp2, &tmp2);
    Memory stackDst(StackRegister, 0, tmp2.low);
-    moveAndUpdateRM(&c, BytesPerWord, &tmp1, BytesPerWord, &stackDst);
+    moveAndUpdateRM
+      (&c, TargetBytesPerWord, &tmp1, TargetBytesPerWord, &stackDst);
    return_(&c);
  }
@@ -2657,7 +2675,7 @@ class MyAssembler: public Assembler {
  {
    if (isBranch(op)) {
      assert(&c, aSize == bSize);
-      assert(&c, cSize == BytesPerWord);
+      assert(&c, cSize == TargetBytesPerWord);
      assert(&c, cType == ConstantOperand);
      arch_->c.branchOperations[branchIndex(&(arch_->c), aType, bType)]
@@ -2701,21 +2719,21 @@ class MyAssembler: public Assembler {
        uint8_t* address = dst + dstOffset + jumpTableSize;
        if (needJump(b)) {
-          address += BytesPerWord;
+          address += TargetBytesPerWord;
        }
        o->task->jumpAddress = address;
-        jumpTableSize += BytesPerWord;
+        jumpTableSize += TargetBytesPerWord;
      }
      assert(&c, jumpTableSize);
      if (needJump(b)) {
-        write4(dst + dstOffset, ::b(jumpTableSize + BytesPerWord));
+        write4(dst + dstOffset, ::b(jumpTableSize + TargetBytesPerWord));
      }
-      dstOffset += jumpTableSize + BytesPerWord;
+      dstOffset += jumpTableSize + TargetBytesPerWord;
    }
    unsigned size = b->size - blockOffset;
@@ -2728,10 +2746,10 @@ class MyAssembler: public Assembler {
    }
    unsigned index = c.code.length();
-    assert(&c, index % BytesPerWord == 0);
+    assert(&c, index % TargetBytesPerWord == 0);
    for (ConstantPoolEntry* e = c.constantPool; e; e = e->next) {
      e->address = dst + index;
-      index += BytesPerWord;
+      index += TargetBytesPerWord;
    }
    for (Task* t = c.tasks; t; t = t->next) {
@@ -2764,7 +2782,7 @@ class MyAssembler: public Assembler {
    MyBlock* b = c.lastBlock;
    unsigned thisEventOffset = c.code.length() - b->offset;
    if (b->jumpOffsetHead) {
-      int32_t v = (thisEventOffset + BytesPerWord)
+      int32_t v = (thisEventOffset + TargetBytesPerWord)
        - b->jumpOffsetHead->offset;
      if (v > 0 and not bounded(2, 16, v)) {
@@ -2799,7 +2817,7 @@ class MyAssembler: public Assembler {
  }
  virtual unsigned footerSize() {
-    return c.constantPoolCount * BytesPerWord;
+    return c.constantPoolCount * TargetBytesPerWord;
  }
  virtual void dispose() {


@@ -11,37 +11,81 @@
 #ifndef TARGET_H
 #define TARGET_H
-#define TARGET_V1(v) (v)
+namespace vm {
+template <class T>
+inline T
+targetV1(T v)
+{
+  return v;
+}
 #ifdef TARGET_OPPOSITE_ENDIAN
-# define TARGET_V2(v) \
-   ((((v) >> 8) & 0xFF) | \
-    (((v) << 8)))
-# define TARGET_V4(v) \
-   ((((v) >> 24) & 0x000000FF) | \
-    (((v) >> 8) & 0x0000FF00) | \
-    (((v) << 8) & 0x00FF0000) | \
-    (((v) << 24)))
-# define TARGET_V8(v) \
-   (((static_cast<uint64_t>(v) >> 56) & UINT64_C(0x00000000000000FF)) | \
-    ((static_cast<uint64_t>(v) >> 40) & UINT64_C(0x000000000000FF00)) | \
-    ((static_cast<uint64_t>(v) >> 24) & UINT64_C(0x0000000000FF0000)) | \
-    ((static_cast<uint64_t>(v) >> 8) & UINT64_C(0x00000000FF000000)) | \
-    ((static_cast<uint64_t>(v) << 8) & UINT64_C(0x000000FF00000000)) | \
-    ((static_cast<uint64_t>(v) << 24) & UINT64_C(0x0000FF0000000000)) | \
-    ((static_cast<uint64_t>(v) << 40) & UINT64_C(0x00FF000000000000)) | \
-    ((static_cast<uint64_t>(v) << 56)))
-#else
-# define TARGET_V2(v) (v)
-# define TARGET_V4(v) (v)
-# define TARGET_V8(v) (v)
-#endif
-namespace vm {
+template <class T>
+inline T
+targetV2(T v)
+{
+  return (((v >> 8) & 0xFF) |
+          ((v << 8)));
+}
+template <class T>
+inline T
+targetV4(T v)
+{
+  return (((v >> 24) & 0x000000FF) |
+          ((v >> 8) & 0x0000FF00) |
+          ((v << 8) & 0x00FF0000) |
+          ((v << 24)));
+}
+template <class T>
+inline T
+targetV8(T v)
+{
+  return (((static_cast<uint64_t>(v) >> 56) & UINT64_C(0x00000000000000FF)) |
+          ((static_cast<uint64_t>(v) >> 40) & UINT64_C(0x000000000000FF00)) |
+          ((static_cast<uint64_t>(v) >> 24) & UINT64_C(0x0000000000FF0000)) |
+          ((static_cast<uint64_t>(v) >> 8) & UINT64_C(0x00000000FF000000)) |
+          ((static_cast<uint64_t>(v) << 8) & UINT64_C(0x000000FF00000000)) |
+          ((static_cast<uint64_t>(v) << 24) & UINT64_C(0x0000FF0000000000)) |
+          ((static_cast<uint64_t>(v) << 40) & UINT64_C(0x00FF000000000000)) |
+          ((static_cast<uint64_t>(v) << 56)));
+}
+#else
+template <class T>
+inline T
+targetV2(T v)
+{
+  return v;
+}
+template <class T>
+inline T
+targetV4(T v)
+{
+  return v;
+}
+template <class T>
+inline T
+targetV8(T v)
+{
+  return v;
+}
+#endif
 #ifdef TARGET_BYTES_PER_WORD
 # if (TARGET_BYTES_PER_WORD == 8)
-#  define TARGET_VW(v) TARGET_V8(v)
+template <class T>
+inline T
+targetVW(T v)
+{
+  return targetV8(v);
+}
 typedef uint64_t target_uintptr_t;
 typedef int64_t target_intptr_t;
@@ -62,7 +106,13 @@ const unsigned TargetClassVtable = 128;
 const unsigned TargetFieldOffset = 12;
 # elif (TARGET_BYTES_PER_WORD == 4)
-#  define TARGET_VW(v) TARGET_V4(v)
+template <class T>
+inline T
+targetVW(T v)
+{
+  return targetV4(v);
+}
 typedef uint32_t target_uintptr_t;
 typedef int32_t target_intptr_t;
@@ -98,6 +148,13 @@ const uintptr_t TargetPointerMask
 const unsigned TargetArrayLength = TargetBytesPerWord;
 const unsigned TargetArrayBody = TargetBytesPerWord * 2;
+inline void
+targetMarkBit(target_uintptr_t* map, unsigned i)
+{
+  map[wordOf<target_uintptr_t>(i)] |=
+    targetVW(static_cast<target_uintptr_t>(1) << bitOf<target_uintptr_t>(i));
+}
 } // namespace vm
 #endif//TARGET_H
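
One practical consequence of replacing the TARGET_Vn macros with the targetVn function templates above is that the argument is evaluated exactly once and keeps its type, so a call site can pass an arbitrary expression without it being expanded twice the way the old object-like macros expanded theirs. A small standalone usage sketch, with a local copy of the swapping targetV2 (as under TARGET_OPPOSITE_ENDIAN) rather than an include of Avian's target.h:

    // Local re-declaration of the swapping targetV2, for demonstration only.
    #include <stdint.h>
    #include <assert.h>

    template <class T>
    inline T targetV2(T v)
    {
      return (((v >> 8) & 0xFF) | ((v << 8)));
    }

    int main()
    {
      // A 16-bit field offset destined for an opposite-endian target.
      uint16_t offset = 0x1234;
      assert(targetV2(offset) == 0x3412);

      // The argument is evaluated once, unlike the old TARGET_V2 macro,
      // whose expansion mentioned (v) twice.
      uint16_t values[] = { 0x0102, 0x0304 };
      int i = 0;
      assert(targetV2(values[i++]) == 0x0201);
      assert(i == 1);
      return 0;
    }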


@@ -2947,7 +2947,7 @@ class MyArchitecture: public Assembler::Architecture {
  }
  virtual void setConstant(void* dst, uint64_t constant) {
-    target_uintptr_t v = TARGET_VW(constant);
+    target_uintptr_t v = targetVW(constant);
    memcpy(dst, &v, TargetBytesPerWord);
  }