handle long conditional immediate branches properly on PowerPC

Due to encoding limitations, a conditional branch with an immediate
operand can reach no more than 32KB forward or backward from the
branch itself.  Since the JIT-compiled form of some methods can be
larger than 32KB, and since we sometimes make conditional jumps to
code outside the current method, we must work around this limitation.
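
For reference, the bc (branch conditional) instruction encodes its
displacement as a sign-extended 16-bit value whose low two bits must
be zero, so a direct conditional branch only reaches targets within
about 32KB of the branch itself.  A minimal sketch of that range check
(illustrative only; the helper name is made up, not one used below):

  #include <cstdint>

  // True if a byte displacement fits bc's 16-bit, word-aligned
  // displacement field (roughly -32768 .. +32764).
  inline bool fitsConditionalDisplacement(int64_t d) {
    return (d & 3) == 0 and d >= -0x8000 and d <= 0x7FFC;
  }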

The strategy of this commit is to provide inline, intermediate jump
tables where necessary.  A conditional branch whose target is too far
for a direct jump instead points to an unconditional branch in the
nearest jump table, which in turn points to the actual target.
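
In other words, one out-of-range hop becomes two in-range hops.  A
rough sketch of the patching step, using the PowerPC field layouts
(the helper names here are hypothetical and are not the ones used in
the diff below):

  #include <cstdint>

  // b: opcode 18, 26-bit signed, word-aligned displacement.
  inline uint32_t encodeB(int32_t disp) {
    return (18u << 26) | (static_cast<uint32_t>(disp) & 0x03FFFFFC);
  }

  // Rewrite only the 16-bit displacement field of an existing bc,
  // keeping its condition (BO/BI) bits intact.
  inline uint32_t retargetBc(uint32_t bc, int32_t disp) {
    return (bc & ~uint32_t(0xFFFC)) | (static_cast<uint32_t>(disp) & 0xFFFC);
  }

  // Point a conditional branch at its target, routing it through an
  // unconditional branch in a nearby jump table when the target is
  // too far away for a direct bc.
  inline void resolveConditional(uint32_t* bc, uint32_t* tableSlot,
                                 uint8_t* target) {
    int64_t direct = target - reinterpret_cast<uint8_t*>(bc);
    if (direct >= -0x8000 and direct <= 0x7FFC) {
      *bc = retargetBc(*bc, static_cast<int32_t>(direct));
    } else {
      *tableSlot = encodeB(static_cast<int32_t>
        (target - reinterpret_cast<uint8_t*>(tableSlot)));
      *bc = retargetBc(*bc, static_cast<int32_t>
        (reinterpret_cast<uint8_t*>(tableSlot)
         - reinterpret_cast<uint8_t*>(bc)));
    }
  }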

Unconditional immediate branches are also limited on PowerPC, but this
limit is 32MB, which is not an impediment in practice.  If it does
become a problem, we'll need to encode such branches using multiple
instructions.
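
For completeness, the usual fallback when even 32MB is not enough on
32-bit PowerPC is to materialize the target address in a register and
branch through the count register.  A sketch of that sequence (scratch
register and helper name chosen arbitrarily for illustration):

  #include <cstdint>
  #include <cstring>

  // lis/ori load the full 32-bit target into r0; mtctr + bctr then
  // jump through the count register, reaching any address.
  inline void emitLongJump(uint8_t* code, uint32_t target) {
    uint32_t insns[4] = {
      0x3C000000u | (target >> 16),     // lis   r0, hi16(target)
      0x60000000u | (target & 0xFFFF),  // ori   r0, r0, lo16(target)
      0x7C0903A6u,                      // mtctr r0
      0x4E800420u                       // bctr
    };
    memcpy(code, insns, sizeof(insns)); // assumes a big-endian host
  }
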
Joel Dice 2011-02-27 23:03:13 -07:00
parent 5f50226ae0
commit 255fc9f9d3
7 changed files with 377 additions and 109 deletions

View File

@ -670,7 +670,9 @@ padding(MyBlock* b, unsigned offset)
unsigned total = 0;
for (PoolEvent* e = b->poolEventHead; e; e = e->next) {
if (e->offset <= offset) {
total += BytesPerWord;
if (b->next) {
total += BytesPerWord;
}
for (PoolOffset* o = e->poolOffsetHead; o; o = o->next) {
total += BytesPerWord;
}
@ -2333,9 +2335,12 @@ class MyAssembler: public Assembler {
}
}
virtual void writeTo(uint8_t* dst) {
virtual void setDestination(uint8_t* dst) {
c.result = dst;
}
virtual void write() {
uint8_t* dst = c.result;
unsigned dstOffset = 0;
for (MyBlock* b = c.firstBlock; b; b = b->next) {
if (DebugPool) {
@ -2356,10 +2361,12 @@ class MyAssembler: public Assembler {
o, o->offset, b);
}
poolSize += BytesPerWord;
unsigned entry = dstOffset + poolSize;
if (b->next) {
entry += BytesPerWord;
}
o->entry->address = dst + entry;
unsigned instruction = o->block->start
@ -2370,9 +2377,13 @@ class MyAssembler: public Assembler {
int32_t* p = reinterpret_cast<int32_t*>(dst + instruction);
*p = (v & PoolOffsetMask) | ((~PoolOffsetMask) & *p);
poolSize += BytesPerWord;
}
write4(dst + dstOffset, ::b((poolSize + BytesPerWord - 8) >> 2));
if (b->next) {
write4(dst + dstOffset, ::b((poolSize + BytesPerWord - 8) >> 2));
}
dstOffset += poolSize + BytesPerWord;
}

View File

@ -428,7 +428,9 @@ class Assembler {
unsigned bSize, OperandType bType, Operand* bOperand,
unsigned cSize, OperandType cType, Operand* cOperand) = 0;
virtual void writeTo(uint8_t* dst) = 0;
virtual void setDestination(uint8_t* dst) = 0;
virtual void write() = 0;
virtual Promise* offset(bool forTrace = false) = 0;

View File

@ -374,7 +374,7 @@ methodForIp(MyThread* t, void* ip)
// we must use a version of the method tree at least as recent as the
// compiled form of the method containing the specified address (see
// compile(MyThread*, Allocator*, BootContext*, object)):
// compile(MyThread*, FixedAllocator*, BootContext*, object)):
loadMemoryBarrier();
return treeQuery(t, root(t, MethodTree), reinterpret_cast<intptr_t>(ip),
@ -2229,7 +2229,7 @@ FixedAllocator*
codeAllocator(MyThread* t);
void
compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
compile(MyThread* t, FixedAllocator* allocator, BootContext* bootContext,
object method);
int64_t
@ -5583,7 +5583,8 @@ finish(MyThread* t, Allocator* allocator, Assembler* a, const char* name,
{
uint8_t* start = static_cast<uint8_t*>(allocator->allocate(pad(length)));
a->writeTo(start);
a->setDestination(start);
a->write();
logCompile(t, start, length, 0, name, 0);
@ -5851,7 +5852,7 @@ makeSimpleFrameMapTable(MyThread* t, Context* context, uint8_t* start,
}
void
finish(MyThread* t, Allocator* allocator, Context* context)
finish(MyThread* t, FixedAllocator* allocator, Context* context)
{
Compiler* c = context->compiler;
@ -5885,9 +5886,13 @@ finish(MyThread* t, Allocator* allocator, Context* context)
// parallelism (the downside being that it may end up being a waste
// of cycles if another thread compiles the same method in parallel,
// which might be mitigated by fine-grained, per-method locking):
unsigned codeSize = c->compile
(context->leaf ? 0 : stackOverflowThunk(t),
difference(&(t->stackLimit), t));
c->compile(context->leaf ? 0 : stackOverflowThunk(t),
difference(&(t->stackLimit), t));
// we must acquire the class lock here at the latest
unsigned codeSize = c->resolve
(allocator->base + allocator->offset + BytesPerWord);
unsigned total = pad(codeSize) + pad(c->poolSize()) + BytesPerWord;
@ -5921,7 +5926,7 @@ finish(MyThread* t, Allocator* allocator, Context* context)
}
}
c->writeTo(start);
c->write();
BootContext* bc = context->bootContext;
if (bc) {
@ -8646,7 +8651,8 @@ compileThunks(MyThread* t, Allocator* allocator, MyProcessor* p)
uint8_t* start = p->thunks.table.start;
#define THUNK(s) \
tableContext.context.assembler->writeTo(start); \
tableContext.context.assembler->setDestination(start); \
tableContext.context.assembler->write(); \
start += p->thunks.table.length; \
{ void* call; \
tableContext.promise.listener->resolve \
@ -8737,7 +8743,8 @@ compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
uint8_t* start = static_cast<uint8_t*>(codeAllocator(t)->allocate(*size));
a->writeTo(start);
a->setDestination(start);
a->write();
logCompile(t, start, *size, 0, "virtualThunk", 0);
@ -8774,7 +8781,7 @@ virtualThunk(MyThread* t, unsigned index)
}
void
compile(MyThread* t, Allocator* allocator, BootContext* bootContext,
compile(MyThread* t, FixedAllocator* allocator, BootContext* bootContext,
object method)
{
PROTECT(t, method);

View File

@ -386,6 +386,7 @@ class Context {
lastEvent(0),
forkState(0),
subroutine(0),
firstBlock(0),
logicalIp(-1),
constantCount(0),
logicalCodeLength(0),
@ -432,6 +433,7 @@ class Context {
Event* lastEvent;
ForkState* forkState;
MySubroutine* subroutine;
Block* firstBlock;
int logicalIp;
unsigned constantCount;
unsigned logicalCodeLength;
@ -5706,7 +5708,7 @@ block(Context* c, Event* head)
return new (c->zone->allocate(sizeof(Block))) Block(head);
}
unsigned
void
compile(Context* c, uintptr_t stackOverflowHandler, unsigned stackLimitOffset)
{
if (c->logicalCode[c->logicalIp]->lastEvent == 0) {
@ -5836,19 +5838,7 @@ compile(Context* c, uintptr_t stackOverflowHandler, unsigned stackLimitOffset)
}
}
block = firstBlock;
while (block->nextBlock or block->nextInstruction) {
Block* next = block->nextBlock
? block->nextBlock
: block->nextInstruction->firstEvent->block;
next->start = block->assemblerBlock->resolve
(block->start, next->assemblerBlock);
block = next;
}
return block->assemblerBlock->resolve(block->start, 0) + a->footerSize();
c->firstBlock = firstBlock;
}
unsigned
@ -6883,25 +6873,43 @@ class MyCompiler: public Compiler {
appendBarrier(&c, StoreLoadBarrier);
}
virtual unsigned compile(uintptr_t stackOverflowHandler,
unsigned stackLimitOffset)
virtual void compile(uintptr_t stackOverflowHandler,
unsigned stackLimitOffset)
{
return c.machineCodeSize = local::compile
(&c, stackOverflowHandler, stackLimitOffset);
local::compile(&c, stackOverflowHandler, stackLimitOffset);
}
virtual unsigned resolve(uint8_t* dst) {
c.machineCode = dst;
c.assembler->setDestination(dst);
Block* block = c.firstBlock;
while (block->nextBlock or block->nextInstruction) {
Block* next = block->nextBlock
? block->nextBlock
: block->nextInstruction->firstEvent->block;
next->start = block->assemblerBlock->resolve
(block->start, next->assemblerBlock);
block = next;
}
return c.machineCodeSize = block->assemblerBlock->resolve
(block->start, 0) + c.assembler->footerSize();
}
virtual unsigned poolSize() {
return c.constantCount * BytesPerWord;
}
virtual void writeTo(uint8_t* dst) {
c.machineCode = dst;
c.assembler->writeTo(dst);
virtual void write() {
c.assembler->write();
int i = 0;
for (ConstantPoolNode* n = c.firstConstant; n; n = n->next) {
intptr_t* target = reinterpret_cast<intptr_t*>
(dst + pad(c.machineCodeSize) + i);
(c.machineCode + pad(c.machineCodeSize) + i);
if (n->promise->resolved()) {
*target = n->promise->value();

View File

@ -188,10 +188,11 @@ class Compiler {
virtual void storeStoreBarrier() = 0;
virtual void storeLoadBarrier() = 0;
virtual unsigned compile(uintptr_t stackOverflowHandler,
unsigned stackLimitOffset) = 0;
virtual void compile(uintptr_t stackOverflowHandler,
unsigned stackLimitOffset) = 0;
virtual unsigned resolve(uint8_t* dst) = 0;
virtual unsigned poolSize() = 0;
virtual void writeTo(uint8_t* dst) = 0;
virtual void write() = 0;
virtual void dispose() = 0;
};
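
Taken together with the finish() changes in compile.cpp above, the new
interface is driven in three phases.  A simplified usage sketch
(assumes the Compiler and FixedAllocator declarations above; allocation
bookkeeping and error handling are elided):

  // 1. generate code into internal buffers; nothing is placed yet
  c->compile(stackOverflowHandler, stackLimitOffset);

  // 2. hand the compiler its eventual base address so that block
  //    resolution (including jump-table padding) can measure real
  //    distances between branches and their targets
  unsigned codeSize = c->resolve
    (allocator->base + allocator->offset + BytesPerWord);

  // 3. reserve pad(codeSize) + pad(c->poolSize()) + BytesPerWord bytes
  //    at that address, then emit the machine code and constant pool
  c->write();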

View File

@ -178,20 +178,44 @@ const unsigned StackAlignmentInWords = StackAlignmentInBytes / BytesPerWord;
const int StackRegister = 1;
const int ThreadRegister = 13;
const bool DebugJumps = false;
class Context;
class MyBlock;
class JumpOffset;
class JumpEvent;
void
resolve(MyBlock*);
unsigned
padding(MyBlock*, unsigned);
class MyBlock: public Assembler::Block {
public:
MyBlock(unsigned offset):
next(0), offset(offset), start(~0), size(0)
MyBlock(Context* context, unsigned offset):
context(context), next(0), jumpOffsetHead(0), jumpOffsetTail(0),
lastJumpOffsetTail(0), jumpEventHead(0), jumpEventTail(0),
lastEventOffset(0), offset(offset), start(~0), size(0)
{ }
virtual unsigned resolve(unsigned start, Assembler::Block* next) {
this->start = start;
this->next = static_cast<MyBlock*>(next);
return start + size;
::resolve(this);
return start + size + padding(this, size);
}
Context* context;
MyBlock* next;
JumpOffset* jumpOffsetHead;
JumpOffset* jumpOffsetTail;
JumpOffset* lastJumpOffsetTail;
JumpEvent* jumpEventHead;
JumpEvent* jumpEventTail;
unsigned lastEventOffset;
unsigned offset;
unsigned start;
unsigned size;
@ -199,15 +223,14 @@ class MyBlock: public Assembler::Block {
class Task;
class ConstantPoolEntry;
class JumpPromise;
class Context {
public:
Context(System* s, Allocator* a, Zone* zone):
s(s), zone(zone), client(0), code(s, a, 1024), tasks(0), result(0),
firstBlock(new (zone->allocate(sizeof(MyBlock))) MyBlock(0)),
lastBlock(firstBlock), constantPool(0), jumps(0), constantPoolCount(0),
jumpCount(0)
firstBlock(new (zone->allocate(sizeof(MyBlock))) MyBlock(this, 0)),
lastBlock(firstBlock), jumpOffsetHead(0), jumpOffsetTail(0),
constantPool(0), constantPoolCount(0)
{ }
System* s;
@ -218,10 +241,10 @@ class Context {
uint8_t* result;
MyBlock* firstBlock;
MyBlock* lastBlock;
JumpOffset* jumpOffsetHead;
JumpOffset* jumpOffsetTail;
ConstantPoolEntry* constantPool;
JumpPromise* jumps;
unsigned constantPoolCount;
unsigned jumpCount;
};
class Task {
@ -324,38 +347,6 @@ offset(Context* c)
Offset(c, c->lastBlock, c->code.length());
}
class JumpPromise: public Promise {
public:
JumpPromise(Context* c, uintptr_t target):
c(c), target(target), next(c->jumps), index(c->jumpCount++)
{
c->jumps = this;
}
virtual bool resolved() {
return c->result != 0;
}
virtual int64_t value() {
assert(c, resolved());
return reinterpret_cast<intptr_t>
(c->result + c->code.length() + (index * BytesPerWord));
}
Context* c;
uintptr_t target;
JumpPromise* next;
unsigned index;
};
Promise*
jump(Context* c, uintptr_t target)
{
return new (c->zone->allocate(sizeof(JumpPromise)))
JumpPromise(c, target);
}
bool
bounded(int right, int left, int32_t v)
{
@ -363,13 +354,21 @@ bounded(int right, int left, int32_t v)
}
void*
updateOffset(System* s, uint8_t* instruction, bool conditional, int64_t value)
updateOffset(System* s, uint8_t* instruction, bool conditional, int64_t value,
void* jumpAddress)
{
int32_t v = reinterpret_cast<uint8_t*>(value) - instruction;
int32_t mask;
if (conditional) {
expect(s, bounded(2, 16, v));
if (not bounded(2, 16, v)) {
*static_cast<uint32_t*>(jumpAddress) = isa::b(0);
updateOffset(s, static_cast<uint8_t*>(jumpAddress), false, value, 0);
v = static_cast<uint8_t*>(jumpAddress) - instruction;
expect(s, bounded(2, 16, v));
}
mask = 0xFFFC;
} else {
expect(s, bounded(2, 6, v));
@ -384,20 +383,23 @@ updateOffset(System* s, uint8_t* instruction, bool conditional, int64_t value)
class OffsetListener: public Promise::Listener {
public:
OffsetListener(System* s, uint8_t* instruction, bool conditional):
OffsetListener(System* s, uint8_t* instruction, bool conditional,
void* jumpAddress):
s(s),
instruction(instruction),
jumpAddress(jumpAddress),
conditional(conditional)
{ }
virtual bool resolve(int64_t value, void** location) {
void* p = updateOffset(s, instruction, conditional, value);
void* p = updateOffset(s, instruction, conditional, value, jumpAddress);
if (location) *location = p;
return false;
}
System* s;
uint8_t* instruction;
void* jumpAddress;
bool conditional;
};
@ -408,6 +410,7 @@ class OffsetTask: public Task {
Task(next),
promise(promise),
instructionOffset(instructionOffset),
jumpAddress(0),
conditional(conditional)
{ }
@ -415,25 +418,181 @@ class OffsetTask: public Task {
if (promise->resolved()) {
updateOffset
(c->s, c->result + instructionOffset->value(), conditional,
promise->value());
promise->value(), jumpAddress);
} else {
new (promise->listen(sizeof(OffsetListener)))
OffsetListener(c->s, c->result + instructionOffset->value(),
conditional);
conditional, jumpAddress);
}
}
Promise* promise;
Promise* instructionOffset;
void* jumpAddress;
bool conditional;
};
class JumpOffset {
public:
JumpOffset(MyBlock* block, OffsetTask* task, unsigned offset):
block(block), task(task), next(0), offset(offset)
{ }
MyBlock* block;
OffsetTask* task;
JumpOffset* next;
unsigned offset;
};
class JumpEvent {
public:
JumpEvent(JumpOffset* jumpOffsetHead, JumpOffset* jumpOffsetTail,
unsigned offset):
jumpOffsetHead(jumpOffsetHead), jumpOffsetTail(jumpOffsetTail), next(0),
offset(offset)
{ }
JumpOffset* jumpOffsetHead;
JumpOffset* jumpOffsetTail;
JumpEvent* next;
unsigned offset;
};
void
appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
bool conditional)
{
c->tasks = new (c->zone->allocate(sizeof(OffsetTask))) OffsetTask
OffsetTask* task = new (c->zone->allocate(sizeof(OffsetTask))) OffsetTask
(c->tasks, promise, instructionOffset, conditional);
c->tasks = task;
if (conditional) {
JumpOffset* offset = new (c->zone->allocate(sizeof(JumpOffset))) JumpOffset
(c->lastBlock, task, c->code.length() - c->lastBlock->offset);
if (c->lastBlock->jumpOffsetTail) {
c->lastBlock->jumpOffsetTail->next = offset;
} else {
c->lastBlock->jumpOffsetHead = offset;
}
c->lastBlock->jumpOffsetTail = offset;
}
}
void
appendJumpEvent(Context* c, MyBlock* b, unsigned offset, JumpOffset* head,
JumpOffset* tail)
{
JumpEvent* e = new (c->zone->allocate(sizeof(JumpEvent))) JumpEvent
(head, tail, offset);
if (b->jumpEventTail) {
b->jumpEventTail->next = e;
} else {
b->jumpEventHead = e;
}
b->jumpEventTail = e;
}
unsigned
padding(MyBlock* b, unsigned offset)
{
unsigned total = 0;
for (JumpEvent** e = &(b->jumpEventHead); *e;) {
if ((*e)->offset <= offset) {
for (JumpOffset** o = &((*e)->jumpOffsetHead); *o;) {
if ((*o)->task->promise->resolved()
and (*o)->task->instructionOffset->resolved())
{
int32_t v = reinterpret_cast<uint8_t*>((*o)->task->promise->value())
- (b->context->result + (*o)->task->instructionOffset->value());
if (bounded(2, 16, v)) {
// this conditional jump needs no indirection -- a direct
// jump will suffice
*o = (*o)->next;
continue;
}
}
total += BytesPerWord;
o = &((*o)->next);
}
if ((*e)->jumpOffsetHead == 0) {
*e = (*e)->next;
} else {
if (b->next) {
total += BytesPerWord;
}
e = &((*e)->next);
}
} else {
break;
}
}
return total;
}
void
resolve(MyBlock* b)
{
Context* c = b->context;
if (b->jumpOffsetHead) {
if (c->jumpOffsetTail) {
c->jumpOffsetTail->next = b->jumpOffsetHead;
} else {
c->jumpOffsetHead = b->jumpOffsetHead;
}
c->jumpOffsetTail = b->jumpOffsetTail;
}
if (c->jumpOffsetHead) {
bool append;
if (b->next == 0 or b->next->jumpEventHead) {
append = true;
} else {
int32_t v = (b->start + b->size + b->next->size + BytesPerWord)
- (c->jumpOffsetHead->offset + c->jumpOffsetHead->block->start);
append = not bounded(2, 16, v);
if (DebugJumps) {
fprintf(stderr,
"current %p %d %d next %p %d %d\n",
b, b->start, b->size, b->next, b->start + b->size,
b->next->size);
fprintf(stderr,
"offset %p %d is of distance %d to next block; append? %d\n",
c->jumpOffsetHead, c->jumpOffsetHead->offset, v, append);
}
}
if (append) {
#ifndef NDEBUG
int32_t v = (b->start + b->size)
- (c->jumpOffsetHead->offset + c->jumpOffsetHead->block->start);
expect(c, bounded(2, 16, v));
#endif // not NDEBUG
appendJumpEvent(c, b, b->size, c->jumpOffsetHead, c->jumpOffsetTail);
if (DebugJumps) {
for (JumpOffset* o = c->jumpOffsetHead; o; o = o->next) {
fprintf(stderr,
"include %p %d in jump event %p at offset %d in block %p\n",
o, o->offset, b->jumpEventTail, b->size, b);
}
}
c->jumpOffsetHead = 0;
c->jumpOffsetTail = 0;
}
}
}
inline unsigned
@ -491,6 +650,11 @@ inline int newTemp(Context* con) { return con->client->acquireTemporary(); }
inline void freeTemp(Context* con, int r) { con->client->releaseTemporary(r); }
inline int64_t getValue(Assembler::Constant* c) { return c->value->value(); }
inline void
write4(uint8_t* dst, uint32_t v)
{
memcpy(dst, &v, 4);
}
void shiftLeftR(Context* con, unsigned size, Assembler::Register* a, Assembler::Register* b, Assembler::Register* t)
{
@ -1533,7 +1697,7 @@ branchLong(Context* c, TernaryOperation op, Assembler::Operand* al,
if (next) {
updateOffset
(c->s, c->code.data + next, true, reinterpret_cast<intptr_t>
(c->code.data + c->code.length()));
(c->code.data + c->code.length()), 0);
}
}
@ -1987,7 +2151,7 @@ class MyArchitecture: public Assembler::Architecture {
case AlignedCall:
case AlignedJump: {
updateOffset(c.s, static_cast<uint8_t*>(returnAddress) - 4, false,
reinterpret_cast<intptr_t>(newTarget));
reinterpret_cast<intptr_t>(newTarget), 0);
} break;
case LongCall:
@ -2260,7 +2424,9 @@ class MyAssembler: public Assembler {
{
Register stack(StackRegister);
Memory stackLimit(ThreadRegister, stackLimitOffsetFromThread);
Constant handlerConstant(jump(&c, handler));
Constant handlerConstant
(new (c.zone->allocate(sizeof(ResolvedPromise)))
ResolvedPromise(handler));
branchRM(&c, JumpIfGreaterOrEqual, BytesPerWord, &stack, &stackLimit,
&handlerConstant);
}
@ -2482,22 +2648,62 @@ class MyAssembler: public Assembler {
}
}
virtual void writeTo(uint8_t* dst) {
virtual void setDestination(uint8_t* dst) {
c.result = dst;
}
virtual void write() {
uint8_t* dst = c.result;
unsigned dstOffset = 0;
for (MyBlock* b = c.firstBlock; b; b = b->next) {
memcpy(dst + b->start, c.code.data + b->offset, b->size);
}
if (DebugJumps) {
fprintf(stderr, "write block %p\n", b);
}
for (JumpPromise* j = c.jumps; j; j = j->next) {
uint8_t* instruction
= dst + c.code.length() + (c.jumpCount - j->index - 1);
int32_t op = ::b(0);
memcpy(instruction, &op, BytesPerWord);
updateOffset(c.s, instruction, false, j->target);
unsigned blockOffset = 0;
for (JumpEvent* e = b->jumpEventHead; e; e = e->next) {
unsigned size = e->offset - blockOffset;
memcpy(dst + dstOffset, c.code.data + b->offset + blockOffset, size);
blockOffset = e->offset;
dstOffset += size;
unsigned jumpTableSize = 0;
for (JumpOffset* o = e->jumpOffsetHead; o; o = o->next) {
if (DebugJumps) {
fprintf(stderr, "visit offset %p %d in block %p\n",
o, o->offset, b);
}
uint8_t* address = dst + dstOffset + jumpTableSize;
if (b->next) {
address += BytesPerWord;
}
o->task->jumpAddress = address;
jumpTableSize += BytesPerWord;
}
assert(&c, jumpTableSize);
if (b->next) {
write4(dst + dstOffset, ::b(jumpTableSize + BytesPerWord));
}
dstOffset += jumpTableSize + BytesPerWord;
}
unsigned size = b->size - blockOffset;
memcpy(dst + dstOffset,
c.code.data + b->offset + blockOffset,
size);
dstOffset += size;
}
unsigned index = c.code.length() + (c.jumpCount * BytesPerWord);
unsigned index = c.code.length();
assert(&c, index % BytesPerWord == 0);
for (ConstantPoolEntry* e = c.constantPool; e; e = e->next) {
e->address = dst + index;
@ -2523,7 +2729,7 @@ class MyAssembler: public Assembler {
b->size = c.code.length() - b->offset;
if (startNew) {
c.lastBlock = new (c.zone->allocate(sizeof(MyBlock)))
MyBlock(c.code.length());
MyBlock(&c, c.code.length());
} else {
c.lastBlock = 0;
}
@ -2531,7 +2737,37 @@ class MyAssembler: public Assembler {
}
virtual void endEvent() {
// ignore
MyBlock* b = c.lastBlock;
unsigned thisEventOffset = c.code.length() - b->offset;
if (b->jumpOffsetHead) {
int32_t v = (thisEventOffset + BytesPerWord)
- b->jumpOffsetHead->offset;
if (v > 0 and not bounded(2, 16, v)) {
appendJumpEvent
(&c, b, b->lastEventOffset, b->jumpOffsetHead,
b->lastJumpOffsetTail);
if (DebugJumps) {
for (JumpOffset* o = b->jumpOffsetHead;
o != b->lastJumpOffsetTail->next; o = o->next)
{
fprintf(stderr,
"in endEvent, include %p %d in jump event %p "
"at offset %d in block %p\n",
o, o->offset, b->jumpEventTail, b->lastEventOffset, b);
}
}
b->jumpOffsetHead = b->lastJumpOffsetTail->next;
b->lastJumpOffsetTail->next = 0;
if (b->jumpOffsetHead == 0) {
b->jumpOffsetTail = 0;
}
}
}
b->lastEventOffset = thisEventOffset;
b->lastJumpOffsetTail = b->jumpOffsetTail;
}
virtual unsigned length() {
@ -2539,7 +2775,7 @@ class MyAssembler: public Assembler {
}
virtual unsigned footerSize() {
return (c.jumpCount + c.constantPoolCount) * BytesPerWord;
return c.constantPoolCount * BytesPerWord;
}
virtual void dispose() {

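The net effect of write() above is that each block is copied in
pieces, with a small table of unconditional branches spliced in at
each jump event.  Schematically, the emitted layout looks roughly like
this when more code follows in the block (not literal disassembly):

  ...code preceding the jump event...
  b    +skip         ; hop over the table on the fall-through path
  b    target0       ; slot used by the first far conditional branch
  b    target1       ; slot used by the second, and so on
  ...remaining code of the block...
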
View File

@ -3616,9 +3616,12 @@ class MyAssembler: public Assembler {
}
}
virtual void writeTo(uint8_t* dst) {
virtual void setDestination(uint8_t* dst) {
c.result = dst;
}
virtual void write() {
uint8_t* dst = c.result;
for (MyBlock* b = c.firstBlock; b; b = b->next) {
unsigned index = 0;
unsigned padding = 0;