begin splitting up x86 assembler

This commit is contained in:
Joshua Warner 2013-02-16 19:50:34 -07:00 committed by Joshua Warner
parent 49bfda3932
commit 61f03643e0
11 changed files with 583 additions and 337 deletions

View File

@ -939,7 +939,7 @@ generated-code = \
$(build)/type-name-initializations.cpp \ $(build)/type-name-initializations.cpp \
$(build)/type-maps.cpp $(build)/type-maps.cpp
vm-depends := $(generated-code) $(wildcard $(src)/*.h) $(wildcard $(src)/codegen/*.h) $(wildcard $(src)/codegen/compiler/*.h) vm-depends := $(generated-code) $(wildcard $(src)/*.h) $(wildcard $(src)/codegen/*.h)
vm-sources = \ vm-sources = \
$(src)/$(system).cpp \ $(src)/$(system).cpp \
@ -968,26 +968,31 @@ embed-objects = $(call cpp-objects,$(embed-sources),$(src),$(build-embed))
compiler-sources = \ compiler-sources = \
$(src)/codegen/compiler.cpp \ $(src)/codegen/compiler.cpp \
$(src)/codegen/compiler/context.cpp \ $(wildcard $(src)/codegen/compiler/*.cpp) \
$(src)/codegen/compiler/resource.cpp \
$(src)/codegen/compiler/site.cpp \
$(src)/codegen/compiler/regalloc.cpp \
$(src)/codegen/compiler/value.cpp \
$(src)/codegen/compiler/read.cpp \
$(src)/codegen/compiler/event.cpp \
$(src)/codegen/compiler/promise.cpp \
$(src)/codegen/compiler/frame.cpp \
$(src)/codegen/compiler/ir.cpp \
$(src)/codegen/registers.cpp \ $(src)/codegen/registers.cpp \
$(src)/codegen/targets.cpp $(src)/codegen/targets.cpp
compiler-objects = $(call cpp-objects,$(compiler-sources),$(src),$(build))
$(compiler-objects): $(wildcard $(src)/codegen/compiler/*.h) $(vm-depends)
x86-assembler-sources = $(wildcard $(src)/codegen/x86/*.cpp)
x86-assembler-objects = $(call cpp-objects,$(x86-assembler-sources),$(src),$(build))
$(x86-assembler-objects): $(wildcard $(src)/codegen/x86/*.h) $(vm-depends)
arm-assembler-sources = $(wildcard $(src)/codegen/arm/*.cpp)
arm-assembler-objects = $(call cpp-objects,$(arm-assembler-sources),$(src),$(build))
$(arm-assembler-objects): $(wildcard $(src)/codegen/arm/*.h) $(vm-depends)
powerpc-assembler-sources = $(wildcard $(src)/codegen/powerpc/*.cpp)
powerpc-assembler-objects = $(call cpp-objects,$(powerpc-assembler-sources),$(src),$(build))
$(powerpc-assembler-objects): $(wildcard $(src)/codegen/powerpc/*.h) $(vm-depends)
all-assembler-sources = \ all-assembler-sources = \
$(src)/codegen/x86/assembler.cpp \ $(x86-assembler-sources) \
$(src)/codegen/arm/assembler.cpp \ $(arm-assembler-sources) \
$(src)/codegen/powerpc/assembler.cpp $(powerpc-assembler-sources)
native-assembler-sources = \ native-assembler-sources = $($(target-asm)-assembler-sources)
$(src)/codegen/$(target-asm)/assembler.cpp native-assembler-objects = $($(target-asm)-assembler-objects)
all-codegen-target-sources = \ all-codegen-target-sources = \
$(compiler-sources) \ $(compiler-sources) \

View File

@ -15,6 +15,11 @@
#include "codegen/assembler.h" #include "codegen/assembler.h"
#include "codegen/registers.h" #include "codegen/registers.h"
#include "codegen/x86/context.h"
#include "codegen/x86/block.h"
#include "codegen/x86/fixup.h"
#include "codegen/x86/padding.h"
#include "util/runtime-array.h" #include "util/runtime-array.h"
#include "util/abort.h" #include "util/abort.h"
@ -23,11 +28,10 @@
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x) #define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
using namespace vm; using namespace vm;
using namespace avian::codegen;
namespace { namespace avian {
namespace codegen {
namespace local { namespace x86 {
enum { enum {
rax = 0, rax = 0,
@ -82,18 +86,6 @@ const int LongJumpRegister = r10;
const unsigned StackAlignmentInBytes = 16; const unsigned StackAlignmentInBytes = 16;
const unsigned StackAlignmentInWords = StackAlignmentInBytes / TargetBytesPerWord; const unsigned StackAlignmentInWords = StackAlignmentInBytes / TargetBytesPerWord;
bool
isInt8(target_intptr_t v)
{
return v == static_cast<int8_t>(v);
}
bool
isInt32(target_intptr_t v)
{
return v == static_cast<int32_t>(v);
}
class Task; class Task;
class AlignmentPadding; class AlignmentPadding;
@ -101,213 +93,6 @@ unsigned
padding(AlignmentPadding* p, unsigned index, unsigned offset, padding(AlignmentPadding* p, unsigned index, unsigned offset,
AlignmentPadding* limit); AlignmentPadding* limit);
class Context;
class MyBlock;
ResolvedPromise*
resolved(Context* c, int64_t value);
class MyBlock: public Assembler::Block {
public:
MyBlock(unsigned offset):
next(0), firstPadding(0), lastPadding(0), offset(offset), start(~0),
size(0)
{ }
virtual unsigned resolve(unsigned start, Assembler::Block* next) {
this->start = start;
this->next = static_cast<MyBlock*>(next);
return start + size + padding(firstPadding, start, offset, lastPadding);
}
MyBlock* next;
AlignmentPadding* firstPadding;
AlignmentPadding* lastPadding;
unsigned offset;
unsigned start;
unsigned size;
};
typedef void (*OperationType)(Context*);
typedef void (*UnaryOperationType)(Context*, unsigned, lir::Operand*);
typedef void (*BinaryOperationType)
(Context*, unsigned, lir::Operand*, unsigned, lir::Operand*);
typedef void (*BranchOperationType)
(Context*, lir::TernaryOperation, unsigned, lir::Operand*,
lir::Operand*, lir::Operand*);
class ArchitectureContext {
public:
ArchitectureContext(System* s, bool useNativeFeatures):
s(s), useNativeFeatures(useNativeFeatures)
{ }
System* s;
bool useNativeFeatures;
OperationType operations[lir::OperationCount];
UnaryOperationType unaryOperations[lir::UnaryOperationCount
* lir::OperandTypeCount];
BinaryOperationType binaryOperations
[(lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount
* lir::OperandTypeCount];
BranchOperationType branchOperations
[lir::BranchOperationCount
* lir::OperandTypeCount
* lir::OperandTypeCount];
};
class Context {
public:
Context(System* s, Allocator* a, Zone* zone, ArchitectureContext* ac):
s(s), zone(zone), client(0), code(s, a, 1024), tasks(0), result(0),
firstBlock(new(zone) MyBlock(0)),
lastBlock(firstBlock), ac(ac)
{ }
System* s;
Zone* zone;
Assembler::Client* client;
Vector code;
Task* tasks;
uint8_t* result;
MyBlock* firstBlock;
MyBlock* lastBlock;
ArchitectureContext* ac;
};
Aborter* getAborter(Context* c) {
return c->s;
}
Aborter* getAborter(ArchitectureContext* c) {
return c->s;
}
ResolvedPromise*
resolved(Context* c, int64_t value)
{
return new(c->zone) ResolvedPromise(value);
}
class Offset: public Promise {
public:
Offset(Context* c, MyBlock* block, unsigned offset, AlignmentPadding* limit):
c(c), block(block), offset(offset), limit(limit), value_(-1)
{ }
virtual bool resolved() {
return block->start != static_cast<unsigned>(~0);
}
virtual int64_t value() {
assert(c, resolved());
if (value_ == -1) {
value_ = block->start + (offset - block->offset)
+ padding(block->firstPadding, block->start, block->offset, limit);
}
return value_;
}
Context* c;
MyBlock* block;
unsigned offset;
AlignmentPadding* limit;
int value_;
};
Promise*
offset(Context* c)
{
return new(c->zone) Offset(c, c->lastBlock, c->code.length(), c->lastBlock->lastPadding);
}
class Task {
public:
Task(Task* next): next(next) { }
virtual void run(Context* c) = 0;
Task* next;
};
void*
resolveOffset(System* s, uint8_t* instruction, unsigned instructionSize,
int64_t value)
{
intptr_t v = reinterpret_cast<uint8_t*>(value)
- instruction - instructionSize;
expect(s, isInt32(v));
int32_t v4 = v;
memcpy(instruction + instructionSize - 4, &v4, 4);
return instruction + instructionSize;
}
class OffsetListener: public Promise::Listener {
public:
OffsetListener(System* s, uint8_t* instruction,
unsigned instructionSize):
s(s),
instruction(instruction),
instructionSize(instructionSize)
{ }
virtual bool resolve(int64_t value, void** location) {
void* p = resolveOffset(s, instruction, instructionSize, value);
if (location) *location = p;
return false;
}
System* s;
uint8_t* instruction;
unsigned instructionSize;
};
class OffsetTask: public Task {
public:
OffsetTask(Task* next, Promise* promise, Promise* instructionOffset,
unsigned instructionSize):
Task(next),
promise(promise),
instructionOffset(instructionOffset),
instructionSize(instructionSize)
{ }
virtual void run(Context* c) {
if (promise->resolved()) {
resolveOffset
(c->s, c->result + instructionOffset->value(), instructionSize,
promise->value());
} else {
new (promise->listen(sizeof(OffsetListener)))
OffsetListener(c->s, c->result + instructionOffset->value(),
instructionSize);
}
}
Promise* promise;
Promise* instructionOffset;
unsigned instructionSize;
};
void
appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
unsigned instructionSize)
{
OffsetTask* task =
new(c->zone) OffsetTask(c->tasks, promise, instructionOffset, instructionSize);
c->tasks = task;
}
void void
copy(System* s, void* dst, int64_t src, unsigned size) copy(System* s, void* dst, int64_t src, unsigned size)
{ {
@ -378,60 +163,6 @@ appendImmediateTask(Context* c, Promise* promise, Promise* offset,
(c->tasks, promise, offset, size, promiseOffset); (c->tasks, promise, offset, size, promiseOffset);
} }
class AlignmentPadding {
public:
AlignmentPadding(Context* c, unsigned instructionOffset, unsigned alignment):
offset(c->code.length()),
instructionOffset(instructionOffset),
alignment(alignment),
next(0),
padding(-1)
{
if (c->lastBlock->firstPadding) {
c->lastBlock->lastPadding->next = this;
} else {
c->lastBlock->firstPadding = this;
}
c->lastBlock->lastPadding = this;
}
unsigned offset;
unsigned instructionOffset;
unsigned alignment;
AlignmentPadding* next;
int padding;
};
unsigned
padding(AlignmentPadding* p, unsigned start, unsigned offset,
AlignmentPadding* limit)
{
unsigned padding = 0;
if (limit) {
if (limit->padding == -1) {
for (; p; p = p->next) {
if (p->padding == -1) {
unsigned index = p->offset - offset;
while ((start + index + padding + p->instructionOffset)
% p->alignment)
{
++ padding;
}
p->padding = padding;
if (p == limit) break;
} else {
padding = p->padding;
}
}
} else {
padding = limit->padding;
}
}
return padding;
}
extern "C" bool extern "C" bool
detectFeature(unsigned ecx, unsigned edx); detectFeature(unsigned ecx, unsigned edx);
@ -558,7 +289,7 @@ modrmSibImm(Context* c, int a, int scale, int index, int base, int offset)
{ {
if (offset == 0 and regCode(base) != rbp) { if (offset == 0 and regCode(base) != rbp) {
modrmSib(c, 0x00, a, scale, index, base); modrmSib(c, 0x00, a, scale, index, base);
} else if (isInt8(offset)) { } else if (vm::fitsInInt8(offset)) {
modrmSib(c, 0x40, a, scale, index, base); modrmSib(c, 0x40, a, scale, index, base);
c->code.append(offset); c->code.append(offset);
} else { } else {
@ -627,7 +358,7 @@ storeLoadBarrier(Context* c)
void void
unconditional(Context* c, unsigned jump, lir::Constant* a) unconditional(Context* c, unsigned jump, lir::Constant* a)
{ {
appendOffsetTask(c, a->value, offset(c), 5); appendOffsetTask(c, a->value, offsetPromise(c), 5);
opcode(c, jump); opcode(c, jump);
c->code.append4(0); c->code.append4(0);
@ -636,7 +367,7 @@ unconditional(Context* c, unsigned jump, lir::Constant* a)
void void
conditional(Context* c, unsigned condition, lir::Constant* a) conditional(Context* c, unsigned condition, lir::Constant* a)
{ {
appendOffsetTask(c, a->value, offset(c), 6); appendOffsetTask(c, a->value, offsetPromise(c), 6);
opcode(c, 0x0f, condition); opcode(c, 0x0f, condition);
c->code.append4(0); c->code.append4(0);
@ -904,7 +635,7 @@ moveCR2(Context* c, UNUSED unsigned aSize, lir::Constant* a,
c->code.appendTargetAddress(a->value->value()); c->code.appendTargetAddress(a->value->value());
} else { } else {
appendImmediateTask appendImmediateTask
(c, a->value, offset(c), TargetBytesPerWord, promiseOffset); (c, a->value, offsetPromise(c), TargetBytesPerWord, promiseOffset);
c->code.appendTargetAddress(static_cast<target_uintptr_t>(0)); c->code.appendTargetAddress(static_cast<target_uintptr_t>(0));
} }
} }
@ -1260,14 +991,14 @@ moveCM(Context* c, unsigned aSize UNUSED, lir::Constant* a,
if (a->value->resolved()) { if (a->value->resolved()) {
c->code.append4(a->value->value()); c->code.append4(a->value->value());
} else { } else {
appendImmediateTask(c, a->value, offset(c), 4); appendImmediateTask(c, a->value, offsetPromise(c), 4);
c->code.append4(0); c->code.append4(0);
} }
break; break;
case 8: { case 8: {
if (TargetBytesPerWord == 8) { if (TargetBytesPerWord == 8) {
if (a->value->resolved() and isInt32(a->value->value())) { if (a->value->resolved() and vm::fitsInInt32(a->value->value())) {
maybeRex(c, bSize, b); maybeRex(c, bSize, b);
opcode(c, 0xc7); opcode(c, 0xc7);
modrmSibImm(c, 0, b->scale, b->index, b->base, b->offset); modrmSibImm(c, 0, b->scale, b->index, b->base, b->offset);
@ -1358,7 +1089,7 @@ addCarryCR(Context* c, unsigned size, lir::Constant* a,
int64_t v = a->value->value(); int64_t v = a->value->value();
maybeRex(c, size, b); maybeRex(c, size, b);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x83, 0xd0 + regCode(b)); opcode(c, 0x83, 0xd0 + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -1387,9 +1118,9 @@ addCR(Context* c, unsigned aSize, lir::Constant* a,
addCR(c, 4, &al, 4, b); addCR(c, 4, &al, 4, b);
addCarryCR(c, 4, &ah, &bh); addCarryCR(c, 4, &ah, &bh);
} else { } else {
if (isInt32(v)) { if (vm::fitsInInt32(v)) {
maybeRex(c, aSize, b); maybeRex(c, aSize, b);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x83, 0xc0 + regCode(b)); opcode(c, 0x83, 0xc0 + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -1414,7 +1145,7 @@ subtractBorrowCR(Context* c, unsigned size UNUSED, lir::Constant* a,
assert(c, TargetBytesPerWord == 8 or size == 4); assert(c, TargetBytesPerWord == 8 or size == 4);
int64_t v = a->value->value(); int64_t v = a->value->value();
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x83, 0xd8 + regCode(b)); opcode(c, 0x83, 0xd8 + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -1447,9 +1178,9 @@ subtractCR(Context* c, unsigned aSize, lir::Constant* a,
subtractCR(c, 4, &al, 4, b); subtractCR(c, 4, &al, 4, b);
subtractBorrowCR(c, 4, &ah, &bh); subtractBorrowCR(c, 4, &ah, &bh);
} else { } else {
if (isInt32(v)) { if (vm::fitsInInt32(v)) {
maybeRex(c, aSize, b); maybeRex(c, aSize, b);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x83, 0xe8 + regCode(b)); opcode(c, 0x83, 0xe8 + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -1537,9 +1268,9 @@ andCR(Context* c, unsigned aSize, lir::Constant* a,
andCR(c, 4, &al, 4, b); andCR(c, 4, &al, 4, b);
andCR(c, 4, &ah, 4, &bh); andCR(c, 4, &ah, 4, &bh);
} else { } else {
if (isInt32(v)) { if (vm::fitsInInt32(v)) {
maybeRex(c, aSize, b); maybeRex(c, aSize, b);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x83, 0xe0 + regCode(b)); opcode(c, 0x83, 0xe0 + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -1595,9 +1326,9 @@ orCR(Context* c, unsigned aSize, lir::Constant* a,
orCR(c, 4, &al, 4, b); orCR(c, 4, &al, 4, b);
orCR(c, 4, &ah, 4, &bh); orCR(c, 4, &ah, 4, &bh);
} else { } else {
if (isInt32(v)) { if (vm::fitsInInt32(v)) {
maybeRex(c, aSize, b); maybeRex(c, aSize, b);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x83, 0xc8 + regCode(b)); opcode(c, 0x83, 0xc8 + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -1652,9 +1383,9 @@ xorCR(Context* c, unsigned aSize, lir::Constant* a,
xorCR(c, 4, &al, 4, b); xorCR(c, 4, &al, 4, b);
xorCR(c, 4, &ah, 4, &bh); xorCR(c, 4, &ah, 4, &bh);
} else { } else {
if (isInt32(v)) { if (vm::fitsInInt32(v)) {
maybeRex(c, aSize, b); maybeRex(c, aSize, b);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x83, 0xf0 + regCode(b)); opcode(c, 0x83, 0xf0 + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -1828,10 +1559,10 @@ compareCR(Context* c, unsigned aSize, lir::Constant* a,
assert(c, aSize == bSize); assert(c, aSize == bSize);
assert(c, TargetBytesPerWord == 8 or aSize == 4); assert(c, TargetBytesPerWord == 8 or aSize == 4);
if (a->value->resolved() and isInt32(a->value->value())) { if (a->value->resolved() and vm::fitsInInt32(a->value->value())) {
int64_t v = a->value->value(); int64_t v = a->value->value();
maybeRex(c, aSize, b); maybeRex(c, aSize, b);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x83, 0xf8 + regCode(b)); opcode(c, 0x83, 0xf8 + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -1871,12 +1602,12 @@ compareCM(Context* c, unsigned aSize, lir::Constant* a,
if (a->value->resolved()) { if (a->value->resolved()) {
int64_t v = a->value->value(); int64_t v = a->value->value();
maybeRex(c, aSize, b); maybeRex(c, aSize, b);
opcode(c, isInt8(v) ? 0x83 : 0x81); opcode(c, vm::fitsInInt8(v) ? 0x83 : 0x81);
modrmSibImm(c, rdi, b->scale, b->index, b->base, b->offset); modrmSibImm(c, rdi, b->scale, b->index, b->base, b->offset);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
c->code.append(v); c->code.append(v);
} else if (isInt32(v)) { } else if (vm::fitsInInt32(v)) {
c->code.append4(v); c->code.append4(v);
} else { } else {
abort(c); abort(c);
@ -2070,9 +1801,9 @@ multiplyCR(Context* c, unsigned aSize, lir::Constant* a,
} else { } else {
int64_t v = a->value->value(); int64_t v = a->value->value();
if (v != 1) { if (v != 1) {
if (isInt32(v)) { if (vm::fitsInInt32(v)) {
maybeRex(c, bSize, b, b); maybeRex(c, bSize, b, b);
if (isInt8(v)) { if (vm::fitsInInt8(v)) {
opcode(c, 0x6b); opcode(c, 0x6b);
modrm(c, 0xc0, b, b); modrm(c, 0xc0, b, b);
c->code.append(v); c->code.append(v);
@ -2150,7 +1881,7 @@ doShift(Context* c, UNUSED void (*shift)
maybeRex(c, bSize, b); maybeRex(c, bSize, b);
if (v == 1) { if (v == 1) {
opcode(c, 0xd1, type + regCode(b)); opcode(c, 0xd1, type + regCode(b));
} else if (isInt8(v)) { } else if (vm::fitsInInt8(v)) {
opcode(c, 0xc1, type + regCode(b)); opcode(c, 0xc1, type + regCode(b));
c->code.append(v); c->code.append(v);
} else { } else {
@ -2796,7 +2527,7 @@ class MyArchitecture: public Assembler::Architecture {
} }
virtual unsigned argumentFootprint(unsigned footprint) { virtual unsigned argumentFootprint(unsigned footprint) {
return local::argumentFootprint(footprint); return x86::argumentFootprint(footprint);
} }
virtual bool argumentAlignment() { virtual bool argumentAlignment() {
@ -2904,7 +2635,7 @@ class MyArchitecture: public Assembler::Architecture {
intptr_t v = static_cast<uint8_t*>(newTarget) intptr_t v = static_cast<uint8_t*>(newTarget)
- static_cast<uint8_t*>(returnAddress); - static_cast<uint8_t*>(returnAddress);
assert(&c, isInt32(v)); assert(&c, vm::fitsInInt32(v));
int32_t v32 = v; int32_t v32 = v;
@ -2939,7 +2670,7 @@ class MyArchitecture: public Assembler::Architecture {
unsigned targetParameterFootprint, void** ip, unsigned targetParameterFootprint, void** ip,
void** stack) void** stack)
{ {
local::nextFrame(&c, static_cast<uint8_t*>(start), size, footprint, x86::nextFrame(&c, static_cast<uint8_t*>(start), size, footprint,
link, mostRecent, targetParameterFootprint, ip, stack); link, mostRecent, targetParameterFootprint, ip, stack);
} }
@ -3393,7 +3124,7 @@ class MyAssembler: public Assembler {
{ {
lir::Register stack(rsp); lir::Register stack(rsp);
lir::Memory stackLimit(rbx, stackLimitOffsetFromThread); lir::Memory stackLimit(rbx, stackLimitOffsetFromThread);
lir::Constant handlerConstant(resolved(&c, handler)); lir::Constant handlerConstant(resolvedPromise(&c, handler));
branchRM(&c, lir::JumpIfGreaterOrEqual, TargetBytesPerWord, &stack, &stackLimit, branchRM(&c, lir::JumpIfGreaterOrEqual, TargetBytesPerWord, &stack, &stackLimit,
&handlerConstant); &handlerConstant);
} }
@ -3470,7 +3201,7 @@ class MyAssembler: public Assembler {
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base)); OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base));
} }
lir::Constant footprintConstant(resolved(&c, footprint * TargetBytesPerWord)); lir::Constant footprintConstant(resolvedPromise(&c, footprint * TargetBytesPerWord));
apply(lir::Subtract, apply(lir::Subtract,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &footprintConstant), OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &footprintConstant),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
@ -3479,7 +3210,7 @@ class MyAssembler: public Assembler {
virtual void adjustFrame(unsigned difference) { virtual void adjustFrame(unsigned difference) {
lir::Register stack(rsp); lir::Register stack(rsp);
lir::Constant differenceConstant(resolved(&c, difference * TargetBytesPerWord)); lir::Constant differenceConstant(resolvedPromise(&c, difference * TargetBytesPerWord));
apply(lir::Subtract, apply(lir::Subtract,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &differenceConstant), OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &differenceConstant),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
@ -3497,7 +3228,7 @@ class MyAssembler: public Assembler {
popR(&c, TargetBytesPerWord, &base); popR(&c, TargetBytesPerWord, &base);
} else { } else {
lir::Register stack(rsp); lir::Register stack(rsp);
lir::Constant footprint(resolved(&c, frameFootprint * TargetBytesPerWord)); lir::Constant footprint(resolvedPromise(&c, frameFootprint * TargetBytesPerWord));
apply(lir::Add, apply(lir::Add,
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &footprint), OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &footprint),
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
@ -3536,7 +3267,7 @@ class MyAssembler: public Assembler {
lir::Register stack(rsp); lir::Register stack(rsp);
lir::Constant footprint lir::Constant footprint
(resolved (resolvedPromise
(&c, (frameFootprint - offset + baseSize) * TargetBytesPerWord)); (&c, (frameFootprint - offset + baseSize) * TargetBytesPerWord));
addCR(&c, TargetBytesPerWord, &footprint, TargetBytesPerWord, &stack); addCR(&c, TargetBytesPerWord, &footprint, TargetBytesPerWord, &stack);
@ -3578,7 +3309,7 @@ class MyAssembler: public Assembler {
lir::Register stack(rsp); lir::Register stack(rsp);
lir::Constant adjustment lir::Constant adjustment
(resolved(&c, (argumentFootprint - StackAlignmentInWords) (resolvedPromise(&c, (argumentFootprint - StackAlignmentInWords)
* TargetBytesPerWord)); * TargetBytesPerWord));
addCR(&c, TargetBytesPerWord, &adjustment, TargetBytesPerWord, &stack); addCR(&c, TargetBytesPerWord, &adjustment, TargetBytesPerWord, &stack);
@ -3674,7 +3405,7 @@ class MyAssembler: public Assembler {
} }
virtual Promise* offset(bool) { virtual Promise* offset(bool) {
return local::offset(&c); return x86::offsetPromise(&c);
} }
virtual Block* endBlock(bool startNew) { virtual Block* endBlock(bool startNew) {
@ -3713,17 +3444,12 @@ Assembler* MyArchitecture::makeAssembler(Allocator* allocator, Zone* zone) {
new(zone) MyAssembler(c.s, allocator, zone, this); new(zone) MyAssembler(c.s, allocator, zone, this);
} }
} // namespace local } // namespace x86
} // namespace
namespace avian {
namespace codegen {
Assembler::Architecture* makeArchitectureX86(System* system, bool useNativeFeatures) Assembler::Architecture* makeArchitectureX86(System* system, bool useNativeFeatures)
{ {
return new (allocate(system, sizeof(local::MyArchitecture))) return new (allocate(system, sizeof(x86::MyArchitecture)))
local::MyArchitecture(system, useNativeFeatures); x86::MyArchitecture(system, useNativeFeatures);
} }
} // namespace codegen } // namespace codegen

39
src/codegen/x86/block.cpp Normal file
View File

@ -0,0 +1,39 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "codegen/x86/block.h"
#include "common.h"
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
namespace avian {
namespace codegen {
namespace x86 {
// Returns the number of alignment-padding bytes contributed by the
// padding records from `p` up to and including `limit`, for a block that
// starts at `index` in the final output and at `offset` in the code
// vector. NOTE(review): definition appears to live alongside
// AlignmentPadding in codegen/x86/padding.cpp — confirm.
unsigned
padding(AlignmentPadding* p, unsigned index, unsigned offset, AlignmentPadding* limit);
// Creates a block positioned at `offset` within the code vector. A new
// block has no successor and no alignment-padding records, and its final
// output position (`start`) holds the ~0 sentinel until resolve() runs.
MyBlock::MyBlock(unsigned offset)
  : next(0),
    firstPadding(0),
    lastPadding(0),
    offset(offset),
    start(~0),
    size(0)
{ }
// Fixes this block's final position in the emitted code and records its
// successor. Returns the offset just past this block: `start` plus the
// block's size plus any alignment padding attached to it.
unsigned MyBlock::resolve(unsigned start, Assembler::Block* next) {
  this->next = static_cast<MyBlock*>(next);
  this->start = start;

  unsigned pad = padding(firstPadding, start, offset, lastPadding);
  return start + size + pad;
}
} // namespace x86
} // namespace codegen
} // namespace avian

40
src/codegen/x86/block.h Normal file
View File

@ -0,0 +1,40 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_BLOCK_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_BLOCK_H
#include "codegen/assembler.h"
namespace avian {
namespace codegen {
namespace x86 {
class AlignmentPadding;
// A contiguous chunk of generated x86 code. Blocks form a singly-linked
// list; each records where it begins within the vector of emitted bytes
// (`offset`) and, once resolved, its final position in the output
// (`start`), which accounts for alignment padding.
class MyBlock: public Assembler::Block {
public:
  // `offset` is the position of this block's first byte in the code vector.
  MyBlock(unsigned offset);

  // Assigns the block's final output position and links the successor;
  // returns the offset just past this block (start + size + padding).
  virtual unsigned resolve(unsigned start, Assembler::Block* next);

  MyBlock* next;                   // successor in emission order
  AlignmentPadding* firstPadding;  // head of this block's padding list
  AlignmentPadding* lastPadding;   // tail of this block's padding list
  unsigned offset;                 // position within the code vector
  unsigned start;                  // resolved output position; ~0 until resolved
  unsigned size;                   // size in bytes, excluding padding
};
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_BLOCK_H

View File

@ -0,0 +1,34 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "codegen/x86/context.h"
#include "codegen/x86/block.h"
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
namespace avian {
namespace codegen {
namespace x86 {
// Records the host system and whether natively-detected CPU features may
// be used. The operation dispatch tables are left untouched here.
ArchitectureContext::ArchitectureContext(vm::System* s, bool useNativeFeatures)
  : s(s),
    useNativeFeatures(useNativeFeatures)
{ }
// Sets up a fresh assembly context: an empty code vector with 1024 bytes
// of initial capacity backed by allocator `a`, no pending fixup tasks,
// no result buffer yet, and a single empty MyBlock at offset zero that
// serves as both first and last block.
Context::Context(vm::System* s, vm::Allocator* a, vm::Zone* zone,
                 ArchitectureContext* ac)
  : s(s),
    zone(zone),
    client(0),
    code(s, a, 1024),
    tasks(0),
    result(0),
    firstBlock(new(zone) MyBlock(0)),
    lastBlock(firstBlock),
    ac(ac)
{ }
} // namespace x86
} // namespace codegen
} // namespace avian

91
src/codegen/x86/context.h Normal file
View File

@ -0,0 +1,91 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_CONTEXT_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_CONTEXT_H
#include "codegen/lir.h"
#include "codegen/assembler.h"
#include "alloc-vector.h"
class Aborter;
namespace vm {
class System;
class Allocator;
class Zone;
} // namespace vm
namespace avian {
namespace codegen {
namespace x86 {
class Context;
class MyBlock;
class Task;

// Signatures for the entries of ArchitectureContext's dispatch tables.

// Operation taking no operands.
typedef void (*OperationType)(Context*);
// Operation taking one operand: (context, operand size, operand).
typedef void (*UnaryOperationType)(Context*, unsigned, lir::Operand*);
// Operation taking two operands: (context, size of a, a, size of b, b).
typedef void (*BinaryOperationType)
(Context*, unsigned, lir::Operand*, unsigned, lir::Operand*);
// Branch: (context, comparison operation, operand size, operands a and b,
// and a third operand — presumably the branch target; confirm at callers).
typedef void (*BranchOperationType)
(Context*, lir::TernaryOperation, unsigned, lir::Operand*,
 lir::Operand*, lir::Operand*);
// Architecture-wide state: the host system plus dispatch tables mapping
// LIR operations (by operation and operand types) to the functions that
// encode them as x86 machine code.
class ArchitectureContext {
public:
  ArchitectureContext(vm::System* s, bool useNativeFeatures);

  vm::System* s;           // host system; also serves as the Aborter
  bool useNativeFeatures;  // assumes this enables runtime CPU-feature
                           // detection — TODO confirm

  // Encoders for zero-operand operations, indexed by operation.
  OperationType operations[lir::OperationCount];
  // Encoders for one-operand operations, indexed by
  // operation x operand type.
  UnaryOperationType unaryOperations[lir::UnaryOperationCount
                                     * lir::OperandTypeCount];
  // Encoders for two-operand and non-branch ternary operations, indexed
  // by operation x first operand type x second operand type.
  BinaryOperationType binaryOperations
  [(lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
   * lir::OperandTypeCount
   * lir::OperandTypeCount];
  // Encoders for branch operations, indexed by
  // operation x first operand type x second operand type.
  BranchOperationType branchOperations
  [lir::BranchOperationCount
   * lir::OperandTypeCount
   * lir::OperandTypeCount];
};
// Per-assembler state: the bytes emitted so far, the pending fixup
// tasks, and the block list used to lay out the final output.
class Context {
public:
  Context(vm::System* s, vm::Allocator* a, vm::Zone* zone, ArchitectureContext* ac);

  vm::System* s;             // host system; also serves as the Aborter
  vm::Zone* zone;            // arena for blocks, tasks, and promises
  Assembler::Client* client;
  vm::Vector code;           // machine-code bytes emitted so far
  Task* tasks;               // pending fixups, most recently added first
  uint8_t* result;           // finished output buffer; 0 until assigned
                             // elsewhere — TODO confirm when it is set
  MyBlock* firstBlock;       // head of the block list (starts at offset 0)
  MyBlock* lastBlock;        // block currently receiving code
  ArchitectureContext* ac;
};
// The assert/expect machinery aborts through the System, which
// implements the Aborter interface.
inline Aborter* getAborter(Context* context) {
  return context->s;
}
// Architecture-level contexts abort through their System as well, which
// implements the Aborter interface.
inline Aborter* getAborter(ArchitectureContext* context) {
  return context->s;
}
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_CONTEXT_H

106
src/codegen/x86/fixup.cpp Normal file
View File

@ -0,0 +1,106 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "codegen/assembler.h"
#include "codegen/x86/context.h"
#include "codegen/x86/fixup.h"
#include "codegen/x86/padding.h"
#include "codegen/x86/block.h"
namespace avian {
namespace codegen {
namespace x86 {
// Zone-allocates a promise whose value is already known at creation time.
ResolvedPromise* resolvedPromise(Context* c, int64_t value) {
  return new(c->zone) ResolvedPromise(value);
}
// Promise for the final output position of a point inside `block`:
// `offset` is that point's position within the code vector and `limit`
// is the last alignment-padding record to take into account. value_ of
// -1 marks the cached result as not yet computed.
Offset::Offset(Context* c, MyBlock* block, unsigned offset, AlignmentPadding* limit):
  c(c), block(block), offset(offset), limit(limit), value_(-1)
{ }
bool Offset::resolved() {
return block->start != static_cast<unsigned>(~0);
}
// Final output position of the tracked point: the block's resolved start
// plus the point's offset within the block plus any alignment padding up
// to `limit`. The result is computed once and cached in value_.
int64_t Offset::value() {
  assert(c, resolved());

  if (value_ != -1) {
    return value_;
  }

  unsigned pad = padding(block->firstPadding, block->start, block->offset, limit);
  value_ = block->start + (offset - block->offset) + pad;
  return value_;
}
// Creates an Offset promise for the current end of the code vector,
// bounded by the most recent alignment-padding record.
Promise* offsetPromise(Context* c) {
  MyBlock* current = c->lastBlock;
  return new(c->zone) Offset(c, current, c->code.length(), current->lastPadding);
}
// Patches a pc-relative instruction: stores the 32-bit displacement from
// the end of `instruction` to the absolute address `value` into the
// instruction's last four bytes. Aborts via `s` if the displacement does
// not fit in 32 bits. Returns the address just past the instruction.
void*
resolveOffset(vm::System* s, uint8_t* instruction, unsigned instructionSize,
              int64_t value)
{
  uint8_t* target = reinterpret_cast<uint8_t*>(value);
  uint8_t* instructionEnd = instruction + instructionSize;

  intptr_t displacement = target - instructionEnd;
  expect(s, vm::fitsInInt32(displacement));

  int32_t d32 = displacement;
  memcpy(instructionEnd - 4, &d32, 4);
  return instructionEnd;
}
// Remembers which instruction to patch (and its size) so the patch can
// be applied when the target promise resolves.
OffsetListener::OffsetListener(vm::System* s, uint8_t* instruction,
                               unsigned instructionSize):
  s(s),
  instruction(instruction),
  instructionSize(instructionSize)
{ }
// Patches the recorded instruction now that the target address `value`
// is known, reporting the patched location via `location` if requested.
// Returns false — presumably meaning no further notification is needed;
// confirm against the Promise::Listener contract.
bool OffsetListener::resolve(int64_t value, void** location) {
  void* patched = resolveOffset(s, instruction, instructionSize, value);
  if (location) {
    *location = patched;
  }
  return false;
}
// Deferred fixup of a pc-relative instruction: `promise` supplies the
// branch target address, `instructionOffset` the instruction's position
// within the finished code, and `instructionSize` its length in bytes.
OffsetTask::OffsetTask(Task* next, Promise* promise, Promise* instructionOffset,
                       unsigned instructionSize):
  Task(next),
  promise(promise),
  instructionOffset(instructionOffset),
  instructionSize(instructionSize)
{ }
// Applies the fixup against the finished output buffer: patch the
// instruction immediately if the target is already known, otherwise
// register a listener to patch it when the promise resolves.
void OffsetTask::run(Context* c) {
  uint8_t* instruction = c->result + instructionOffset->value();

  if (promise->resolved()) {
    resolveOffset(c->s, instruction, instructionSize, promise->value());
  } else {
    new (promise->listen(sizeof(OffsetListener)))
      OffsetListener(c->s, instruction, instructionSize);
  }
}
// Queues a pc-relative fixup for the instruction at `instructionOffset`,
// prepending it to the context's task list.
void
appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
                 unsigned instructionSize)
{
  c->tasks = new(c->zone) OffsetTask(c->tasks, promise, instructionOffset,
                                     instructionSize);
}
} // namespace x86
} // namespace codegen
} // namespace avian

87
src/codegen/x86/fixup.h Normal file
View File

@ -0,0 +1,87 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_FIXUP_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_FIXUP_H
namespace vm {
class System;
}
namespace avian {
namespace codegen {
class Promise;
namespace x86 {
// Forward declaration added so this header is self-contained: Context is
// defined in codegen/x86/context.h, and without this line fixup.h could
// only be included after that header.
class Context;
class MyBlock;
class AlignmentPadding;

// NOTE(review): ResolvedPromise (namespace codegen) is used below but is
// declared in another codegen header; includers still need that one.
ResolvedPromise* resolvedPromise(Context* c, int64_t value);
// Promise for the final code offset of a position within a block.  value()
// presumably folds in any alignment padding inserted before 'offset' (up to
// the 'limit' node) -- TODO confirm against Offset::value() in fixup.cpp.
class Offset: public Promise {
public:
Offset(Context* c, MyBlock* block, unsigned offset, AlignmentPadding* limit);
virtual bool resolved();
virtual int64_t value();
Context* c;              // owning assembler context
MyBlock* block;          // block containing this position
unsigned offset;         // raw offset within the code buffer
AlignmentPadding* limit; // last padding node to account for, may be null
int value_;              // presumably caches the computed value -- confirm in fixup.cpp
};
// Creates an Offset promise for the current position in c's code buffer.
Promise* offsetPromise(Context* c);

// A deferred fix-up action, run once the final code buffer (c->result) is
// available; see OffsetTask::run.  Tasks form an intrusive singly-linked
// list headed at Context::tasks.
// NOTE(review): no virtual destructor -- harmless while tasks are only
// zone-allocated and never individually deleted; confirm if that changes.
class Task {
public:
Task(Task* next): next(next) { }
virtual void run(Context* c) = 0;
Task* next; // next task in the context's list, or null
};
// Patches the 32-bit displacement in the last 4 bytes of 'instruction' so it
// targets 'value'; returns the address just past the instruction.
void* resolveOffset(vm::System* s, uint8_t* instruction, unsigned instructionSize, int64_t value);

// Promise listener that patches an already-emitted instruction once the
// target address of its relative operand becomes known.
class OffsetListener: public Promise::Listener {
public:
OffsetListener(vm::System* s, uint8_t* instruction, unsigned instructionSize);
virtual bool resolve(int64_t value, void** location);
vm::System* s;            // used by resolveOffset's expect() check
uint8_t* instruction;     // start of the instruction to patch
unsigned instructionSize; // length in bytes; displacement is the final 4
};
// Task that patches one instruction's relative operand after code emission:
// patches immediately if 'promise' is already resolved, otherwise installs
// an OffsetListener to do it later (see OffsetTask::run in fixup.cpp).
class OffsetTask: public Task {
public:
OffsetTask(Task* next, Promise* promise, Promise* instructionOffset, unsigned instructionSize);
virtual void run(Context* c);
Promise* promise;           // yields the branch target address
Promise* instructionOffset; // offset of the instruction in the code buffer
unsigned instructionSize;   // length in bytes of the instruction to patch
};
// Prepends a new OffsetTask to the context's task list.
void appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset, unsigned instructionSize);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_FIXUP_H

View File

@ -0,0 +1,68 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "codegen/assembler.h"
#include "codegen/x86/context.h"
#include "codegen/x86/fixup.h"
#include "codegen/x86/padding.h"
#include "codegen/x86/block.h"
namespace avian {
namespace codegen {
namespace x86 {
// Registers an alignment constraint at the current position in c's code
// buffer and appends this node to the current block's padding list.  The
// actual padding amount stays unknown (-1) until padding() computes it.
AlignmentPadding::AlignmentPadding(Context* c, unsigned instructionOffset, unsigned alignment):
  offset(c->code.length()),
  instructionOffset(instructionOffset),
  alignment(alignment),
  next(0),
  padding(-1)
{
  MyBlock* b = c->lastBlock;
  if (b->firstPadding == 0) {
    // First constraint in this block: start the list.
    b->firstPadding = this;
  } else {
    // Otherwise link onto the current tail.
    b->lastPadding->next = this;
  }
  b->lastPadding = this;
}
// Returns the cumulative number of padding bytes that must be inserted into
// the block, up to and including the constraint node 'limit'.
//
// p      - head of the block's padding list
// start  - presumably the block's start address in the final image -- TODO
//          confirm against the caller in fixup.cpp/assembler
// offset - code-buffer offset corresponding to the block's first byte
// limit  - last constraint to account for; null means no padding at all
//
// Each node memoizes its cumulative padding in 'padding' (-1 while
// unknown), so repeated calls for the same block are cheap.
unsigned
padding(AlignmentPadding* p, unsigned start, unsigned offset,
        AlignmentPadding* limit)
{
  unsigned padding = 0;
  if (limit) {
    if (limit->padding == -1) {
      for (; p; p = p->next) {
        if (p->padding == -1) {
          // Position of this constraint relative to the block start:
          unsigned index = p->offset - offset;
          // Grow the padding until the instruction 'instructionOffset'
          // bytes past this point lands on the required boundary.
          while ((start + index + padding + p->instructionOffset)
                 % p->alignment)
          {
            ++ padding;
          }
          p->padding = padding;
          // Stop once the requested node has been computed.
          if (p == limit) break;
        } else {
          // Already computed earlier; continue accumulating from it.
          padding = p->padding;
        }
      }
    } else {
      // Memoized result from a previous call.
      padding = limit->padding;
    }
  }
  return padding;
}
} // namespace x86
} // namespace codegen
} // namespace avian

38
src/codegen/x86/padding.h Normal file
View File

@ -0,0 +1,38 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_PADDING_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_PADDING_H
namespace avian {
namespace codegen {
namespace x86 {
// Forward declaration added so this header is self-contained: Context is
// defined in codegen/x86/context.h, and without this line padding.h could
// only be included after that header.
class Context;

// Records a point in the code stream where filler bytes may need to be
// inserted so that the instruction 'instructionOffset' bytes later starts
// on an 'alignment'-byte boundary.  Nodes form a per-block singly-linked
// list; the actual amount is computed lazily by padding() in padding.cpp.
class AlignmentPadding {
public:
AlignmentPadding(Context* c, unsigned instructionOffset, unsigned alignment);
unsigned offset;            // code-buffer offset where this node was created
unsigned instructionOffset; // distance from this point to the instruction to align
unsigned alignment;         // required alignment, in bytes
AlignmentPadding* next;     // next node in the block's list, or null
int padding;                // memoized cumulative padding; -1 until computed
};
// Returns the cumulative number of padding bytes required in a block, up to
// and including the constraint 'limit'; see padding.cpp for details.
unsigned
padding(AlignmentPadding* p, unsigned start, unsigned offset,
AlignmentPadding* limit);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_PADDING_H

View File

@ -359,6 +359,18 @@ nextPowerOfTwo(unsigned n)
return r; return r;
} }
// True when v is representable as a signed 8-bit integer, i.e. truncation
// to int8_t loses no information.
inline bool fitsInInt8(int64_t v) {
  const int8_t truncated = static_cast<int8_t>(v);
  return truncated == v;
}
// True when v is representable as a signed 16-bit integer, i.e. truncation
// to int16_t loses no information.
inline bool fitsInInt16(int64_t v) {
  const int16_t truncated = static_cast<int16_t>(v);
  return truncated == v;
}
// True when v is representable as a signed 32-bit integer, i.e. truncation
// to int32_t loses no information.
inline bool fitsInInt32(int64_t v) {
  const int32_t truncated = static_cast<int32_t>(v);
  return truncated == v;
}
inline unsigned inline unsigned
log(unsigned n) log(unsigned n)
{ {