/* Copyright (c) 2008, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software.  See license.txt for
   details. */

#include "assembler.h"
#include "vector.h"

using namespace vm;

namespace {

enum {
  rax = 0,
  rcx = 1,
  rdx = 2,
  rbx = 3,
  rsp = 4,
  rbp = 5,
  rsi = 6,
  rdi = 7,
  r8 = 8,
  r9 = 9,
  r10 = 10,
  r11 = 11,
  r12 = 12,
  r13 = 13,
  r14 = 14,
  r15 = 15,
};

inline bool
isInt8(intptr_t v)
{
  return v == static_cast<int8_t>(v);
}

inline bool
isInt32(intptr_t v)
{
  return v == static_cast<int32_t>(v);
}

class Task;

class Context {
 public:
  Context(System* s, Allocator* a, Zone* zone):
    s(s), zone(zone), client(0), code(s, a, 1024), tasks(0), result(0)
  { }

  System* s;
  Zone* zone;
  Assembler::Client* client;
  Vector code;
  Task* tasks;
  uint8_t* result;
};

typedef void (*OperationType)(Context*);

typedef void (*UnaryOperationType)(Context*, unsigned, Assembler::Operand*);

typedef void (*BinaryOperationType)
(Context*, unsigned, Assembler::Operand*, unsigned, Assembler::Operand*);

typedef void (*TernaryOperationType)
(Context*, unsigned, Assembler::Operand*, unsigned, Assembler::Operand*,
 unsigned, Assembler::Operand*);

class ArchitectureContext {
 public:
  ArchitectureContext(System* s): s(s) { }

  System* s;
  OperationType operations[OperationCount];
  UnaryOperationType unaryOperations[UnaryOperationCount * OperandTypeCount];
  BinaryOperationType binaryOperations
  [BinaryOperationCount * OperandTypeCount * OperandTypeCount];
  TernaryOperationType ternaryOperations
  [TernaryOperationCount * OperandTypeCount * OperandTypeCount
   * OperandTypeCount];
};

inline void NO_RETURN
abort(Context* c)
{
  abort(c->s);
}

inline void NO_RETURN
abort(ArchitectureContext* c)
{
  abort(c->s);
}

#ifndef NDEBUG
inline void
assert(Context* c, bool v)
{
  assert(c->s, v);
}

inline void
assert(ArchitectureContext* c, bool v)
{
  assert(c->s, v);
}
#endif // not NDEBUG

inline void
expect(Context* c, bool v)
{
  expect(c->s, v);
}

ResolvedPromise*
resolved(Context* c, int64_t value)
{
  return new (c->zone->allocate(sizeof(ResolvedPromise)))
    ResolvedPromise(value);
}

// resolves to the absolute address of an offset within the generated code
class CodePromise: public Promise {
 public:
  CodePromise(Context* c, unsigned offset): c(c), offset(offset) { }

  virtual int64_t value() {
    if (resolved()) {
      return reinterpret_cast<intptr_t>(c->result + offset);
    }

    abort(c);
  }

  virtual bool resolved() {
    return c->result != 0;
  }

  Context* c;
  unsigned offset;
};

CodePromise*
codePromise(Context* c, unsigned offset)
{
  return new (c->zone->allocate(sizeof(CodePromise))) CodePromise(c, offset);
}

class Task {
 public:
  Task(Task* next): next(next) { }

  virtual ~Task() { }

  virtual void run(Context* c) = 0;

  Task* next;
};

// patches the 32-bit, instruction-relative offset at the end of an
// instruction once the target address is known
class OffsetTask: public Task {
 public:
  OffsetTask(Task* next, Promise* promise, unsigned instructionOffset,
             unsigned instructionSize):
    Task(next),
    promise(promise),
    instructionOffset(instructionOffset),
    instructionSize(instructionSize)
  { }

  virtual void run(Context* c) {
    uint8_t* instruction = c->result + instructionOffset;
    intptr_t v = reinterpret_cast<uint8_t*>(promise->value())
      - instruction - instructionSize;

    expect(c, isInt32(v));

    int32_t v4 = v;
    memcpy(instruction + instructionSize - 4, &v4, 4);
  }

  Promise* promise;
  unsigned instructionOffset;
  unsigned instructionSize;
};

void
appendOffsetTask(Context* c, Promise* promise, int instructionOffset,
                 unsigned instructionSize)
{
  c->tasks = new (c->zone->allocate(sizeof(OffsetTask))) OffsetTask
    (c->tasks, promise,
     instructionOffset, instructionSize);
}

// patches a word-sized immediate once the promise resolves
class ImmediateTask: public Task {
 public:
  ImmediateTask(Task* next, Promise* promise, unsigned offset):
    Task(next),
    promise(promise),
    offset(offset)
  { }

  virtual void run(Context* c) {
    intptr_t v = promise->value();
    memcpy(c->result + offset, &v, BytesPerWord);
  }

  Promise* promise;
  unsigned offset;
};

void
appendImmediateTask(Context* c, Promise* promise, unsigned offset)
{
  c->tasks = new (c->zone->allocate(sizeof(ImmediateTask))) ImmediateTask
    (c->tasks, promise, offset);
}

// append the given opcode bytes followed by a ModR/M byte (plus a SIB byte
// if an index register is used) and an 8- or 32-bit displacement, as needed
void
encode(Context* c, uint8_t* instruction, unsigned length, int a, int b,
       int32_t displacement, int index, unsigned scale)
{
  c->code.append(instruction, length);

  uint8_t width;
  if (displacement == 0 and b != rbp) {
    width = 0;
  } else if (isInt8(displacement)) {
    width = 0x40;
  } else {
    width = 0x80;
  }

  if (index == -1) {
    c->code.append(width | (a << 3) | b);
    if (b == rsp) {
      c->code.append(0x24);
    }
  } else {
    assert(c, b != rsp);
    c->code.append(width | (a << 3) | 4);
    c->code.append((log(scale) << 6) | (index << 3) | b);
  }

  if (displacement == 0 and b != rbp) {
    // do nothing
  } else if (isInt8(displacement)) {
    c->code.append(displacement);
  } else {
    c->code.append4(displacement);
  }
}

// emit a REX prefix on 64-bit targets; the low bit of the prefix extends
// the register number for r8-r15
void
rex(Context* c, uint8_t mask, int r)
{
  if (BytesPerWord == 8) {
    c->code.append(mask | ((r & 8) >> 3));
  }
}

void
rex(Context* c)
{
  rex(c, 0x48, rax);
}

void
encode(Context* c, uint8_t instruction, int a, Assembler::Memory* b,
       bool rex)
{
  if (rex) {
    ::rex(c);
  }

  encode(c, &instruction, 1, a, b->base, b->offset, b->index, b->scale);
}

void
encode2(Context* c, uint16_t instruction, int a, Assembler::Memory* b,
        bool rex)
{
  if (rex) {
    ::rex(c);
  }

  uint8_t i[2] = { instruction >> 8, instruction & 0xff };
  encode(c, i, 2, a, b->base, b->offset, b->index, b->scale);
}

void
return_(Context* c)
{
  c->code.append(0xc3);
}

void
unconditional(Context* c, unsigned jump, Assembler::Constant* a)
{
  appendOffsetTask(c, a->value, c->code.length(), 5);

  c->code.append(jump);
  c->code.append4(0);
}

void
conditional(Context* c, unsigned condition, Assembler::Constant* a)
{
  appendOffsetTask(c, a->value, c->code.length(), 6);

  c->code.append(0x0f);
  c->code.append(condition);
  c->code.append4(0);
}

inline unsigned
index(UnaryOperation operation, OperandType operand)
{
  return operation + (UnaryOperationCount * operand);
}

inline unsigned
index(BinaryOperation operation,
      OperandType operand1,
      OperandType operand2)
{
  return operation
    + (BinaryOperationCount * operand1)
    + (BinaryOperationCount * OperandTypeCount * operand2);
}

inline unsigned
index(TernaryOperation operation,
      OperandType operand1,
      OperandType operand2,
      OperandType operand3)
{
  return operation
    + (TernaryOperationCount * operand1)
    + (TernaryOperationCount * OperandTypeCount * operand2)
    + (TernaryOperationCount * OperandTypeCount * OperandTypeCount * operand3);
}

void
pushR(Context* c, unsigned size, Assembler::Register* a)
{
  if (BytesPerWord == 4 and size == 8) {
    Assembler::Register ah(a->high);

    pushR(c, 4, &ah);
    pushR(c, 4, a);
  } else {
    c->code.append(0x50 | a->low);
  }
}

void
moveRR(Context* c, unsigned aSize, Assembler::Register* a,
       unsigned bSize, Assembler::Register* b)
{
  if (BytesPerWord == 4 and aSize == 8 and bSize == 8) {
    Assembler::Register ah(a->high);
    Assembler::Register bh(b->high);

    moveRR(c, 4, a, 4, b);
    moveRR(c, 4, &ah, 4, &bh);
  } else {
    switch (aSize) {
    case 1:
      if (BytesPerWord == 4 and a->low > rbx) {
        assert(c, b->low <= rbx);

        moveRR(c, BytesPerWord, a, BytesPerWord, b);
        moveRR(c, 1, b, BytesPerWord, b);
      } else {
        rex(c);
        c->code.append(0x0f);
        c->code.append(0xbe); // movsx
        c->code.append(0xc0 | (b->low << 3) | a->low);
      }
      break;

    case 2:
      rex(c);
      c->code.append(0x0f);
      c->code.append(0xbf); // movsx
      c->code.append(0xc0 | (b->low << 3) | a->low);
      break;

    case 8:
    case 4:
      if (aSize == 4 and bSize == 8) {
        if (BytesPerWord == 8) {
          rex(c);
          c->code.append(0x63); // movsxd
          c->code.append(0xc0 | (b->low << 3) | a->low);
        } else {
          if (a->low == rax and b->low == rax and b->high == rdx) {
            c->code.append(0x99); // cdq
          } else {
            assert(c, b->low == rax and b->high == rdx);

            moveRR(c, 4, a, 4, b);
            moveRR(c, 4, b, 8, b);
          }
        }
      } else {
        if (a->low != b->low) {
          rex(c);
          c->code.append(0x89); // mov
          c->code.append(0xc0 | (a->low << 3) | b->low);
        }
      }
      break;
    }
  }
}

void
popR(Context* c, unsigned size, Assembler::Register* a)
{
  if (BytesPerWord == 4 and size == 8) {
    Assembler::Register ah(a->high);

    popR(c, 4, a);
    popR(c, 4, &ah);
  } else {
    c->code.append(0x58 | a->low);
    if (BytesPerWord == 8 and size == 4) {
      moveRR(c, 4, a, 8, a);
    }
  }
}

void
populateTables(ArchitectureContext*)
{
  // todo
//   const int Constant = ConstantOperand;
//   const int Address = AddressOperand;
//   const int Register = RegisterOperand;
//   const int Memory = MemoryOperand;

// #define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
// #define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
// #define CAST3(x) reinterpret_cast<TernaryOperationType>(x)
}

class MyArchitecture: public Assembler::Architecture {
 public:
  MyArchitecture(System* system): c(system), referenceCount(0) {
    populateTables(&c);
  }

  virtual unsigned registerCount() {
    return 8;//BytesPerWord == 4 ? 8 : 16;
  }

  virtual int stack() {
    return rsp;
  }

  virtual int thread() {
    return rbx;
  }

  virtual int returnLow() {
    return rax;
  }

  virtual bool reserved(int register_) {
    switch (register_) {
    case rbp:
    case rsp:
    case rbx:
      return true;

    default:
      return false;
    }
  }

  virtual int returnHigh() {
    return (BytesPerWord == 4 ? rdx : NoRegister);
  }

  virtual unsigned argumentRegisterCount() {
    return (BytesPerWord == 4 ?
            0 : 6);
  }

  virtual int argumentRegister(unsigned index) {
    assert(&c, BytesPerWord == 8);

    switch (index) {
    case 0:
      return rdi;
    case 1:
      return rsi;
    case 2:
      return rdx;
    case 3:
      return rcx;
    case 4:
      return r8;
    case 5:
      return r9;
    default:
      abort(&c);
    }
  }

  // rewrite the 32-bit relative operand of the call instruction
  // immediately preceding returnAddress so that it calls newTarget
  virtual void updateCall(void* returnAddress, void* newTarget) {
    uint8_t* instruction = static_cast<uint8_t*>(returnAddress) - 5;
    assert(&c, *instruction == 0xE8);
    assert(&c, reinterpret_cast<uintptr_t>(instruction + 1) % 4 == 0);

    int32_t v = static_cast<uint8_t*>(newTarget)
      - static_cast<uint8_t*>(returnAddress);
    memcpy(instruction + 1, &v, 4);
  }

  virtual unsigned alignFrameSize(unsigned sizeInWords) {
    const unsigned alignment = 16 / BytesPerWord;
    return (ceiling(sizeInWords + 2, alignment) * alignment);
  }

  virtual void* frameIp(void* stack) {
    return *static_cast<void**>(stack);
  }

  virtual unsigned frameHeaderSize() {
    return 2;
  }

  virtual unsigned frameFooterSize() {
    return 0;
  }

  virtual void nextFrame(void** stack, void** base) {
    *stack = static_cast<void**>(*base) + 1;
    *base = *static_cast<void**>(*base);
  }

  virtual void* popReturnAddress(void* stack) {
    return static_cast<void**>(stack) + 1;
  }

  virtual void plan
  (UnaryOperation,
   unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
   bool* thunk)
  {
    *aTypeMask = (1 << RegisterOperand) | (1 << MemoryOperand);
    *aRegisterMask = ~static_cast<uint64_t>(0);
    *thunk = false;
  }

  virtual void plan
  (BinaryOperation op,
   unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
   unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask,
   bool* thunk)
  {
    *aTypeMask = ~0;
    *aRegisterMask = ~static_cast<uint64_t>(0);

    *bTypeMask = (1 << RegisterOperand) | (1 << MemoryOperand);
    *bRegisterMask = ~static_cast<uint64_t>(0);

    *thunk = false;

    switch (op) {
    case Compare:
      if (BytesPerWord == 8 and aSize != 8) {
        *aTypeMask = ~(1 << MemoryOperand);
        *bTypeMask = ~(1 << MemoryOperand);
      } else {
        *bTypeMask = ~(1 << ConstantOperand);
      }
      break;

    case Move:
      if (BytesPerWord == 4) {
        if (aSize == 4 and bSize == 8) {
          const uint32_t mask = ~((1 << rax) | (1 << rdx));
          *aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask;
          *bRegisterMask = (static_cast<uint64_t>(1) << (rdx + 32))
            | (static_cast<uint64_t>(1) << rax);
        } else if (aSize == 1) {
          const uint32_t mask
            = (1 << rax) | (1 << rcx) | (1 << rdx) | (1 << rbx);
          *aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask;
          *bRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask;
        }
      }
      break;

    default:
      break;
    }
  }

  virtual void plan
  (TernaryOperation,
   unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
   unsigned, uint8_t* bTypeMask, uint64_t* bRegisterMask,
   unsigned, uint8_t* cTypeMask, uint64_t* cRegisterMask,
   bool* thunk)
  {
    *aTypeMask = ~0;
    *aRegisterMask = ~static_cast<uint64_t>(0);

    *bTypeMask = ~0;
    *bRegisterMask = ~static_cast<uint64_t>(0);

    *cTypeMask = (1 << RegisterOperand) | (1 << MemoryOperand);
    *cRegisterMask = ~static_cast<uint64_t>(0);

    *thunk = false;
  }

  virtual void acquire() {
    ++ referenceCount;
  }

  virtual void release() {
    if (-- referenceCount == 0) {
      c.s->free(this);
    }
  }

  ArchitectureContext c;
  unsigned referenceCount;
};

class MyAssembler: public Assembler {
 public:
  MyAssembler(System* s, Allocator* a, Zone* zone, MyArchitecture* arch):
    c(s, a, zone), arch_(arch)
  { }

  virtual void setClient(Client* client) {
    assert(&c, c.client == 0);
    c.client = client;
  }

  virtual Architecture* arch() {
    return arch_;
  }

  virtual void saveFrame(unsigned stackOffset, unsigned baseOffset) {
    Register stack(rsp);
    Memory stackDst(rbx, stackOffset);
    apply(Move, BytesPerWord, RegisterOperand, &stack,
          BytesPerWord, MemoryOperand, &stackDst);

    Register base(rbp);
    Memory baseDst(rbx, baseOffset);
    apply(Move, BytesPerWord, RegisterOperand, &base,
          BytesPerWord, MemoryOperand, &baseDst);
  }
  virtual void pushFrame(unsigned argumentCount, ...) {
    struct {
      unsigned size;
      OperandType type;
      Operand* operand;
    } arguments[argumentCount];

    va_list a; va_start(a, argumentCount);
    unsigned footprint = 0;
    for (unsigned i = 0; i < argumentCount; ++i) {
      arguments[i].size = va_arg(a, unsigned);
      arguments[i].type = static_cast<OperandType>(va_arg(a, int));
      arguments[i].operand = va_arg(a, Operand*);
      footprint += ceiling(arguments[i].size, BytesPerWord);
    }
    va_end(a);

    allocateFrame(footprint);

    for (unsigned i = 0; i < argumentCount; ++i) {
      Memory dst(rsp, footprint);
      apply(Move,
            arguments[i].size, arguments[i].type, arguments[i].operand,
            pad(arguments[i].size), MemoryOperand, &dst);
      footprint -= ceiling(arguments[i].size, BytesPerWord);
    }
  }

  virtual void allocateFrame(unsigned footprint) {
    Register base(rbp);
    pushR(&c, BytesPerWord, &base);

    Register stack(rsp);
    apply(Move, BytesPerWord, RegisterOperand, &stack,
          BytesPerWord, RegisterOperand, &base);

    Constant footprintConstant
      (resolved(&c, arch_->alignFrameSize(footprint) * BytesPerWord));
    apply(Subtract, BytesPerWord, ConstantOperand, &footprintConstant,
          BytesPerWord, RegisterOperand, &stack,
          BytesPerWord, RegisterOperand, &stack);
  }

  virtual void popFrame() {
    Register base(rbp);
    Register stack(rsp);
    apply(Move, BytesPerWord, RegisterOperand, &base,
          BytesPerWord, RegisterOperand, &stack);

    popR(&c, BytesPerWord, &base);
  }

  virtual void apply(Operation op) {
    arch_->c.operations[op](&c);
  }

  virtual void apply(UnaryOperation op,
                     unsigned aSize, OperandType aType, Operand* aOperand)
  {
    arch_->c.unaryOperations[index(op, aType)](&c, aSize, aOperand);
  }

  virtual void apply(BinaryOperation op,
                     unsigned aSize, OperandType aType, Operand* aOperand,
                     unsigned bSize, OperandType bType, Operand* bOperand)
  {
    arch_->c.binaryOperations[index(op, aType, bType)]
      (&c, aSize, aOperand, bSize, bOperand);
  }

  virtual void apply(TernaryOperation op,
                     unsigned aSize, OperandType aType, Operand* aOperand,
                     unsigned bSize, OperandType bType, Operand* bOperand,
                     unsigned cSize, OperandType cType, Operand* cOperand)
  {
    arch_->c.ternaryOperations[index(op, aType, bType, cType)]
      (&c, aSize, aOperand, bSize, bOperand, cSize, cOperand);
  }

  virtual void writeTo(uint8_t* dst) {
    c.result = dst;
    memcpy(dst, c.code.data, c.code.length());

    for (Task* t = c.tasks; t; t = t->next) {
      t->run(&c);
    }
  }

  virtual Offset* offset() {
    // todo
    return 0;
  }

  virtual void endBlock() {
    // todo
  }

  virtual unsigned length() {
    return c.code.length();
  }

  virtual void dispose() {
    c.code.dispose();
  }

  Context c;
  MyArchitecture* arch_;
};

} // namespace

namespace vm {

Assembler::Architecture*
makeArchitecture(System* system)
{
  return new (allocate(system, sizeof(MyArchitecture))) MyArchitecture(system);
}

Assembler*
makeAssembler(System* system, Allocator* allocator, Zone* zone,
              Assembler::Architecture* architecture)
{
  return new (zone->allocate(sizeof(MyAssembler)))
    MyAssembler(system, allocator, zone,
                static_cast<MyArchitecture*>(architecture));
}

} // namespace vm