further break out x86 assembler

Joshua Warner 2013-02-16 21:55:28 -07:00 committed by Joshua Warner
parent 61f03643e0
commit 984f987e03
15 changed files with 2674 additions and 2323 deletions

File diff suppressed because it is too large


@@ -11,10 +11,6 @@
#include "codegen/x86/block.h"
#include "common.h"
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
namespace avian {
namespace codegen {
namespace x86 {


@@ -11,10 +11,6 @@
#include "codegen/x86/context.h"
#include "codegen/x86/block.h"
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
namespace avian {
namespace codegen {
namespace x86 {


@@ -11,6 +11,10 @@
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_CONTEXT_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_CONTEXT_H
#define CAST1(x) reinterpret_cast<UnaryOperationType>(x)
#define CAST2(x) reinterpret_cast<BinaryOperationType>(x)
#define CAST_BRANCH(x) reinterpret_cast<BranchOperationType>(x)
#include "codegen/lir.h"
#include "codegen/assembler.h"
#include "alloc-vector.h"


@@ -0,0 +1,40 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "codegen/x86/context.h"
#include "codegen/x86/block.h"
namespace avian {
namespace codegen {
namespace x86 {
extern "C" bool
detectFeature(unsigned ecx, unsigned edx);
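// detectFeature is presumably implemented in platform assembly elsewhere in
// the tree; the assumption here is that it executes CPUID and tests the
// given masks against the returned ecx/edx values. Below, edx bit 25
// (0x2000000) indicates SSE and bit 26 (0x4000000) indicates SSE2.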
bool useSSE(ArchitectureContext* c) {
if (vm::TargetBytesPerWord == 8) {
// amd64 implies SSE2 support
return true;
} else if (c->useNativeFeatures) {
static int supported = -1;
if (supported == -1) {
supported = detectFeature(0, 0x2000000) // SSE 1
and detectFeature(0, 0x4000000); // SSE 2
}
return supported;
} else {
return false;
}
}
} // namespace x86
} // namespace codegen
} // namespace avian

src/codegen/x86/detect.h (new file, +28 lines)

@@ -0,0 +1,28 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_DETECT_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_DETECT_H
#include "codegen/assembler.h"
namespace avian {
namespace codegen {
namespace x86 {
class ArchitectureContext;
bool useSSE(ArchitectureContext* c);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_DETECT_H

src/codegen/x86/encode.cpp (new file, +345 lines)

@@ -0,0 +1,345 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "target.h"
#include "codegen/x86/context.h"
#include "codegen/x86/encode.h"
#include "codegen/x86/registers.h"
#include "codegen/x86/fixup.h"
namespace avian {
namespace codegen {
namespace x86 {
#define REX_W 0x48
#define REX_R 0x44
#define REX_X 0x42
#define REX_B 0x41
#define REX_NONE 0x40
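// A REX prefix has the form 0b0100WRXB: W selects a 64-bit operand size,
// R extends the ModRM reg field, X extends the SIB index field, and B
// extends the ModRM r/m (or SIB base) field. REX_NONE is the prefix with
// no extension bits set.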
void maybeRex(Context* c, unsigned size, int a, int index, int base, bool always) {
if (vm::TargetBytesPerWord == 8) {
uint8_t byte;
if (size == 8) {
byte = REX_W;
} else {
byte = REX_NONE;
}
if (a != lir::NoRegister and (a & 8)) byte |= REX_R;
if (index != lir::NoRegister and (index & 8)) byte |= REX_X;
if (base != lir::NoRegister and (base & 8)) byte |= REX_B;
if (always or byte != REX_NONE) c->code.append(byte);
}
}
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b) {
maybeRex(c, size, a->low, lir::NoRegister, b->low, false);
}
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b) {
maybeRex(c, size, a->low, lir::NoRegister, b->low, true);
}
void maybeRex(Context* c, unsigned size, lir::Register* a) {
maybeRex(c, size, lir::NoRegister, lir::NoRegister, a->low, false);
}
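// For byte-sized operands, registers 4-7 are only addressable as
// spl/bpl/sil/dil when a REX prefix is present; without one, the same
// encodings select ah/ch/dh/bh. That is why the overload below forces
// 'always' for size == 1 when the register number has bit 2 set.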
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b) {
maybeRex(c, size, a->low, b->index, b->base, size == 1 and (a->low & 4));
}
void maybeRex(Context* c, unsigned size, lir::Memory* a) {
maybeRex(c, size, lir::NoRegister, a->index, a->base, false);
}
void modrm(Context* c, uint8_t mod, int a, int b) {
c->code.append(mod | (regCode(b) << 3) | regCode(a));
}
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b) {
modrm(c, mod, a->low, b->low);
}
void sib(Context* c, unsigned scale, int index, int base) {
c->code.append((vm::log(scale) << 6) | (regCode(index) << 3) | regCode(base));
}
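// ModRM packs mod(2)|reg(3)|rm(3); SIB packs scale(2)|index(3)|base(3).
// Two x86 quirks shape the helpers below: rm == 4 (rsp) signals that a SIB
// byte follows, and base == 5 (rbp) with mod == 0 means disp32 with no
// base, so rbp-relative addresses always carry an explicit displacement.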
void modrmSib(Context* c, int width, int a, int scale, int index, int base) {
if (index == lir::NoRegister) {
modrm(c, width, base, a);
if (regCode(base) == rsp) {
sib(c, 0x00, rsp, rsp);
}
} else {
modrm(c, width, rsp, a);
sib(c, scale, index, base);
}
}
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset) {
if (offset == 0 and regCode(base) != rbp) {
modrmSib(c, 0x00, a, scale, index, base);
} else if (vm::fitsInInt8(offset)) {
modrmSib(c, 0x40, a, scale, index, base);
c->code.append(offset);
} else {
modrmSib(c, 0x80, a, scale, index, base);
c->code.append4(offset);
}
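// Worked example (assuming vm::log is the base-2 logarithm): addressing
// [rax + rcx*4 + 0x10] with reg = rdx, i.e. modrmSibImm(c, rdx, 4, rcx,
// rax, 0x10), emits ModRM 0x54 (mod=01, reg=rdx, rm=100 -> SIB byte),
// SIB 0x88 (scale=2, index=rcx, base=rax), then the 8-bit displacement
// 0x10.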
}
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b) {
modrmSibImm(c, a->low, b->scale, b->index, b->base, b->offset);
}
void opcode(Context* c, uint8_t op) {
c->code.append(op);
}
void opcode(Context* c, uint8_t op1, uint8_t op2) {
c->code.append(op1);
c->code.append(op2);
}
void unconditional(Context* c, unsigned jump, lir::Constant* a) {
appendOffsetTask(c, a->value, offsetPromise(c), 5);
opcode(c, jump);
c->code.append4(0);
}
void conditional(Context* c, unsigned condition, lir::Constant* a) {
appendOffsetTask(c, a->value, offsetPromise(c), 6);
opcode(c, 0x0f, condition);
c->code.append4(0);
}
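// The instruction sizes recorded above are 5 bytes for jmp/call rel32
// (one opcode byte plus imm32) and 6 bytes for the two-byte 0x0f jcc
// opcodes plus imm32. The zeroed imm32 is a placeholder that the offset
// task patches once the branch target is resolved.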
void sseMoveRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize >= 4);
assert(c, aSize == bSize);
if (isFloatReg(a) and isFloatReg(b)) {
if (aSize == 4) {
opcode(c, 0xf3);
maybeRex(c, 4, a, b);
opcode(c, 0x0f, 0x10);
modrm(c, 0xc0, a, b);
} else {
opcode(c, 0xf2);
maybeRex(c, 4, b, a);
opcode(c, 0x0f, 0x10);
modrm(c, 0xc0, a, b);
}
} else if (isFloatReg(a)) {
opcode(c, 0x66);
maybeRex(c, aSize, a, b);
opcode(c, 0x0f, 0x7e);
modrm(c, 0xc0, b, a);
} else {
opcode(c, 0x66);
maybeRex(c, aSize, b, a);
opcode(c, 0x0f, 0x6e);
modrm(c, 0xc0, a, b);
}
}
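// The encodings above are movss (f3 0f 10) and movsd (f2 0f 10) for
// xmm-to-xmm moves, and movd/movq (66 0f 6e loads an xmm register from a
// GPR; 66 0f 7e stores one) when exactly one operand is a float register.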
void sseMoveCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize <= vm::TargetBytesPerWord);
lir::Register tmp(c->client->acquireTemporary(GeneralRegisterMask));
moveCR2(c, aSize, a, aSize, &tmp, 0);
sseMoveRR(c, aSize, &tmp, bSize, b);
c->client->releaseTemporary(tmp.low);
}
void sseMoveMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize >= 4);
if (vm::TargetBytesPerWord == 4 and aSize == 8) {
opcode(c, 0xf3);
opcode(c, 0x0f, 0x7e);
modrmSibImm(c, b, a);
} else {
opcode(c, 0x66);
maybeRex(c, aSize, b, a);
opcode(c, 0x0f, 0x6e);
modrmSibImm(c, b, a);
}
}
void sseMoveRM(Context* c, unsigned aSize, lir::Register* a,
UNUSED unsigned bSize, lir::Memory* b)
{
assert(c, aSize >= 4);
assert(c, aSize == bSize);
if (vm::TargetBytesPerWord == 4 and aSize == 8) {
opcode(c, 0x66);
opcode(c, 0x0f, 0xd6);
modrmSibImm(c, a, b);
} else {
opcode(c, 0x66);
maybeRex(c, aSize, a, b);
opcode(c, 0x0f, 0x7e);
modrmSibImm(c, a, b);
}
}
void branch(Context* c, lir::TernaryOperation op, lir::Constant* target) {
switch (op) {
case lir::JumpIfEqual:
conditional(c, 0x84, target);
break;
case lir::JumpIfNotEqual:
conditional(c, 0x85, target);
break;
case lir::JumpIfLess:
conditional(c, 0x8c, target);
break;
case lir::JumpIfGreater:
conditional(c, 0x8f, target);
break;
case lir::JumpIfLessOrEqual:
conditional(c, 0x8e, target);
break;
case lir::JumpIfGreaterOrEqual:
conditional(c, 0x8d, target);
break;
default:
abort(c);
}
}
void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target) {
switch (op) {
case lir::JumpIfFloatEqual:
conditional(c, 0x84, target);
break;
case lir::JumpIfFloatNotEqual:
conditional(c, 0x85, target);
break;
case lir::JumpIfFloatLess:
conditional(c, 0x82, target);
break;
case lir::JumpIfFloatGreater:
conditional(c, 0x87, target);
break;
case lir::JumpIfFloatLessOrEqual:
conditional(c, 0x86, target);
break;
case lir::JumpIfFloatGreaterOrEqual:
conditional(c, 0x83, target);
break;
case lir::JumpIfFloatLessOrUnordered:
conditional(c, 0x82, target);
conditional(c, 0x8a, target);
break;
case lir::JumpIfFloatGreaterOrUnordered:
conditional(c, 0x87, target);
conditional(c, 0x8a, target);
break;
case lir::JumpIfFloatLessOrEqualOrUnordered:
conditional(c, 0x86, target);
conditional(c, 0x8a, target);
break;
case lir::JumpIfFloatGreaterOrEqualOrUnordered:
conditional(c, 0x83, target);
conditional(c, 0x8a, target);
break;
default:
abort(c);
}
}
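// These condition codes assume the flags were set by a ucomiss/ucomisd
// style compare: 0x82/0x87/0x86/0x83 are the unsigned jb/ja/jbe/jae forms,
// and 0x8a is jp, which fires on the parity flag set by an unordered (NaN)
// compare; hence the second jump emitted for the *OrUnordered variants.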
void floatRegOp(Context* c, unsigned aSize, lir::Register* a, unsigned bSize,
lir::Register* b, uint8_t op, uint8_t mod)
{
if (aSize == 4) {
opcode(c, 0xf3);
} else {
opcode(c, 0xf2);
}
maybeRex(c, bSize, b, a);
opcode(c, 0x0f, op);
modrm(c, mod, a, b);
}
void floatMemOp(Context* c, unsigned aSize, lir::Memory* a, unsigned bSize,
lir::Register* b, uint8_t op)
{
if (aSize == 4) {
opcode(c, 0xf3);
} else {
opcode(c, 0xf2);
}
maybeRex(c, bSize, b, a);
opcode(c, 0x0f, op);
modrmSibImm(c, b, a);
}
void moveCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void moveCR2(Context* c, UNUSED unsigned aSize, lir::Constant* a,
UNUSED unsigned bSize, lir::Register* b, unsigned promiseOffset)
{
if (vm::TargetBytesPerWord == 4 and bSize == 8) {
int64_t v = a->value->value();
ResolvedPromise high((v >> 32) & 0xFFFFFFFF);
lir::Constant ah(&high);
ResolvedPromise low(v & 0xFFFFFFFF);
lir::Constant al(&low);
lir::Register bh(b->high);
moveCR(c, 4, &al, 4, b);
moveCR(c, 4, &ah, 4, &bh);
} else {
maybeRex(c, vm::TargetBytesPerWord, b);
opcode(c, 0xb8 + regCode(b));
if (a->value->resolved()) {
c->code.appendTargetAddress(a->value->value());
} else {
appendImmediateTask
(c, a->value, offsetPromise(c), vm::TargetBytesPerWord, promiseOffset);
c->code.appendTargetAddress(static_cast<vm::target_uintptr_t>(0));
}
}
}
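// On a 32-bit target, a 64-bit constant is split into two 4-byte moves
// through the low/high register pair. Otherwise this is the mov r, imm
// encoding (opcode 0xb8 + register code); if the constant is not yet
// resolved, an ImmediateTask is recorded to patch the immediate later.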
} // namespace x86
} // namespace codegen
} // namespace avian

src/codegen/x86/encode.h (new file, +93 lines)

@@ -0,0 +1,93 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_ENCODE_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_ENCODE_H
#include "codegen/assembler.h"
#include "codegen/x86/registers.h"
namespace avian {
namespace codegen {
namespace x86 {
void maybeRex(Context* c, unsigned size, int a, int index, int base, bool always);
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b);
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b);
void maybeRex(Context* c, unsigned size, lir::Register* a);
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b);
void maybeRex(Context* c, unsigned size, lir::Memory* a);
inline int regCode(int a) {
return a & 7;
}
inline int regCode(lir::Register* a) {
return regCode(a->low);
}
inline bool isFloatReg(lir::Register* a) {
return a->low >= xmm0;
}
void modrm(Context* c, uint8_t mod, int a, int b);
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b);
void sib(Context* c, unsigned scale, int index, int base);
void modrmSib(Context* c, int width, int a, int scale, int index, int base);
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset);
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b);
void opcode(Context* c, uint8_t op);
void opcode(Context* c, uint8_t op1, uint8_t op2);
void unconditional(Context* c, unsigned jump, lir::Constant* a);
void conditional(Context* c, unsigned condition, lir::Constant* a);
void sseMoveRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void sseMoveCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void sseMoveMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void sseMoveRM(Context* c, unsigned aSize, lir::Register* a,
UNUSED unsigned bSize, lir::Memory* b);
void branch(Context* c, lir::TernaryOperation op, lir::Constant* target);
void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target);
void floatRegOp(Context* c, unsigned aSize, lir::Register* a, unsigned bSize,
lir::Register* b, uint8_t op, uint8_t mod = 0xc0);
void floatMemOp(Context* c, unsigned aSize, lir::Memory* a, unsigned bSize,
lir::Register* b, uint8_t op);
void moveCR2(Context* c, UNUSED unsigned aSize, lir::Constant* a,
UNUSED unsigned bSize, lir::Register* b, unsigned promiseOffset);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_ENCODE_H


@@ -101,6 +101,62 @@ appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset,
c->tasks = task;
}
ImmediateListener::ImmediateListener(vm::System* s, void* dst, unsigned size, unsigned offset):
s(s), dst(dst), size(size), offset(offset)
{ }
void copy(vm::System* s, void* dst, int64_t src, unsigned size) {
switch (size) {
case 4: {
int32_t v = src;
memcpy(dst, &v, 4);
} break;
case 8: {
int64_t v = src;
memcpy(dst, &v, 8);
} break;
default: abort(s);
}
}
bool ImmediateListener::resolve(int64_t value, void** location) {
copy(s, dst, value, size);
if (location) *location = static_cast<uint8_t*>(dst) + offset;
return offset == 0;
}
ImmediateTask::ImmediateTask(Task* next, Promise* promise, Promise* offset, unsigned size,
unsigned promiseOffset):
Task(next),
promise(promise),
offset(offset),
size(size),
promiseOffset(promiseOffset)
{ }
void ImmediateTask::run(Context* c) {
if (promise->resolved()) {
copy(c->s, c->result + offset->value(), promise->value(), size);
} else {
new (promise->listen(sizeof(ImmediateListener))) ImmediateListener
(c->s, c->result + offset->value(), size, promiseOffset);
}
}
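// If the promise is already resolved, the immediate is written straight
// into the generated code; otherwise a listener is allocated in
// promise-owned memory and performs the copy once the value is known.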
void
appendImmediateTask(Context* c, Promise* promise, Promise* offset,
unsigned size, unsigned promiseOffset)
{
c->tasks = new(c->zone) ImmediateTask
(c->tasks, promise, offset, size, promiseOffset);
}
ShiftMaskPromise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask) {
return new(c->zone) ShiftMaskPromise(base, shift, mask);
}
} // namespace x86
} // namespace codegen
} // namespace avian


@@ -80,6 +80,37 @@ class OffsetTask: public Task {
void appendOffsetTask(Context* c, Promise* promise, Promise* instructionOffset, unsigned instructionSize);
class ImmediateListener: public Promise::Listener {
public:
ImmediateListener(vm::System* s, void* dst, unsigned size, unsigned offset);
virtual bool resolve(int64_t value, void** location);
vm::System* s;
void* dst;
unsigned size;
unsigned offset;
};
class ImmediateTask: public Task {
public:
ImmediateTask(Task* next, Promise* promise, Promise* offset, unsigned size,
unsigned promiseOffset);
virtual void run(Context* c);
Promise* promise;
Promise* offset;
unsigned size;
unsigned promiseOffset;
};
void
appendImmediateTask(Context* c, Promise* promise, Promise* offset,
unsigned size, unsigned promiseOffset = 0);
ShiftMaskPromise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask);
} // namespace x86
} // namespace codegen
} // namespace avian


@@ -0,0 +1,174 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "common.h"
#include "codegen/lir.h"
#include "codegen/x86/context.h"
#include "codegen/x86/multimethod.h"
#include "codegen/x86/operations.h"
namespace avian {
namespace codegen {
namespace x86 {
unsigned index(ArchitectureContext*, lir::UnaryOperation operation, lir::OperandType operand) {
return operation + (lir::UnaryOperationCount * operand);
}
unsigned index(ArchitectureContext*, lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2)
{
return operation
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount) * operand1)
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount * operand2);
}
unsigned index(ArchitectureContext* c UNUSED, lir::TernaryOperation operation,
lir::OperandType operand1, lir::OperandType operand2)
{
assert(c, not isBranch(operation));
return lir::BinaryOperationCount + operation
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount) * operand1)
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
* lir::OperandTypeCount * operand2);
}
unsigned branchIndex(ArchitectureContext* c UNUSED, lir::OperandType operand1,
lir::OperandType operand2)
{
return operand1 + (lir::OperandTypeCount * operand2);
}
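// The index functions above flatten (operation, operand-type...) tuples
// into positions in the dispatch tables filled in below: binary and
// non-branch ternary operations share one table, laid out as
// op + opCount*operand1 + opCount*OperandTypeCount*operand2 (with
// opCount = BinaryOperationCount + NonBranchTernaryOperationCount), while
// branches use a small table keyed by the two operand types alone.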
void populateTables(ArchitectureContext* c) {
const lir::OperandType C = lir::ConstantOperand;
const lir::OperandType A = lir::AddressOperand;
const lir::OperandType R = lir::RegisterOperand;
const lir::OperandType M = lir::MemoryOperand;
OperationType* zo = c->operations;
UnaryOperationType* uo = c->unaryOperations;
BinaryOperationType* bo = c->binaryOperations;
BranchOperationType* bro = c->branchOperations;
zo[lir::Return] = return_;
zo[lir::LoadBarrier] = ignore;
zo[lir::StoreStoreBarrier] = ignore;
zo[lir::StoreLoadBarrier] = storeLoadBarrier;
zo[lir::Trap] = trap;
uo[index(c, lir::Call, C)] = CAST1(callC);
uo[index(c, lir::Call, R)] = CAST1(callR);
uo[index(c, lir::Call, M)] = CAST1(callM);
uo[index(c, lir::AlignedCall, C)] = CAST1(alignedCallC);
uo[index(c, lir::LongCall, C)] = CAST1(longCallC);
uo[index(c, lir::AlignedLongCall, C)] = CAST1(alignedLongCallC);
uo[index(c, lir::Jump, R)] = CAST1(jumpR);
uo[index(c, lir::Jump, C)] = CAST1(jumpC);
uo[index(c, lir::Jump, M)] = CAST1(jumpM);
uo[index(c, lir::AlignedJump, C)] = CAST1(alignedJumpC);
uo[index(c, lir::LongJump, C)] = CAST1(longJumpC);
uo[index(c, lir::AlignedLongJump, C)] = CAST1(alignedLongJumpC);
bo[index(c, lir::Negate, R, R)] = CAST2(negateRR);
bo[index(c, lir::FloatNegate, R, R)] = CAST2(floatNegateRR);
bo[index(c, lir::Move, R, R)] = CAST2(moveRR);
bo[index(c, lir::Move, C, R)] = CAST2(moveCR);
bo[index(c, lir::Move, M, R)] = CAST2(moveMR);
bo[index(c, lir::Move, R, M)] = CAST2(moveRM);
bo[index(c, lir::Move, C, M)] = CAST2(moveCM);
bo[index(c, lir::Move, A, R)] = CAST2(moveAR);
bo[index(c, lir::FloatSquareRoot, R, R)] = CAST2(floatSqrtRR);
bo[index(c, lir::FloatSquareRoot, M, R)] = CAST2(floatSqrtMR);
bo[index(c, lir::MoveZ, R, R)] = CAST2(moveZRR);
bo[index(c, lir::MoveZ, M, R)] = CAST2(moveZMR);
bo[index(c, lir::MoveZ, C, R)] = CAST2(moveCR);
bo[index(c, lir::Add, R, R)] = CAST2(addRR);
bo[index(c, lir::Add, C, R)] = CAST2(addCR);
bo[index(c, lir::Subtract, C, R)] = CAST2(subtractCR);
bo[index(c, lir::Subtract, R, R)] = CAST2(subtractRR);
bo[index(c, lir::FloatAdd, R, R)] = CAST2(floatAddRR);
bo[index(c, lir::FloatAdd, M, R)] = CAST2(floatAddMR);
bo[index(c, lir::FloatSubtract, R, R)] = CAST2(floatSubtractRR);
bo[index(c, lir::FloatSubtract, M, R)] = CAST2(floatSubtractMR);
bo[index(c, lir::And, R, R)] = CAST2(andRR);
bo[index(c, lir::And, C, R)] = CAST2(andCR);
bo[index(c, lir::Or, R, R)] = CAST2(orRR);
bo[index(c, lir::Or, C, R)] = CAST2(orCR);
bo[index(c, lir::Xor, R, R)] = CAST2(xorRR);
bo[index(c, lir::Xor, C, R)] = CAST2(xorCR);
bo[index(c, lir::Multiply, R, R)] = CAST2(multiplyRR);
bo[index(c, lir::Multiply, C, R)] = CAST2(multiplyCR);
bo[index(c, lir::Divide, R, R)] = CAST2(divideRR);
bo[index(c, lir::FloatMultiply, R, R)] = CAST2(floatMultiplyRR);
bo[index(c, lir::FloatMultiply, M, R)] = CAST2(floatMultiplyMR);
bo[index(c, lir::FloatDivide, R, R)] = CAST2(floatDivideRR);
bo[index(c, lir::FloatDivide, M, R)] = CAST2(floatDivideMR);
bo[index(c, lir::Remainder, R, R)] = CAST2(remainderRR);
bo[index(c, lir::ShiftLeft, R, R)] = CAST2(shiftLeftRR);
bo[index(c, lir::ShiftLeft, C, R)] = CAST2(shiftLeftCR);
bo[index(c, lir::ShiftRight, R, R)] = CAST2(shiftRightRR);
bo[index(c, lir::ShiftRight, C, R)] = CAST2(shiftRightCR);
bo[index(c, lir::UnsignedShiftRight, R, R)] = CAST2(unsignedShiftRightRR);
bo[index(c, lir::UnsignedShiftRight, C, R)] = CAST2(unsignedShiftRightCR);
bo[index(c, lir::Float2Float, R, R)] = CAST2(float2FloatRR);
bo[index(c, lir::Float2Float, M, R)] = CAST2(float2FloatMR);
bo[index(c, lir::Float2Int, R, R)] = CAST2(float2IntRR);
bo[index(c, lir::Float2Int, M, R)] = CAST2(float2IntMR);
bo[index(c, lir::Int2Float, R, R)] = CAST2(int2FloatRR);
bo[index(c, lir::Int2Float, M, R)] = CAST2(int2FloatMR);
bo[index(c, lir::Absolute, R, R)] = CAST2(absoluteRR);
bo[index(c, lir::FloatAbsolute, R, R)] = CAST2(floatAbsoluteRR);
bro[branchIndex(c, R, R)] = CAST_BRANCH(branchRR);
bro[branchIndex(c, C, R)] = CAST_BRANCH(branchCR);
bro[branchIndex(c, C, M)] = CAST_BRANCH(branchCM);
bro[branchIndex(c, R, M)] = CAST_BRANCH(branchRM);
}
} // namespace x86
} // namespace codegen
} // namespace avian


@@ -0,0 +1,38 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_MULTIMETHOD_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_MULTIMETHOD_H
namespace avian {
namespace codegen {
namespace x86 {
class ArchitectureContext;
unsigned index(ArchitectureContext*, lir::UnaryOperation operation, lir::OperandType operand);
unsigned index(ArchitectureContext*, lir::BinaryOperation operation,
lir::OperandType operand1,
lir::OperandType operand2);
unsigned index(ArchitectureContext* c UNUSED, lir::TernaryOperation operation,
lir::OperandType operand1, lir::OperandType operand2);
unsigned branchIndex(ArchitectureContext* c UNUSED, lir::OperandType operand1,
lir::OperandType operand2);
void populateTables(ArchitectureContext* c);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_MULTIMETHOD_H

File diff suppressed because it is too large


@@ -0,0 +1,261 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_OPERATIONS_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_OPERATIONS_H
namespace avian {
namespace codegen {
namespace x86 {
void return_(Context* c);
void trap(Context* c);
void ignore(Context*);
void storeLoadBarrier(Context* c);
void callC(Context* c, unsigned size UNUSED, lir::Constant* a);
void longCallC(Context* c, unsigned size, lir::Constant* a);
void jumpR(Context* c, unsigned size UNUSED, lir::Register* a);
void jumpC(Context* c, unsigned size UNUSED, lir::Constant* a);
void jumpM(Context* c, unsigned size UNUSED, lir::Memory* a);
void longJumpC(Context* c, unsigned size, lir::Constant* a);
void callR(Context* c, unsigned size UNUSED, lir::Register* a);
void callM(Context* c, unsigned size UNUSED, lir::Memory* a);
void alignedCallC(Context* c, unsigned size, lir::Constant* a);
void alignedLongCallC(Context* c, unsigned size, lir::Constant* a);
void alignedJumpC(Context* c, unsigned size, lir::Constant* a);
void alignedLongJumpC(Context* c, unsigned size, lir::Constant* a);
void pushR(Context* c, unsigned size, lir::Register* a);
void popR(Context* c, unsigned size, lir::Register* a);
void negateR(Context* c, unsigned size, lir::Register* a);
void negateRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b UNUSED);
void moveCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void swapRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void moveRR(Context* c, unsigned aSize, lir::Register* a,
UNUSED unsigned bSize, lir::Register* b);
void moveMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize, lir::Register* b);
void moveRM(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Memory* b);
void moveAR(Context* c, unsigned aSize, lir::Address* a,
unsigned bSize, lir::Register* b);
void moveCM(Context* c, unsigned aSize UNUSED, lir::Constant* a,
unsigned bSize, lir::Memory* b);
void moveZRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void moveZMR(Context* c, unsigned aSize UNUSED, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void addCarryRR(Context* c, unsigned size, lir::Register* a,
lir::Register* b);
void addRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void addCarryCR(Context* c, unsigned size, lir::Constant* a,
lir::Register* b);
void addCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void subtractBorrowCR(Context* c, unsigned size UNUSED, lir::Constant* a,
lir::Register* b);
void subtractCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void subtractBorrowRR(Context* c, unsigned size, lir::Register* a,
lir::Register* b);
void subtractRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void andRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void andCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void orRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void orCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void xorRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void xorCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void multiplyRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void compareRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void compareCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void compareRM(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Memory* b);
void compareCM(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Memory* b);
void compareFloatRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void branchLong(Context* c, lir::TernaryOperation op, lir::Operand* al,
lir::Operand* ah, lir::Operand* bl,
lir::Operand* bh, lir::Constant* target,
BinaryOperationType compare);
void branchRR(Context* c, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Register* b,
lir::Constant* target);
void branchCR(Context* c, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Register* b,
lir::Constant* target);
void branchRM(Context* c, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Memory* b,
lir::Constant* target);
void branchCM(Context* c, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Memory* b,
lir::Constant* target);
void multiplyCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void divideRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b UNUSED);
void remainderRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void doShift(Context* c, UNUSED void (*shift)
(Context*, unsigned, lir::Register*, unsigned,
lir::Register*),
int type, UNUSED unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void shiftLeftRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void shiftLeftCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void shiftRightRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void shiftRightCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b);
void unsignedShiftRightRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void unsignedShiftRightCR(Context* c, unsigned aSize UNUSED, lir::Constant* a,
unsigned bSize, lir::Register* b);
void floatSqrtRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void floatSqrtMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void floatAddRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void floatAddMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void floatSubtractRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void floatSubtractMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void floatMultiplyRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void floatMultiplyMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void floatDivideRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void floatDivideMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void float2FloatRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void float2FloatMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b);
void float2IntRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void float2IntMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize, lir::Register* b);
void int2FloatRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b);
void int2FloatMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize, lir::Register* b);
void floatNegateRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void floatAbsoluteRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b);
void absoluteRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b UNUSED);
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_OPERATIONS_H


@@ -0,0 +1,67 @@
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#ifndef AVIAN_CODEGEN_ASSEMBLER_X86_REGISTERS_H
#define AVIAN_CODEGEN_ASSEMBLER_X86_REGISTERS_H
namespace avian {
namespace codegen {
namespace x86 {
enum {
rax = 0,
rcx = 1,
rdx = 2,
rbx = 3,
rsp = 4,
rbp = 5,
rsi = 6,
rdi = 7,
r8 = 8,
r9 = 9,
r10 = 10,
r11 = 11,
r12 = 12,
r13 = 13,
r14 = 14,
r15 = 15,
};
enum {
xmm0 = r15 + 1,
xmm1,
xmm2,
xmm3,
xmm4,
xmm5,
xmm6,
xmm7,
xmm8,
xmm9,
xmm10,
xmm11,
xmm12,
xmm13,
xmm14,
xmm15,
};
const int LongJumpRegister = r10;
const unsigned GeneralRegisterMask = vm::TargetBytesPerWord == 4 ? 0x000000ff : 0x0000ffff;
const unsigned FloatRegisterMask = vm::TargetBytesPerWord == 4 ? 0x00ff0000 : 0xffff0000;
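// The masks are bitsets over the register indices above: on 32-bit
// targets, bits 0-7 cover rax-rdi and bits 16-23 cover xmm0-xmm7; on
// 64-bit targets, bits 0-15 cover rax-r15 and bits 16-31 cover
// xmm0-xmm15.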
} // namespace x86
} // namespace codegen
} // namespace avian
#endif // AVIAN_CODEGEN_ASSEMBLER_X86_REGISTERS_H