Merge pull request #377 from joshuawarner32/expand-register-width
Refactoring to allow up to 64 registers
Commit 0f426e01e5
@@ -12,6 +12,7 @@
#define AVIAN_CODEGEN_ARCHITECTURE_H

#include "ir.h"
#include "registers.h"

namespace vm {
class Zone;
@@ -32,16 +33,26 @@ class RegisterFile;
class OperandMask {
public:
uint8_t typeMask;
uint64_t registerMask;
RegisterMask lowRegisterMask;
RegisterMask highRegisterMask;

OperandMask(uint8_t typeMask, uint64_t registerMask)
: typeMask(typeMask), registerMask(registerMask)
OperandMask(uint8_t typeMask,
RegisterMask lowRegisterMask,
RegisterMask highRegisterMask)
: typeMask(typeMask),
lowRegisterMask(lowRegisterMask),
highRegisterMask(highRegisterMask)
{
}

OperandMask() : typeMask(~0), registerMask(~static_cast<uint64_t>(0))
OperandMask() : typeMask(~0), lowRegisterMask(AnyRegisterMask), highRegisterMask(AnyRegisterMask)
{
}

void setLowHighRegisterMasks(RegisterMask lowRegisterMask, RegisterMask highRegisterMask) {
this->lowRegisterMask = lowRegisterMask;
this->highRegisterMask = highRegisterMask;
}
};

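For illustration only (not part of the patch): a minimal sketch of how a backend's planning code might fill the reworked OperandMask, now that the low-word and high-word register masks are separate RegisterMask values instead of the two halves of one packed uint64_t. The include paths and the register indices 0 and 2 are assumptions.

#include <avian/codegen/architecture.h>
#include <avian/codegen/lir.h>
#include <avian/codegen/registers.h>

using namespace avian::codegen;

void planExample(OperandMask& mask)
{
  // Accept a register pair whose low word is in register 0 or 2 and whose
  // high word may live in any register.
  RegisterMask low = Register(0) | Register(2);
  mask.typeMask = lir::Operand::RegisterPairMask;
  mask.setLowHighRegisterMasks(low, AnyRegisterMask);
}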
class Architecture {
@@ -50,13 +61,13 @@ class Architecture {

virtual const RegisterFile* registerFile() = 0;

virtual int scratch() = 0;
virtual int stack() = 0;
virtual int thread() = 0;
virtual int returnLow() = 0;
virtual int returnHigh() = 0;
virtual int virtualCallTarget() = 0;
virtual int virtualCallIndex() = 0;
virtual Register scratch() = 0;
virtual Register stack() = 0;
virtual Register thread() = 0;
virtual Register returnLow() = 0;
virtual Register returnHigh() = 0;
virtual Register virtualCallTarget() = 0;
virtual Register virtualCallIndex() = 0;

virtual ir::TargetInfo targetInfo() = 0;

@@ -67,14 +78,14 @@ class Architecture {
virtual bool alwaysCondensed(lir::BinaryOperation op) = 0;
virtual bool alwaysCondensed(lir::TernaryOperation op) = 0;

virtual bool reserved(int register_) = 0;
virtual bool reserved(Register register_) = 0;

virtual unsigned frameFootprint(unsigned footprint) = 0;
virtual unsigned argumentFootprint(unsigned footprint) = 0;
virtual bool argumentAlignment() = 0;
virtual bool argumentRegisterAlignment() = 0;
virtual unsigned argumentRegisterCount() = 0;
virtual int argumentRegister(unsigned index) = 0;
virtual Register argumentRegister(unsigned index) = 0;

virtual bool hasLinkRegister() = 0;

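Again purely illustrative: with scratch(), stack(), returnLow() and the other accessors of what appears to be the architecture header now returning Register rather than int, call sites can hand the results straight to register-typed LIR operands. The arch pointer is assumed to come from whichever backend is in use.

#include <avian/codegen/architecture.h>
#include <avian/codegen/lir.h>

using namespace avian::codegen;

lir::RegisterPair returnValuePair(Architecture* arch)
{
  // returnLow()/returnHigh() yield Register values directly, so no
  // int-to-register conversion is needed at the call site.
  return lir::RegisterPair(arch->returnLow(), arch->returnHigh());
}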
@@ -15,6 +15,7 @@
#include "avian/zone.h"

#include <avian/codegen/lir.h>
#include <avian/codegen/registers.h>
#include <avian/codegen/promise.h>

namespace avian {
@@ -25,11 +26,11 @@ class Architecture;
class OperandInfo {
public:
const unsigned size;
const lir::OperandType type;
const lir::Operand::Type type;
lir::Operand* const operand;

inline OperandInfo(unsigned size,
lir::OperandType type,
lir::Operand::Type type,
lir::Operand* operand)
: size(size), type(type), operand(operand)
{
@@ -52,10 +53,10 @@ class Assembler {
public:
class Client {
public:
virtual int acquireTemporary(uint32_t mask = ~static_cast<uint32_t>(0)) = 0;
virtual void releaseTemporary(int r) = 0;
virtual Register acquireTemporary(RegisterMask mask = AnyRegisterMask) = 0;
virtual void releaseTemporary(Register r) = 0;

virtual void save(int r) = 0;
virtual void save(Register r) = 0;
};

class Block {
@@ -76,8 +77,8 @@ class Assembler {
virtual void popFrame(unsigned footprint) = 0;
virtual void popFrameForTailCall(unsigned footprint,
int offset,
int returnAddressSurrogate,
int framePointerSurrogate) = 0;
Register returnAddressSurrogate,
Register framePointerSurrogate) = 0;
virtual void popFrameAndPopArgumentsAndReturn(unsigned frameFootprint,
unsigned argumentFootprint) = 0;
virtual void popFrameAndUpdateStackAndReturn(unsigned frameFootprint,
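A hedged sketch of the updated Assembler::Client contract: temporaries are now requested with a RegisterMask (defaulting to AnyRegisterMask) and come back as Register values instead of plain ints. The client and regFile arguments are stand-ins for whatever the surrounding compiler passes in.

#include <avian/codegen/assembler.h>
#include <avian/codegen/registers.h>

using namespace avian::codegen;

void withTemporary(Assembler::Client* client, const RegisterFile* regFile)
{
  // Ask for any free general-purpose register; previously this took a
  // uint32_t bit mask and returned an int register index.
  Register tmp = client->acquireTemporary(regFile->generalRegisters);
  // ... emit code that uses tmp as a scratch register ...
  client->releaseTemporary(tmp);
}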
@@ -11,6 +11,8 @@
#ifndef AVIAN_CODEGEN_LIR_H
#define AVIAN_CODEGEN_LIR_H

#include <avian/codegen/registers.h>

namespace avian {
namespace codegen {
class Promise;
@@ -79,19 +81,8 @@ const unsigned NonBranchTernaryOperationCount = FloatMin + 1;
const unsigned BranchOperationCount = JumpIfFloatGreaterOrEqualOrUnordered
- FloatMin;

enum OperandType {
ConstantOperand,
AddressOperand,
RegisterOperand,
MemoryOperand
};

enum ValueType { ValueGeneral, ValueFloat };

const unsigned OperandTypeCount = MemoryOperand + 1;

const int NoRegister = -1;

inline bool isBranch(lir::TernaryOperation op)
{
return op > FloatMin;
@@ -128,6 +119,21 @@ inline bool isFloatUnaryOp(lir::BinaryOperation op)
}

class Operand {
public:

enum class Type {
Constant,
Address,
RegisterPair,
Memory
};

const static unsigned TypeCount = (unsigned)Type::Memory + 1;

const static unsigned ConstantMask = 1 << (unsigned)Type::Constant;
const static unsigned AddressMask = 1 << (unsigned)Type::Address;
const static unsigned RegisterPairMask = 1 << (unsigned)Type::RegisterPair;
const static unsigned MemoryMask = 1 << (unsigned)Type::Memory;
};

class Constant : public Operand {
@@ -148,26 +154,26 @@ class Address : public Operand {
Promise* address;
};

class Register : public Operand {
class RegisterPair : public Operand {
public:
Register(int low, int high = NoRegister) : low(low), high(high)
RegisterPair(Register low, Register high = NoRegister) : low(low), high(high)
{
}

int low;
int high;
Register low;
Register high;
};

class Memory : public Operand {
public:
Memory(int base, int offset, int index = NoRegister, unsigned scale = 1)
Memory(Register base, int offset, Register index = NoRegister, unsigned scale = 1)
: base(base), offset(offset), index(index), scale(scale)
{
}

int base;
Register base;
int offset;
int index;
Register index;
unsigned scale;
};

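For orientation, a small sketch of building the reworked LIR operands: lir::Register has become lir::RegisterPair, lir::Memory now takes Register for its base and index, and the old OperandType enum is replaced by lir::Operand::Type plus per-kind mask constants. The register indices 0 and 5 and the offset are arbitrary placeholders.

#include <avian/codegen/lir.h>
#include <avian/codegen/registers.h>

using namespace avian::codegen;

void buildOperands()
{
  // A two-word value: low word in register 0, high word in register 5.
  lir::RegisterPair pair(Register(0), Register(5));

  // A memory operand [r0 + 16 + r5 * 4]; omitting the index defaults it to
  // NoRegister, which now comes from registers.h rather than lir.h.
  lir::Memory mem(Register(0), 16, Register(5), 4);

  // Operand kinds are a scoped enum plus bit masks for use in type masks.
  unsigned acceptable = lir::Operand::RegisterPairMask | lir::Operand::MemoryMask;
  (void)pair;
  (void)mem;
  (void)acceptable;
}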
@@ -16,28 +16,189 @@
namespace avian {
namespace codegen {

class RegisterMask {
public:
uint32_t mask;
uint8_t start;
uint8_t limit;
class RegisterMask;

static unsigned maskStart(uint32_t mask);
static unsigned maskLimit(uint32_t mask);
class Register {
private:
int8_t _index;
public:
explicit constexpr Register(int8_t _index) : _index(_index) {}
constexpr Register() : _index(-1) {}

inline RegisterMask(uint32_t mask)
: mask(mask), start(maskStart(mask)), limit(maskLimit(mask))
{
constexpr bool operator == (Register o) const {
return _index == o._index;
}

constexpr bool operator != (Register o) const {
return !(*this == o);
}

constexpr RegisterMask operator | (Register o) const;

constexpr bool operator < (Register o) const {
return _index < o._index;
}

constexpr bool operator > (Register o) const {
return _index > o._index;
}

constexpr bool operator <= (Register o) const {
return _index <= o._index;
}

constexpr bool operator >= (Register o) const {
return _index >= o._index;
}

constexpr int index() const {
return _index;
}
};

constexpr Register NoRegister;

class RegisterMask {
private:
uint64_t mask;

static constexpr unsigned maskStart(uint64_t mask, unsigned offset = 64) {
return mask == 0 ? (offset & 63) : maskStart(mask << 1, offset - 1);
}

static constexpr unsigned maskLimit(uint64_t mask, unsigned offset = 0) {
return mask == 0 ? offset : maskLimit(mask >> 1, offset + 1);
}
public:
constexpr RegisterMask(uint64_t mask) : mask(mask) {}
constexpr RegisterMask() : mask(0) {}
constexpr RegisterMask(Register reg) : mask(static_cast<uint64_t>(1) << reg.index()) {}

constexpr unsigned begin() const {
return maskStart(mask);
}

constexpr unsigned end() const {
return maskLimit(mask);
}

constexpr RegisterMask operator &(RegisterMask o) const {
return RegisterMask(mask & o.mask);
}

RegisterMask operator &=(RegisterMask o) {
mask &= o.mask;
return *this;
}

constexpr RegisterMask operator |(RegisterMask o) const {
return RegisterMask(mask | o.mask);
}

constexpr bool contains(Register reg) const {
return (mask & (static_cast<uint64_t>(1) << reg.index())) != 0;
}

constexpr bool containsExactly(Register reg) const {
return mask == (mask & (static_cast<uint64_t>(1) << reg.index()));
}

constexpr RegisterMask excluding(Register reg) const {
return RegisterMask(mask & ~(static_cast<uint64_t>(1) << reg.index()));
}

constexpr RegisterMask including(Register reg) const {
return RegisterMask(mask | (static_cast<uint64_t>(1) << reg.index()));
}

constexpr explicit operator uint64_t() const {
return mask;
}

constexpr explicit operator bool() const {
return mask != 0;
}
};

constexpr RegisterMask AnyRegisterMask(~static_cast<uint64_t>(0));
constexpr RegisterMask NoneRegisterMask(0);

constexpr RegisterMask Register::operator | (Register o) const {
return RegisterMask(*this) | o;
}

class RegisterIterator;

class BoundedRegisterMask : public RegisterMask {
public:
uint8_t start;
uint8_t limit;

BoundedRegisterMask(RegisterMask mask)
: RegisterMask(mask), start(mask.begin()), limit(mask.end())
{
}

RegisterIterator begin() const;

RegisterIterator end() const;
};

class RegisterIterator {
public:
int index;
int direction;
int limit;
const RegisterMask mask;

RegisterIterator(int index, int direction, int limit, RegisterMask mask)
: index(index), direction(direction), limit(limit), mask(mask)
{
}

bool operator !=(const RegisterIterator& o) const {
return index != o.index;
}

Register operator *() {
return Register(index);
}

void operator ++ () {
if(index != limit) {
index += direction;
}
while(index != limit && !mask.contains(Register(index))) {
index += direction;
}
}
};

inline RegisterIterator BoundedRegisterMask::begin() const {
// We use reverse iteration... for some reason.
return RegisterIterator(limit - 1, -1, start - 1, *this);
}

inline RegisterIterator BoundedRegisterMask::end() const {
// We use reverse iteration... for some reason.
return RegisterIterator(start - 1, -1, start - 1, *this);
}

inline RegisterIterator begin(BoundedRegisterMask mask) {
return mask.begin();
}

inline RegisterIterator end(BoundedRegisterMask mask) {
return mask.end();
}

class RegisterFile {
public:
RegisterMask allRegisters;
RegisterMask generalRegisters;
RegisterMask floatRegisters;
BoundedRegisterMask allRegisters;
BoundedRegisterMask generalRegisters;
BoundedRegisterMask floatRegisters;

inline RegisterFile(uint32_t generalRegisterMask, uint32_t floatRegisterMask)
RegisterFile(RegisterMask generalRegisterMask, RegisterMask floatRegisterMask)
: allRegisters(generalRegisterMask | floatRegisterMask),
generalRegisters(generalRegisterMask),
floatRegisters(floatRegisterMask)
@@ -45,31 +206,6 @@ class RegisterFile {
}
};

class RegisterIterator {
public:
int index;
const RegisterMask& mask;

inline RegisterIterator(const RegisterMask& mask)
: index(mask.start), mask(mask)
{
}

inline bool hasNext()
{
return index < mask.limit;
}

inline int next()
{
int r = index;
do {
index++;
} while (index < mask.limit && !(mask.mask & (1 << index)));
return r;
}
};

} // namespace codegen
} // namespace avian

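A self-contained usage sketch for the new register abstractions above (illustrative, not from the patch): Register wraps an index, RegisterMask is a constexpr 64-bit set with contains/including/excluding, and BoundedRegisterMask caches the bit range and supports range-for; note that iteration deliberately runs from the highest register downward.

#include <avian/codegen/registers.h>
#include <cstdio>

using namespace avian::codegen;

int main()
{
  // Build a mask from individual registers, then edit it.
  RegisterMask mask = Register(0) | Register(1);
  mask = mask.including(Register(7)).excluding(Register(1));

  bool has7 = mask.contains(Register(7));         // true
  bool only7 = mask.containsExactly(Register(7)); // false: register 0 is also set

  // BoundedRegisterMask precomputes the bit bounds and is iterable;
  // this prints registers 7 and 0, in that (reverse) order.
  BoundedRegisterMask bounded(mask);
  for (Register r : bounded) {
    std::printf("register %d\n", r.index());
  }
  return (has7 and not only7) ? 0 : 1;
}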
makefile
@ -1223,7 +1223,6 @@ compiler-sources = \
|
||||
$(src)/codegen/compiler.cpp \
|
||||
$(wildcard $(src)/codegen/compiler/*.cpp) \
|
||||
$(src)/debug-util.cpp \
|
||||
$(src)/codegen/registers.cpp \
|
||||
$(src)/codegen/runtime.cpp \
|
||||
$(src)/codegen/targets.cpp \
|
||||
$(src)/util/fixed-allocator.cpp
|
||||
|
@ -1,6 +1,5 @@
|
||||
add_library (avian_codegen
|
||||
compiler.cpp
|
||||
registers.cpp
|
||||
runtime.cpp
|
||||
targets.cpp
|
||||
|
||||
|
@ -256,10 +256,10 @@ Site* pickTargetSite(Context* c,
|
||||
|
||||
expect(c, target.cost < Target::Impossible);
|
||||
|
||||
if (target.type == lir::MemoryOperand) {
|
||||
if (target.type == lir::Operand::Type::Memory) {
|
||||
return frameSite(c, target.index);
|
||||
} else {
|
||||
return registerSite(c, target.index);
|
||||
return registerSite(c, Register(target.index));
|
||||
}
|
||||
}
|
||||
|
||||
@ -342,7 +342,7 @@ Site* maybeMove(Context* c,
|
||||
OperandMask src;
|
||||
OperandMask tmp;
|
||||
c->arch->planMove(
|
||||
size, src, tmp, OperandMask(dstMask.typeMask, dstMask.registerMask));
|
||||
size, src, tmp, OperandMask(dstMask.typeMask, dstMask.registerMask, 0));
|
||||
|
||||
SiteMask srcMask = SiteMask::lowPart(src);
|
||||
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
|
||||
@ -369,7 +369,7 @@ Site* maybeMove(Context* c,
|
||||
size,
|
||||
src,
|
||||
tmp,
|
||||
OperandMask(1 << dstSite->type(c), dstSite->registerMask(c)));
|
||||
OperandMask(1 << (unsigned)dstSite->type(c), dstSite->registerMask(c), 0));
|
||||
|
||||
SiteMask srcMask = SiteMask::lowPart(src);
|
||||
unsigned cost = 0xFFFFFFFF;
|
||||
@ -514,15 +514,15 @@ void steal(Context* c, Resource* r, Value* thief)
|
||||
|
||||
SiteMask generalRegisterMask(Context* c)
|
||||
{
|
||||
return SiteMask(1 << lir::RegisterOperand,
|
||||
c->regFile->generalRegisters.mask,
|
||||
return SiteMask(lir::Operand::RegisterPairMask,
|
||||
c->regFile->generalRegisters,
|
||||
NoFrameIndex);
|
||||
}
|
||||
|
||||
SiteMask generalRegisterOrConstantMask(Context* c)
|
||||
{
|
||||
return SiteMask((1 << lir::RegisterOperand) | (1 << lir::ConstantOperand),
|
||||
c->regFile->generalRegisters.mask,
|
||||
return SiteMask(lir::Operand::RegisterPairMask | lir::Operand::ConstantMask,
|
||||
c->regFile->generalRegisters,
|
||||
NoFrameIndex);
|
||||
}
|
||||
|
||||
@ -616,11 +616,11 @@ bool isHome(Value* v, int frameIndex)
|
||||
bool acceptForResolve(Context* c, Site* s, Read* read, const SiteMask& mask)
|
||||
{
|
||||
if (acceptMatch(c, s, read, mask) and (not s->frozen(c))) {
|
||||
if (s->type(c) == lir::RegisterOperand) {
|
||||
if (s->type(c) == lir::Operand::Type::RegisterPair) {
|
||||
return c->availableGeneralRegisterCount > ResolveRegisterReserveCount;
|
||||
} else {
|
||||
assertT(c,
|
||||
s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex)));
|
||||
s->match(c, SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex)));
|
||||
|
||||
return isHome(read->value,
|
||||
offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset));
|
||||
@ -698,7 +698,7 @@ void apply(Context* c,
|
||||
{
|
||||
assertT(c, s1Low->type(c) == s1High->type(c));
|
||||
|
||||
lir::OperandType s1Type = s1Low->type(c);
|
||||
lir::Operand::Type s1Type = s1Low->type(c);
|
||||
OperandUnion s1Union;
|
||||
asAssemblerOperand(c, s1Low, s1High, &s1Union);
|
||||
|
||||
@ -717,11 +717,11 @@ void apply(Context* c,
|
||||
assertT(c, s1Low->type(c) == s1High->type(c));
|
||||
assertT(c, s2Low->type(c) == s2High->type(c));
|
||||
|
||||
lir::OperandType s1Type = s1Low->type(c);
|
||||
lir::Operand::Type s1Type = s1Low->type(c);
|
||||
OperandUnion s1Union;
|
||||
asAssemblerOperand(c, s1Low, s1High, &s1Union);
|
||||
|
||||
lir::OperandType s2Type = s2Low->type(c);
|
||||
lir::Operand::Type s2Type = s2Low->type(c);
|
||||
OperandUnion s2Union;
|
||||
asAssemblerOperand(c, s2Low, s2High, &s2Union);
|
||||
|
||||
@ -746,15 +746,15 @@ void apply(Context* c,
|
||||
assertT(c, s2Low->type(c) == s2High->type(c));
|
||||
assertT(c, s3Low->type(c) == s3High->type(c));
|
||||
|
||||
lir::OperandType s1Type = s1Low->type(c);
|
||||
lir::Operand::Type s1Type = s1Low->type(c);
|
||||
OperandUnion s1Union;
|
||||
asAssemblerOperand(c, s1Low, s1High, &s1Union);
|
||||
|
||||
lir::OperandType s2Type = s2Low->type(c);
|
||||
lir::Operand::Type s2Type = s2Low->type(c);
|
||||
OperandUnion s2Union;
|
||||
asAssemblerOperand(c, s2Low, s2High, &s2Union);
|
||||
|
||||
lir::OperandType s3Type = s3Low->type(c);
|
||||
lir::Operand::Type s3Type = s3Low->type(c);
|
||||
OperandUnion s3Union;
|
||||
asAssemblerOperand(c, s3Low, s3High, &s3Union);
|
||||
|
||||
@ -782,7 +782,7 @@ void saveLocals(Context* c, Event* e)
|
||||
e->addRead(
|
||||
c,
|
||||
local->value,
|
||||
SiteMask(1 << lir::MemoryOperand, 0, compiler::frameIndex(c, li)));
|
||||
SiteMask(lir::Operand::MemoryMask, 0, compiler::frameIndex(c, li)));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -815,10 +815,10 @@ void maybeMove(Context* c,
|
||||
|
||||
if (cost) {
|
||||
// todo: let c->arch->planMove decide this:
|
||||
bool useTemporary = ((target->type(c) == lir::MemoryOperand
|
||||
and srcValue->source->type(c) == lir::MemoryOperand)
|
||||
bool useTemporary = ((target->type(c) == lir::Operand::Type::Memory
|
||||
and srcValue->source->type(c) == lir::Operand::Type::Memory)
|
||||
or (srcSelectSize < dstSize
|
||||
and target->type(c) != lir::RegisterOperand));
|
||||
and target->type(c) != lir::Operand::Type::RegisterPair));
|
||||
|
||||
srcValue->source->freeze(c, srcValue);
|
||||
|
||||
@ -827,7 +827,7 @@ void maybeMove(Context* c,
|
||||
srcValue->source->thaw(c, srcValue);
|
||||
|
||||
bool addOffset = srcSize != srcSelectSize and c->arch->bigEndian()
|
||||
and srcValue->source->type(c) == lir::MemoryOperand;
|
||||
and srcValue->source->type(c) == lir::Operand::Type::Memory;
|
||||
|
||||
if (addOffset) {
|
||||
static_cast<MemorySite*>(srcValue->source)->offset
|
||||
@ -874,14 +874,14 @@ void maybeMove(Context* c,
|
||||
c->arch->planSource(op, dstSize, src, dstSize, &thunk);
|
||||
|
||||
if (isGeneralValue(srcValue)) {
|
||||
src.registerMask &= c->regFile->generalRegisters.mask;
|
||||
src.lowRegisterMask &= c->regFile->generalRegisters;
|
||||
}
|
||||
|
||||
assertT(c, thunk == 0);
|
||||
assertT(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand));
|
||||
assertT(c, dstMask.typeMask & src.typeMask & lir::Operand::RegisterPairMask);
|
||||
|
||||
Site* tmpTarget
|
||||
= freeRegisterSite(c, dstMask.registerMask & src.registerMask);
|
||||
= freeRegisterSite(c, dstMask.registerMask & src.lowRegisterMask);
|
||||
|
||||
srcValue->source->freeze(c, srcValue);
|
||||
|
||||
@ -1635,8 +1635,8 @@ bool resolveSourceSites(Context* c,
|
||||
Read* r = live(c, v);
|
||||
|
||||
if (r and sites[el.localIndex] == 0) {
|
||||
SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand),
|
||||
c->regFile->generalRegisters.mask,
|
||||
SiteMask mask(lir::Operand::RegisterPairMask | lir::Operand::MemoryMask,
|
||||
c->regFile->generalRegisters,
|
||||
AnyFrameIndex);
|
||||
|
||||
Site* s = pickSourceSite(
|
||||
@ -1677,8 +1677,8 @@ void resolveTargetSites(Context* c,
|
||||
Read* r = live(c, v);
|
||||
|
||||
if (r and sites[el.localIndex] == 0) {
|
||||
SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand),
|
||||
c->regFile->generalRegisters.mask,
|
||||
SiteMask mask(lir::Operand::RegisterPairMask | lir::Operand::MemoryMask,
|
||||
c->regFile->generalRegisters,
|
||||
AnyFrameIndex);
|
||||
|
||||
Site* s = pickSourceSite(
|
||||
@ -2210,24 +2210,24 @@ class Client : public Assembler::Client {
|
||||
{
|
||||
}
|
||||
|
||||
virtual int acquireTemporary(uint32_t mask)
|
||||
virtual Register acquireTemporary(RegisterMask mask)
|
||||
{
|
||||
unsigned cost;
|
||||
int r = pickRegisterTarget(c, 0, mask, &cost);
|
||||
Register r = pickRegisterTarget(c, 0, mask, &cost);
|
||||
expect(c, cost < Target::Impossible);
|
||||
save(r);
|
||||
c->registerResources[r].increment(c);
|
||||
c->registerResources[r.index()].increment(c);
|
||||
return r;
|
||||
}
|
||||
|
||||
virtual void releaseTemporary(int r)
|
||||
virtual void releaseTemporary(Register r)
|
||||
{
|
||||
c->registerResources[r].decrement(c);
|
||||
c->registerResources[r.index()].decrement(c);
|
||||
}
|
||||
|
||||
virtual void save(int r)
|
||||
virtual void save(Register r)
|
||||
{
|
||||
RegisterResource* reg = c->registerResources + r;
|
||||
RegisterResource* reg = c->registerResources + r.index();
|
||||
|
||||
assertT(c, reg->referenceCount == 0);
|
||||
assertT(c, reg->freezeCount == 0);
|
||||
|
@ -53,19 +53,15 @@ Context::Context(vm::System* system,
|
||||
- regFile->generalRegisters.start),
|
||||
targetInfo(arch->targetInfo())
|
||||
{
|
||||
for (unsigned i = regFile->generalRegisters.start;
|
||||
i < regFile->generalRegisters.limit;
|
||||
++i) {
|
||||
new (registerResources + i) RegisterResource(arch->reserved(i));
|
||||
for (Register i : regFile->generalRegisters) {
|
||||
new (registerResources + i.index()) RegisterResource(arch->reserved(i));
|
||||
|
||||
if (registerResources[i].reserved) {
|
||||
if (registerResources[i.index()].reserved) {
|
||||
--availableGeneralRegisterCount;
|
||||
}
|
||||
}
|
||||
for (unsigned i = regFile->floatRegisters.start;
|
||||
i < regFile->floatRegisters.limit;
|
||||
++i) {
|
||||
new (registerResources + i) RegisterResource(arch->reserved(i));
|
||||
for (Register i : regFile->floatRegisters) {
|
||||
new (registerResources + i.index()) RegisterResource(arch->reserved(i));
|
||||
}
|
||||
}
|
||||
|
||||
|
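The Context constructor hunk above replaces the explicit start/limit index loops with range-for over the BoundedRegisterMask. A hedged stand-alone sketch of the same pattern (regFile and the reserved predicate here are placeholders, not the real context or architecture):

#include <avian/codegen/registers.h>

using namespace avian::codegen;

unsigned countAvailable(const RegisterFile* regFile, bool (*reserved)(Register))
{
  unsigned available = 0;
  for (Register r : regFile->generalRegisters) {
    if (not reserved(r)) {
      ++available;
    }
  }
  return available;
}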
@ -372,7 +372,7 @@ class CallEvent : public Event {
|
||||
? arguments.count
|
||||
: 0)
|
||||
{
|
||||
uint32_t registerMask = c->regFile->generalRegisters.mask;
|
||||
RegisterMask registerMask = c->regFile->generalRegisters;
|
||||
|
||||
if (callingConvention == ir::CallingConvention::Native) {
|
||||
assertT(c, (flags & Compiler::TailJump) == 0);
|
||||
@ -396,14 +396,14 @@ class CallEvent : public Event {
|
||||
SiteMask targetMask;
|
||||
if (index + (c->arch->argumentRegisterAlignment() ? footprint : 1)
|
||||
<= c->arch->argumentRegisterCount()) {
|
||||
int number = c->arch->argumentRegister(index);
|
||||
Register number = c->arch->argumentRegister(index);
|
||||
|
||||
if (DebugReads) {
|
||||
fprintf(stderr, "reg %d arg read %p\n", number, v);
|
||||
fprintf(stderr, "reg %d arg read %p\n", number.index(), v);
|
||||
}
|
||||
|
||||
targetMask = SiteMask::fixedRegisterMask(number);
|
||||
registerMask &= ~(1 << number);
|
||||
registerMask = registerMask.excluding(number);
|
||||
} else {
|
||||
if (index < c->arch->argumentRegisterCount()) {
|
||||
index = c->arch->argumentRegisterCount();
|
||||
@ -415,7 +415,7 @@ class CallEvent : public Event {
|
||||
fprintf(stderr, "stack %d arg read %p\n", frameIndex, v);
|
||||
}
|
||||
|
||||
targetMask = SiteMask(1 << lir::MemoryOperand, 0, frameIndex);
|
||||
targetMask = SiteMask(lir::Operand::MemoryMask, 0, frameIndex);
|
||||
}
|
||||
|
||||
this->addRead(c, v, targetMask);
|
||||
@ -445,7 +445,7 @@ class CallEvent : public Event {
|
||||
this->addRead(
|
||||
c,
|
||||
address,
|
||||
SiteMask(op.typeMask, registerMask & op.registerMask, AnyFrameIndex));
|
||||
SiteMask(op.typeMask, registerMask & op.lowRegisterMask, AnyFrameIndex));
|
||||
}
|
||||
|
||||
Stack* stack = stackBefore;
|
||||
@ -512,7 +512,7 @@ class CallEvent : public Event {
|
||||
this->addRead(c, v, generalRegisterMask(c));
|
||||
} else {
|
||||
this->addRead(
|
||||
c, v, SiteMask(1 << lir::MemoryOperand, 0, frameIndex));
|
||||
c, v, SiteMask(lir::Operand::MemoryMask, 0, frameIndex));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -544,7 +544,7 @@ class CallEvent : public Event {
|
||||
|
||||
this->addRead(c,
|
||||
stack->value,
|
||||
SiteMask(1 << lir::MemoryOperand, 0, logicalIndex));
|
||||
SiteMask(lir::Operand::MemoryMask, 0, logicalIndex));
|
||||
}
|
||||
|
||||
stack = stack->next;
|
||||
@ -581,29 +581,29 @@ class CallEvent : public Event {
|
||||
assertT(
|
||||
c,
|
||||
returnAddressSurrogate == 0
|
||||
or returnAddressSurrogate->source->type(c) == lir::RegisterOperand);
|
||||
or returnAddressSurrogate->source->type(c) == lir::Operand::Type::RegisterPair);
|
||||
assertT(
|
||||
c,
|
||||
framePointerSurrogate == 0
|
||||
or framePointerSurrogate->source->type(c) == lir::RegisterOperand);
|
||||
or framePointerSurrogate->source->type(c) == lir::Operand::Type::RegisterPair);
|
||||
|
||||
int ras;
|
||||
Register ras;
|
||||
if (returnAddressSurrogate) {
|
||||
returnAddressSurrogate->source->freeze(c, returnAddressSurrogate);
|
||||
|
||||
ras = static_cast<RegisterSite*>(returnAddressSurrogate->source)
|
||||
->number;
|
||||
} else {
|
||||
ras = lir::NoRegister;
|
||||
ras = NoRegister;
|
||||
}
|
||||
|
||||
int fps;
|
||||
Register fps;
|
||||
if (framePointerSurrogate) {
|
||||
framePointerSurrogate->source->freeze(c, framePointerSurrogate);
|
||||
|
||||
fps = static_cast<RegisterSite*>(framePointerSurrogate->source)->number;
|
||||
} else {
|
||||
fps = lir::NoRegister;
|
||||
fps = NoRegister;
|
||||
}
|
||||
|
||||
int offset = static_cast<int>(footprint)
|
||||
@ -783,9 +783,9 @@ class MoveEvent : public Event {
|
||||
op,
|
||||
srcSelectSize,
|
||||
OperandMask(
|
||||
1 << srcValue->source->type(c),
|
||||
(static_cast<uint64_t>(srcValue->nextWord->source->registerMask(c))
|
||||
<< 32) | static_cast<uint64_t>(srcValue->source->registerMask(c))),
|
||||
1 << (unsigned)srcValue->source->type(c),
|
||||
srcValue->source->registerMask(c),
|
||||
srcValue->nextWord->source->registerMask(c)),
|
||||
dstSize,
|
||||
dst);
|
||||
|
||||
@ -866,7 +866,7 @@ class MoveEvent : public Event {
|
||||
assertT(c, srcSelectSize == c->targetInfo.pointerSize);
|
||||
|
||||
if (dstValue->nextWord->target or live(c, dstValue->nextWord)) {
|
||||
assertT(c, dstLowMask.typeMask & (1 << lir::RegisterOperand));
|
||||
assertT(c, dstLowMask.typeMask & lir::Operand::RegisterPairMask);
|
||||
|
||||
Site* low = freeRegisterSite(c, dstLowMask.registerMask);
|
||||
|
||||
@ -897,7 +897,7 @@ class MoveEvent : public Event {
|
||||
|
||||
srcValue->source->thaw(c, srcValue);
|
||||
|
||||
assertT(c, dstHighMask.typeMask & (1 << lir::RegisterOperand));
|
||||
assertT(c, dstHighMask.typeMask & lir::Operand::RegisterPairMask);
|
||||
|
||||
Site* high = freeRegisterSite(c, dstHighMask.registerMask);
|
||||
|
||||
@ -1126,18 +1126,14 @@ class CombineEvent : public Event {
|
||||
op,
|
||||
firstValue->type.size(c->targetInfo),
|
||||
OperandMask(
|
||||
1 << firstValue->source->type(c),
|
||||
(static_cast<uint64_t>(
|
||||
firstValue->nextWord->source->registerMask(c))
|
||||
<< 32)
|
||||
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
|
||||
1 << (unsigned)firstValue->source->type(c),
|
||||
firstValue->source->registerMask(c),
|
||||
firstValue->nextWord->source->registerMask(c)),
|
||||
secondValue->type.size(c->targetInfo),
|
||||
OperandMask(
|
||||
1 << secondValue->source->type(c),
|
||||
(static_cast<uint64_t>(
|
||||
secondValue->nextWord->source->registerMask(c))
|
||||
<< 32)
|
||||
| static_cast<uint64_t>(secondValue->source->registerMask(c))),
|
||||
1 << (unsigned)secondValue->source->type(c),
|
||||
secondValue->source->registerMask(c),
|
||||
secondValue->nextWord->source->registerMask(c)),
|
||||
resultValue->type.size(c->targetInfo),
|
||||
cMask);
|
||||
|
||||
@ -1318,11 +1314,9 @@ class TranslateEvent : public Event {
|
||||
op,
|
||||
firstValue->type.size(c->targetInfo),
|
||||
OperandMask(
|
||||
1 << firstValue->source->type(c),
|
||||
(static_cast<uint64_t>(
|
||||
firstValue->nextWord->source->registerMask(c))
|
||||
<< 32)
|
||||
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
|
||||
1 << (unsigned)firstValue->source->type(c),
|
||||
firstValue->source->registerMask(c),
|
||||
firstValue->nextWord->source->registerMask(c)),
|
||||
resultValue->type.size(c->targetInfo),
|
||||
bMask);
|
||||
|
||||
@ -1457,7 +1451,7 @@ ConstantSite* findConstantSite(Context* c, Value* v)
|
||||
{
|
||||
for (SiteIterator it(c, v); it.hasMore();) {
|
||||
Site* s = it.next();
|
||||
if (s->type(c) == lir::ConstantOperand) {
|
||||
if (s->type(c) == lir::Operand::Type::Constant) {
|
||||
return static_cast<ConstantSite*>(s);
|
||||
}
|
||||
}
|
||||
@ -1467,7 +1461,7 @@ ConstantSite* findConstantSite(Context* c, Value* v)
|
||||
void moveIfConflict(Context* c, Value* v, MemorySite* s)
|
||||
{
|
||||
if (v->reads) {
|
||||
SiteMask mask(1 << lir::RegisterOperand, ~0, AnyFrameIndex);
|
||||
SiteMask mask(lir::Operand::RegisterPairMask, ~0, AnyFrameIndex);
|
||||
v->reads->intersect(&mask);
|
||||
if (s->conflicts(mask)) {
|
||||
maybeMove(c, v->reads, true, false);
|
||||
@ -1504,29 +1498,29 @@ class MemoryEvent : public Event {
|
||||
|
||||
virtual void compile(Context* c)
|
||||
{
|
||||
int indexRegister;
|
||||
Register indexRegister;
|
||||
int displacement = this->displacement;
|
||||
unsigned scale = this->scale;
|
||||
if (index) {
|
||||
ConstantSite* constant = findConstantSite(c, index);
|
||||
|
||||
if (constant) {
|
||||
indexRegister = lir::NoRegister;
|
||||
indexRegister = NoRegister;
|
||||
displacement += (constant->value->value() * scale);
|
||||
scale = 1;
|
||||
} else {
|
||||
assertT(c, index->source->type(c) == lir::RegisterOperand);
|
||||
assertT(c, index->source->type(c) == lir::Operand::Type::RegisterPair);
|
||||
indexRegister = static_cast<RegisterSite*>(index->source)->number;
|
||||
}
|
||||
} else {
|
||||
indexRegister = lir::NoRegister;
|
||||
indexRegister = NoRegister;
|
||||
}
|
||||
assertT(c, base->source->type(c) == lir::RegisterOperand);
|
||||
int baseRegister = static_cast<RegisterSite*>(base->source)->number;
|
||||
assertT(c, base->source->type(c) == lir::Operand::Type::RegisterPair);
|
||||
Register baseRegister = static_cast<RegisterSite*>(base->source)->number;
|
||||
|
||||
popRead(c, this, base);
|
||||
if (index) {
|
||||
if (c->targetInfo.pointerSize == 8 and indexRegister != lir::NoRegister) {
|
||||
if (c->targetInfo.pointerSize == 8 and indexRegister != NoRegister) {
|
||||
apply(c,
|
||||
lir::Move,
|
||||
4,
|
||||
@ -1718,9 +1712,9 @@ class BranchEvent : public Event {
|
||||
OperandMask dstMask;
|
||||
c->arch->planDestination(op,
|
||||
firstValue->type.size(c->targetInfo),
|
||||
OperandMask(0, 0),
|
||||
OperandMask(0, 0, 0),
|
||||
firstValue->type.size(c->targetInfo),
|
||||
OperandMask(0, 0),
|
||||
OperandMask(0, 0, 0),
|
||||
c->targetInfo.pointerSize,
|
||||
dstMask);
|
||||
|
||||
@ -1879,12 +1873,12 @@ void clean(Context* c, Value* v, unsigned popIndex)
|
||||
{
|
||||
for (SiteIterator it(c, v); it.hasMore();) {
|
||||
Site* s = it.next();
|
||||
if (not(s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))
|
||||
if (not(s->match(c, SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex))
|
||||
and offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset)
|
||||
>= popIndex)) {
|
||||
if (false
|
||||
and s->match(c,
|
||||
SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))) {
|
||||
SiteMask(lir::Operand::MemoryMask, 0, AnyFrameIndex))) {
|
||||
char buffer[256];
|
||||
s->toString(c, buffer, 256);
|
||||
fprintf(stderr,
|
||||
@ -2016,7 +2010,7 @@ class BoundsCheckEvent : public Event {
|
||||
lir::Constant handlerConstant(resolvedPromise(c, handler));
|
||||
a->apply(lir::Call,
|
||||
OperandInfo(c->targetInfo.pointerSize,
|
||||
lir::ConstantOperand,
|
||||
lir::Operand::Type::Constant,
|
||||
&handlerConstant));
|
||||
}
|
||||
} else {
|
||||
@ -2038,10 +2032,10 @@ class BoundsCheckEvent : public Event {
|
||||
}
|
||||
|
||||
if (constant == 0 or constant->value->value() >= 0) {
|
||||
assertT(c, object->source->type(c) == lir::RegisterOperand);
|
||||
assertT(c, object->source->type(c) == lir::Operand::Type::RegisterPair);
|
||||
MemorySite length(static_cast<RegisterSite*>(object->source)->number,
|
||||
lengthOffset,
|
||||
lir::NoRegister,
|
||||
NoRegister,
|
||||
1);
|
||||
length.acquired = true;
|
||||
|
||||
@ -2072,7 +2066,7 @@ class BoundsCheckEvent : public Event {
|
||||
lir::Constant handlerConstant(resolvedPromise(c, handler));
|
||||
a->apply(lir::Call,
|
||||
OperandInfo(c->targetInfo.pointerSize,
|
||||
lir::ConstantOperand,
|
||||
lir::Operand::Type::Constant,
|
||||
&handlerConstant));
|
||||
|
||||
nextPromise->offset = a->offset();
|
||||
|
@ -205,7 +205,7 @@ Read* StubRead::next(Context*)
|
||||
SingleRead* read(Context* c, const SiteMask& mask, Value* successor)
|
||||
{
|
||||
assertT(c,
|
||||
(mask.typeMask != 1 << lir::MemoryOperand) or mask.frameIndex >= 0);
|
||||
(mask.typeMask != lir::Operand::MemoryMask) or mask.frameIndex >= 0);
|
||||
|
||||
return new (c->zone) SingleRead(mask, successor);
|
||||
}
|
||||
|
@ -57,24 +57,24 @@ unsigned resourceCost(Context* c,
|
||||
}
|
||||
|
||||
bool pickRegisterTarget(Context* c,
|
||||
int i,
|
||||
Register i,
|
||||
Value* v,
|
||||
uint32_t mask,
|
||||
int* target,
|
||||
RegisterMask mask,
|
||||
Register* target,
|
||||
unsigned* cost,
|
||||
CostCalculator* costCalculator)
|
||||
{
|
||||
if ((1 << i) & mask) {
|
||||
RegisterResource* r = c->registerResources + i;
|
||||
if (mask.contains(i)) {
|
||||
RegisterResource* r = c->registerResources + i.index();
|
||||
unsigned myCost
|
||||
= resourceCost(
|
||||
c,
|
||||
v,
|
||||
r,
|
||||
SiteMask(1 << lir::RegisterOperand, 1 << i, NoFrameIndex),
|
||||
SiteMask(lir::Operand::RegisterPairMask, RegisterMask(i), NoFrameIndex),
|
||||
costCalculator) + Target::MinimumRegisterCost;
|
||||
|
||||
if ((static_cast<uint32_t>(1) << i) == mask) {
|
||||
if (mask.containsExactly(i)) {
|
||||
*cost = myCost;
|
||||
return true;
|
||||
} else if (myCost < *cost) {
|
||||
@ -85,29 +85,25 @@ bool pickRegisterTarget(Context* c,
|
||||
return false;
|
||||
}
|
||||
|
||||
int pickRegisterTarget(Context* c,
|
||||
Register pickRegisterTarget(Context* c,
|
||||
Value* v,
|
||||
uint32_t mask,
|
||||
RegisterMask mask,
|
||||
unsigned* cost,
|
||||
CostCalculator* costCalculator)
|
||||
{
|
||||
int target = lir::NoRegister;
|
||||
Register target = NoRegister;
|
||||
*cost = Target::Impossible;
|
||||
|
||||
if (mask & c->regFile->generalRegisters.mask) {
|
||||
for (int i = c->regFile->generalRegisters.limit - 1;
|
||||
i >= c->regFile->generalRegisters.start;
|
||||
--i) {
|
||||
if (mask & c->regFile->generalRegisters) {
|
||||
for (Register i : c->regFile->generalRegisters) {
|
||||
if (pickRegisterTarget(c, i, v, mask, &target, cost, costCalculator)) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (mask & c->regFile->floatRegisters.mask) {
|
||||
for (int i = c->regFile->floatRegisters.start;
|
||||
i < static_cast<int>(c->regFile->floatRegisters.limit);
|
||||
++i) {
|
||||
if (mask & c->regFile->floatRegisters) {
|
||||
for (Register i : c->regFile->floatRegisters) {
|
||||
if (pickRegisterTarget(c, i, v, mask, &target, cost, costCalculator)) {
|
||||
return i;
|
||||
}
|
||||
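The loops above now walk the register file with range-for and test candidates through RegisterMask::contains/containsExactly. A simplified stand-alone sketch of that selection pattern (the cost callback is a placeholder, not the real resourceCost, and this is not the actual pickRegisterTarget implementation):

#include <avian/codegen/registers.h>

using namespace avian::codegen;

Register pickCheapest(const BoundedRegisterMask& candidates,
                      RegisterMask allowed,
                      unsigned (*costOf)(Register))
{
  Register best = NoRegister;
  unsigned bestCost = ~0u;
  for (Register r : candidates) {
    if (not allowed.contains(r)) {
      continue;  // outside the requested mask
    }
    if (allowed.containsExactly(r)) {
      return r;  // the mask names exactly this register
    }
    unsigned cost = costOf(r);
    if (cost < bestCost) {
      bestCost = cost;
      best = r;
    }
  }
  return best;
}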
@ -119,12 +115,12 @@ int pickRegisterTarget(Context* c,
|
||||
|
||||
Target pickRegisterTarget(Context* c,
|
||||
Value* v,
|
||||
uint32_t mask,
|
||||
RegisterMask mask,
|
||||
CostCalculator* costCalculator)
|
||||
{
|
||||
unsigned cost;
|
||||
int number = pickRegisterTarget(c, v, mask, &cost, costCalculator);
|
||||
return Target(number, lir::RegisterOperand, cost);
|
||||
Register number = pickRegisterTarget(c, v, mask, &cost, costCalculator);
|
||||
return Target(number, cost);
|
||||
}
|
||||
|
||||
unsigned frameCost(Context* c,
|
||||
@ -135,7 +131,7 @@ unsigned frameCost(Context* c,
|
||||
return resourceCost(c,
|
||||
v,
|
||||
c->frameResources + frameIndex,
|
||||
SiteMask(1 << lir::MemoryOperand, 0, frameIndex),
|
||||
SiteMask(lir::Operand::MemoryMask, 0, frameIndex),
|
||||
costCalculator) + Target::MinimumFrameCost;
|
||||
}
|
||||
|
||||
@ -147,7 +143,7 @@ Target pickFrameTarget(Context* c, Value* v, CostCalculator* costCalculator)
|
||||
do {
|
||||
if (p->home >= 0) {
|
||||
Target mine(p->home,
|
||||
lir::MemoryOperand,
|
||||
lir::Operand::Type::Memory,
|
||||
frameCost(c, v, p->home, costCalculator));
|
||||
|
||||
if (mine.cost == Target::MinimumFrameCost) {
|
||||
@ -168,7 +164,7 @@ Target pickAnyFrameTarget(Context* c, Value* v, CostCalculator* costCalculator)
|
||||
|
||||
unsigned count = totalFrameSize(c);
|
||||
for (unsigned i = 0; i < count; ++i) {
|
||||
Target mine(i, lir::MemoryOperand, frameCost(c, v, i, costCalculator));
|
||||
Target mine(i, lir::Operand::Type::Memory, frameCost(c, v, i, costCalculator));
|
||||
if (mine.cost == Target::MinimumFrameCost) {
|
||||
return mine;
|
||||
} else if (mine.cost < best.cost) {
|
||||
@ -186,7 +182,7 @@ Target pickTarget(Context* c,
|
||||
Target best,
|
||||
CostCalculator* costCalculator)
|
||||
{
|
||||
if (mask.typeMask & (1 << lir::RegisterOperand)) {
|
||||
if (mask.typeMask & lir::Operand::RegisterPairMask) {
|
||||
Target mine
|
||||
= pickRegisterTarget(c, value, mask.registerMask, costCalculator);
|
||||
|
||||
@ -198,10 +194,10 @@ Target pickTarget(Context* c,
|
||||
}
|
||||
}
|
||||
|
||||
if (mask.typeMask & (1 << lir::MemoryOperand)) {
|
||||
if (mask.typeMask & lir::Operand::MemoryMask) {
|
||||
if (mask.frameIndex >= 0) {
|
||||
Target mine(mask.frameIndex,
|
||||
lir::MemoryOperand,
|
||||
lir::Operand::Type::Memory,
|
||||
frameCost(c, value, mask.frameIndex, costCalculator));
|
||||
if (mine.cost == Target::MinimumFrameCost) {
|
||||
return mine;
|
||||
@ -234,14 +230,14 @@ Target pickTarget(Context* c,
|
||||
|
||||
Value* value = read->value;
|
||||
|
||||
uint32_t registerMask
|
||||
= (isFloatValue(value) ? ~0 : c->regFile->generalRegisters.mask);
|
||||
RegisterMask registerMask
|
||||
= (isFloatValue(value) ? AnyRegisterMask : (RegisterMask)c->regFile->generalRegisters);
|
||||
|
||||
SiteMask mask(~0, registerMask, AnyFrameIndex);
|
||||
read->intersect(&mask);
|
||||
|
||||
if (isFloatValue(value)) {
|
||||
uint32_t floatMask = mask.registerMask & c->regFile->floatRegisters.mask;
|
||||
RegisterMask floatMask = mask.registerMask & c->regFile->floatRegisters;
|
||||
if (floatMask) {
|
||||
mask.registerMask = floatMask;
|
||||
}
|
||||
@ -273,9 +269,9 @@ Target pickTarget(Context* c,
|
||||
if (intersectRead) {
|
||||
if (best.cost == Target::Impossible) {
|
||||
fprintf(stderr,
|
||||
"mask type %d reg %d frame %d\n",
|
||||
"mask type %d reg %" LLD " frame %d\n",
|
||||
mask.typeMask,
|
||||
mask.registerMask,
|
||||
(uint64_t)mask.registerMask,
|
||||
mask.frameIndex);
|
||||
abort(c);
|
||||
}
|
||||
|
@ -55,13 +55,18 @@ class Target {
|
||||
{
|
||||
}
|
||||
|
||||
Target(int index, lir::OperandType type, unsigned cost)
|
||||
Target(int16_t index, lir::Operand::Type type, unsigned cost)
|
||||
: index(index), type(type), cost(cost)
|
||||
{
|
||||
}
|
||||
|
||||
Target(Register reg, unsigned cost)
|
||||
: index(reg.index()), type(lir::Operand::Type::RegisterPair), cost(cost)
|
||||
{
|
||||
}
|
||||
|
||||
int16_t index;
|
||||
lir::OperandType type;
|
||||
lir::Operand::Type type;
|
||||
uint8_t cost;
|
||||
};
|
||||
|
||||
@ -77,22 +82,22 @@ unsigned resourceCost(Context* c,
|
||||
CostCalculator* costCalculator);
|
||||
|
||||
bool pickRegisterTarget(Context* c,
|
||||
int i,
|
||||
Register i,
|
||||
Value* v,
|
||||
uint32_t mask,
|
||||
int* target,
|
||||
RegisterMask mask,
|
||||
Register* target,
|
||||
unsigned* cost,
|
||||
CostCalculator* costCalculator = 0);
|
||||
|
||||
int pickRegisterTarget(Context* c,
|
||||
Register pickRegisterTarget(Context* c,
|
||||
Value* v,
|
||||
uint32_t mask,
|
||||
RegisterMask mask,
|
||||
unsigned* cost,
|
||||
CostCalculator* costCalculator = 0);
|
||||
|
||||
Target pickRegisterTarget(Context* c,
|
||||
Value* v,
|
||||
uint32_t mask,
|
||||
RegisterMask mask,
|
||||
CostCalculator* costCalculator = 0);
|
||||
|
||||
unsigned frameCost(Context* c,
|
||||
|
@ -88,7 +88,7 @@ void RegisterResource::freeze(Context* c, Value* v)
|
||||
freezeResource(c, this, v);
|
||||
|
||||
if (freezeCount == 1
|
||||
and ((1 << index(c)) & c->regFile->generalRegisters.mask)) {
|
||||
and c->regFile->generalRegisters.contains(index(c))) {
|
||||
decrementAvailableGeneralRegisterCount(c);
|
||||
}
|
||||
}
|
||||
@ -100,7 +100,7 @@ void RegisterResource::thaw(Context* c, Value* v)
|
||||
thawResource(c, this, v);
|
||||
|
||||
if (freezeCount == 0
|
||||
and ((1 << index(c)) & c->regFile->generalRegisters.mask)) {
|
||||
and c->regFile->generalRegisters.contains(index(c))) {
|
||||
incrementAvailableGeneralRegisterCount(c);
|
||||
}
|
||||
}
|
||||
@ -113,9 +113,9 @@ unsigned RegisterResource::toString(Context* c,
|
||||
return vm::snprintf(buffer, bufferSize, "register %d", index(c));
|
||||
}
|
||||
|
||||
unsigned RegisterResource::index(Context* c)
|
||||
Register RegisterResource::index(Context* c)
|
||||
{
|
||||
return this - c->registerResources;
|
||||
return Register(this - c->registerResources);
|
||||
}
|
||||
|
||||
void RegisterResource::increment(Context* c)
|
||||
@ -130,7 +130,7 @@ void RegisterResource::increment(Context* c)
|
||||
++this->referenceCount;
|
||||
|
||||
if (this->referenceCount == 1
|
||||
and ((1 << this->index(c)) & c->regFile->generalRegisters.mask)) {
|
||||
and c->regFile->generalRegisters.contains(this->index(c))) {
|
||||
decrementAvailableGeneralRegisterCount(c);
|
||||
}
|
||||
}
|
||||
@ -150,7 +150,7 @@ void RegisterResource::decrement(Context* c)
|
||||
--this->referenceCount;
|
||||
|
||||
if (this->referenceCount == 0
|
||||
and ((1 << this->index(c)) & c->regFile->generalRegisters.mask)) {
|
||||
and c->regFile->generalRegisters.contains(this->index(c))) {
|
||||
incrementAvailableGeneralRegisterCount(c);
|
||||
}
|
||||
}
|
||||
|
@ -48,7 +48,7 @@ class RegisterResource : public Resource {
|
||||
|
||||
virtual unsigned toString(Context* c, char* buffer, unsigned bufferSize);
|
||||
|
||||
virtual unsigned index(Context*);
|
||||
virtual Register index(Context*);
|
||||
|
||||
void increment(Context*);
|
||||
|
||||
|
@ -152,7 +152,7 @@ class AddressSite : public Site {
|
||||
|
||||
virtual bool match(Context*, const SiteMask& mask)
|
||||
{
|
||||
return mask.typeMask & (1 << lir::AddressOperand);
|
||||
return mask.typeMask & lir::Operand::AddressMask;
|
||||
}
|
||||
|
||||
virtual bool loneMatch(Context*, const SiteMask&)
|
||||
@ -165,9 +165,9 @@ class AddressSite : public Site {
|
||||
abort(c);
|
||||
}
|
||||
|
||||
virtual lir::OperandType type(Context*)
|
||||
virtual lir::Operand::Type type(Context*)
|
||||
{
|
||||
return lir::AddressOperand;
|
||||
return lir::Operand::Type::Address;
|
||||
}
|
||||
|
||||
virtual void asAssemblerOperand(Context* c UNUSED,
|
||||
@ -201,7 +201,7 @@ class AddressSite : public Site {
|
||||
|
||||
virtual SiteMask mask(Context*)
|
||||
{
|
||||
return SiteMask(1 << lir::AddressOperand, 0, NoFrameIndex);
|
||||
return SiteMask(lir::Operand::AddressMask, 0, NoFrameIndex);
|
||||
}
|
||||
|
||||
virtual SiteMask nextWordMask(Context* c, unsigned)
|
||||
@ -217,14 +217,14 @@ Site* addressSite(Context* c, Promise* address)
|
||||
return new (c->zone) AddressSite(address);
|
||||
}
|
||||
|
||||
RegisterSite::RegisterSite(uint32_t mask, int number)
|
||||
RegisterSite::RegisterSite(RegisterMask mask, Register number)
|
||||
: mask_(mask), number(number)
|
||||
{
|
||||
}
|
||||
|
||||
unsigned RegisterSite::toString(Context*, char* buffer, unsigned bufferSize)
|
||||
{
|
||||
if (number != lir::NoRegister) {
|
||||
if (number != NoRegister) {
|
||||
return vm::snprintf(buffer, bufferSize, "%p register %d", this, number);
|
||||
} else {
|
||||
return vm::snprintf(
|
||||
@ -234,11 +234,11 @@ unsigned RegisterSite::toString(Context*, char* buffer, unsigned bufferSize)
|
||||
|
||||
unsigned RegisterSite::copyCost(Context* c, Site* s)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
if (s and (this == s
|
||||
or (s->type(c) == lir::RegisterOperand
|
||||
and (static_cast<RegisterSite*>(s)->mask_ & (1 << number))))) {
|
||||
or (s->type(c) == lir::Operand::Type::RegisterPair
|
||||
and (static_cast<RegisterSite*>(s)->mask_.contains(number))))) {
|
||||
return 0;
|
||||
} else {
|
||||
return RegisterCopyCost;
|
||||
@ -247,10 +247,10 @@ unsigned RegisterSite::copyCost(Context* c, Site* s)
|
||||
|
||||
bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
if ((mask.typeMask & (1 << lir::RegisterOperand))) {
|
||||
return ((static_cast<uint64_t>(1) << number) & mask.registerMask);
|
||||
if ((mask.typeMask & lir::Operand::RegisterPairMask)) {
|
||||
return mask.registerMask.contains(number);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
@ -258,10 +258,10 @@ bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask)
|
||||
|
||||
bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
if ((mask.typeMask & (1 << lir::RegisterOperand))) {
|
||||
return ((static_cast<uint64_t>(1) << number) == mask.registerMask);
|
||||
if ((mask.typeMask & lir::Operand::RegisterPairMask)) {
|
||||
return mask.registerMask.containsExactly(number);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
@ -269,28 +269,28 @@ bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask)
|
||||
|
||||
bool RegisterSite::matchNextWord(Context* c, Site* s, unsigned)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
if (s->type(c) != lir::RegisterOperand) {
|
||||
if (s->type(c) != lir::Operand::Type::RegisterPair) {
|
||||
return false;
|
||||
}
|
||||
|
||||
RegisterSite* rs = static_cast<RegisterSite*>(s);
|
||||
unsigned size = rs->registerSize(c);
|
||||
if (size > c->targetInfo.pointerSize) {
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
return number == rs->number;
|
||||
} else {
|
||||
uint32_t mask = c->regFile->generalRegisters.mask;
|
||||
return ((1 << number) & mask) and ((1 << rs->number) & mask);
|
||||
RegisterMask mask = c->regFile->generalRegisters;
|
||||
return mask.contains(number) and mask.contains(rs->number);
|
||||
}
|
||||
}
|
||||
|
||||
void RegisterSite::acquire(Context* c, Value* v)
|
||||
{
|
||||
Target target;
|
||||
if (number != lir::NoRegister) {
|
||||
target = Target(number, lir::RegisterOperand, 0);
|
||||
if (number != NoRegister) {
|
||||
target = Target(number, 0);
|
||||
} else {
|
||||
target = pickRegisterTarget(c, v, mask_);
|
||||
expect(c, target.cost < Target::Impossible);
|
||||
@ -299,65 +299,65 @@ void RegisterSite::acquire(Context* c, Value* v)
|
||||
RegisterResource* resource = c->registerResources + target.index;
|
||||
compiler::acquire(c, resource, v, this);
|
||||
|
||||
number = target.index;
|
||||
number = Register(target.index);
|
||||
}
|
||||
|
||||
void RegisterSite::release(Context* c, Value* v)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
compiler::release(c, c->registerResources + number, v, this);
|
||||
compiler::release(c, c->registerResources + number.index(), v, this);
|
||||
}
|
||||
|
||||
void RegisterSite::freeze(Context* c, Value* v)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
c->registerResources[number].freeze(c, v);
|
||||
c->registerResources[number.index()].freeze(c, v);
|
||||
}
|
||||
|
||||
void RegisterSite::thaw(Context* c, Value* v)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
c->registerResources[number].thaw(c, v);
|
||||
c->registerResources[number.index()].thaw(c, v);
|
||||
}
|
||||
|
||||
bool RegisterSite::frozen(Context* c UNUSED)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
return c->registerResources[number].freezeCount != 0;
|
||||
return c->registerResources[number.index()].freezeCount != 0;
|
||||
}
|
||||
|
||||
lir::OperandType RegisterSite::type(Context*)
|
||||
lir::Operand::Type RegisterSite::type(Context*)
|
||||
{
|
||||
return lir::RegisterOperand;
|
||||
return lir::Operand::Type::RegisterPair;
|
||||
}
|
||||
|
||||
void RegisterSite::asAssemblerOperand(Context* c UNUSED,
|
||||
Site* high,
|
||||
lir::Operand* result)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
int highNumber;
|
||||
Register highNumber;
|
||||
if (high != this) {
|
||||
highNumber = static_cast<RegisterSite*>(high)->number;
|
||||
assertT(c, highNumber != lir::NoRegister);
|
||||
assertT(c, highNumber != NoRegister);
|
||||
} else {
|
||||
highNumber = lir::NoRegister;
|
||||
highNumber = NoRegister;
|
||||
}
|
||||
|
||||
new (result) lir::Register(number, highNumber);
|
||||
new (result) lir::RegisterPair(number, highNumber);
|
||||
}
|
||||
|
||||
Site* RegisterSite::copy(Context* c)
|
||||
{
|
||||
uint32_t mask;
|
||||
RegisterMask mask;
|
||||
|
||||
if (number != lir::NoRegister) {
|
||||
mask = 1 << number;
|
||||
if (number != NoRegister) {
|
||||
mask = RegisterMask(number);
|
||||
} else {
|
||||
mask = mask_;
|
||||
}
|
||||
@ -377,64 +377,64 @@ Site* RegisterSite::copyHigh(Context* c)
|
||||
|
||||
Site* RegisterSite::makeNextWord(Context* c, unsigned)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, ((1 << number) & c->regFile->generalRegisters.mask));
|
||||
assertT(c, number != NoRegister);
|
||||
assertT(c, c->regFile->generalRegisters.contains(number));
|
||||
|
||||
return freeRegisterSite(c, c->regFile->generalRegisters.mask);
|
||||
return freeRegisterSite(c, c->regFile->generalRegisters);
|
||||
}
|
||||
|
||||
SiteMask RegisterSite::mask(Context* c UNUSED)
|
||||
{
|
||||
return SiteMask(1 << lir::RegisterOperand, mask_, NoFrameIndex);
|
||||
return SiteMask(lir::Operand::RegisterPairMask, mask_, NoFrameIndex);
|
||||
}
|
||||
|
||||
SiteMask RegisterSite::nextWordMask(Context* c, unsigned)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
if (registerSize(c) > c->targetInfo.pointerSize) {
|
||||
return SiteMask(1 << lir::RegisterOperand, number, NoFrameIndex);
|
||||
return SiteMask(lir::Operand::RegisterPairMask, number, NoFrameIndex);
|
||||
} else {
|
||||
return SiteMask(1 << lir::RegisterOperand,
|
||||
c->regFile->generalRegisters.mask,
|
||||
return SiteMask(lir::Operand::RegisterPairMask,
|
||||
c->regFile->generalRegisters,
|
||||
NoFrameIndex);
|
||||
}
|
||||
}
|
||||
|
||||
unsigned RegisterSite::registerSize(Context* c)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
if ((1 << number) & c->regFile->floatRegisters.mask) {
|
||||
if (c->regFile->floatRegisters.contains(number)) {
|
||||
return c->arch->floatRegisterSize();
|
||||
} else {
|
||||
return c->targetInfo.pointerSize;
|
||||
}
|
||||
}
|
||||
|
||||
unsigned RegisterSite::registerMask(Context* c UNUSED)
|
||||
RegisterMask RegisterSite::registerMask(Context* c UNUSED)
|
||||
{
|
||||
assertT(c, number != lir::NoRegister);
|
||||
assertT(c, number != NoRegister);
|
||||
|
||||
return 1 << number;
|
||||
return RegisterMask(number);
|
||||
}
|
||||
|
||||
Site* registerSite(Context* c, int number)
|
||||
Site* registerSite(Context* c, Register number)
|
||||
{
|
||||
assertT(c, number >= 0);
|
||||
assertT(c, number != NoRegister);
|
||||
assertT(c,
|
||||
(1 << number) & (c->regFile->generalRegisters.mask
|
||||
| c->regFile->floatRegisters.mask));
|
||||
(c->regFile->generalRegisters
|
||||
| c->regFile->floatRegisters).contains(number));
|
||||
|
||||
return new (c->zone) RegisterSite(1 << number, number);
|
||||
return new (c->zone) RegisterSite(RegisterMask(number), number);
|
||||
}
|
||||
|
||||
Site* freeRegisterSite(Context* c, uint32_t mask)
|
||||
Site* freeRegisterSite(Context* c, RegisterMask mask)
|
||||
{
|
||||
return new (c->zone) RegisterSite(mask, lir::NoRegister);
|
||||
return new (c->zone) RegisterSite(mask, NoRegister);
|
||||
}
|
||||
|
||||
MemorySite::MemorySite(int base, int offset, int index, unsigned scale)
|
||||
MemorySite::MemorySite(Register base, int offset, Register index, unsigned scale)
|
||||
: acquired(false), base(base), offset(offset), index(index), scale(scale)
|
||||
{
|
||||
}
|
||||
@ -453,7 +453,7 @@ unsigned MemorySite::copyCost(Context* c, Site* s)
|
||||
{
|
||||
assertT(c, acquired);
|
||||
|
||||
if (s and (this == s or (s->type(c) == lir::MemoryOperand
|
||||
if (s and (this == s or (s->type(c) == lir::Operand::Type::Memory
|
||||
and static_cast<MemorySite*>(s)->base == base
|
||||
and static_cast<MemorySite*>(s)->offset == offset
|
||||
and static_cast<MemorySite*>(s)->index == index
|
||||
@ -466,20 +466,20 @@ unsigned MemorySite::copyCost(Context* c, Site* s)
|
||||
|
||||
bool MemorySite::conflicts(const SiteMask& mask)
|
||||
{
|
||||
return (mask.typeMask & (1 << lir::RegisterOperand)) != 0
|
||||
and (((1 << base) & mask.registerMask) == 0
|
||||
or (index != lir::NoRegister
|
||||
and ((1 << index) & mask.registerMask) == 0));
|
||||
return (mask.typeMask & lir::Operand::RegisterPairMask) != 0
|
||||
and (!mask.registerMask.contains(base)
|
||||
or (index != NoRegister
|
||||
and !mask.registerMask.contains(index)));
|
||||
}
|
||||
|
||||
bool MemorySite::match(Context* c, const SiteMask& mask)
|
||||
{
|
||||
assertT(c, acquired);
|
||||
|
||||
if (mask.typeMask & (1 << lir::MemoryOperand)) {
|
||||
if (mask.typeMask & lir::Operand::MemoryMask) {
|
||||
if (mask.frameIndex >= 0) {
|
||||
if (base == c->arch->stack()) {
|
||||
assertT(c, index == lir::NoRegister);
|
||||
assertT(c, index == NoRegister);
|
||||
return static_cast<int>(frameIndexToOffset(c, mask.frameIndex))
|
||||
== offset;
|
||||
} else {
|
||||
@ -497,9 +497,9 @@ bool MemorySite::loneMatch(Context* c, const SiteMask& mask)
|
||||
{
|
||||
assertT(c, acquired);
|
||||
|
||||
if (mask.typeMask & (1 << lir::MemoryOperand)) {
|
||||
if (mask.typeMask & lir::Operand::MemoryMask) {
|
||||
if (base == c->arch->stack()) {
|
||||
assertT(c, index == lir::NoRegister);
|
||||
assertT(c, index == NoRegister);
|
||||
|
||||
if (mask.frameIndex == AnyFrameIndex) {
|
||||
return false;
|
||||
@ -513,7 +513,7 @@ bool MemorySite::loneMatch(Context* c, const SiteMask& mask)
|
||||
|
||||
bool MemorySite::matchNextWord(Context* c, Site* s, unsigned index)
|
||||
{
|
||||
if (s->type(c) == lir::MemoryOperand) {
|
||||
if (s->type(c) == lir::Operand::Type::Memory) {
|
||||
MemorySite* ms = static_cast<MemorySite*>(s);
|
||||
return ms->base == this->base
|
||||
and ((index == 1
|
||||
@ -532,13 +532,13 @@ bool MemorySite::matchNextWord(Context* c, Site* s, unsigned index)
|
||||
|
||||
void MemorySite::acquire(Context* c, Value* v)
|
||||
{
|
||||
c->registerResources[base].increment(c);
|
||||
if (index != lir::NoRegister) {
|
||||
c->registerResources[index].increment(c);
|
||||
c->registerResources[base.index()].increment(c);
|
||||
if (index != NoRegister) {
|
||||
c->registerResources[index.index()].increment(c);
|
||||
}
|
||||
|
||||
if (base == c->arch->stack()) {
|
||||
assertT(c, index == lir::NoRegister);
|
||||
assertT(c, index == NoRegister);
|
||||
assertT(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved);
|
||||
|
||||
compiler::acquire(
|
||||
@ -551,16 +551,16 @@ void MemorySite::acquire(Context* c, Value* v)
|
||||
void MemorySite::release(Context* c, Value* v)
|
||||
{
|
||||
if (base == c->arch->stack()) {
|
||||
assertT(c, index == lir::NoRegister);
|
||||
assertT(c, index == NoRegister);
|
||||
assertT(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved);
|
||||
|
||||
compiler::release(
|
||||
c, c->frameResources + offsetToFrameIndex(c, offset), v, this);
|
||||
}
|
||||
|
||||
c->registerResources[base].decrement(c);
|
||||
if (index != lir::NoRegister) {
|
||||
c->registerResources[index].decrement(c);
|
||||
c->registerResources[base.index()].decrement(c);
|
||||
if (index != NoRegister) {
|
||||
c->registerResources[index.index()].decrement(c);
|
||||
}
|
||||
|
||||
acquired = false;
|
||||
@ -571,9 +571,9 @@ void MemorySite::freeze(Context* c, Value* v)
|
||||
if (base == c->arch->stack()) {
|
||||
c->frameResources[offsetToFrameIndex(c, offset)].freeze(c, v);
|
||||
} else {
|
||||
c->registerResources[base].increment(c);
|
||||
if (index != lir::NoRegister) {
|
||||
c->registerResources[index].increment(c);
|
||||
c->registerResources[base.index()].increment(c);
|
||||
if (index != NoRegister) {
|
||||
c->registerResources[index.index()].increment(c);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -583,9 +583,9 @@ void MemorySite::thaw(Context* c, Value* v)
|
||||
if (base == c->arch->stack()) {
|
||||
c->frameResources[offsetToFrameIndex(c, offset)].thaw(c, v);
|
||||
} else {
|
||||
c->registerResources[base].decrement(c);
|
||||
if (index != lir::NoRegister) {
|
||||
c->registerResources[index].decrement(c);
|
||||
c->registerResources[base.index()].decrement(c);
|
||||
if (index != NoRegister) {
|
||||
c->registerResources[index.index()].decrement(c);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -596,9 +596,9 @@ bool MemorySite::frozen(Context* c)
|
||||
and c->frameResources[offsetToFrameIndex(c, offset)].freezeCount != 0;
|
||||
}
|
||||
|
||||
lir::OperandType MemorySite::type(Context*)
|
||||
lir::Operand::Type MemorySite::type(Context*)
|
||||
{
|
||||
return lir::MemoryOperand;
|
||||
return lir::Operand::Type::Memory;
|
||||
}
|
||||
|
||||
void MemorySite::asAssemblerOperand(Context* c UNUSED,
|
||||
@ -657,7 +657,7 @@ Site* MemorySite::makeNextWord(Context* c, unsigned index)
|
||||
|
||||
SiteMask MemorySite::mask(Context* c)
|
||||
{
|
||||
return SiteMask(1 << lir::MemoryOperand,
|
||||
return SiteMask(lir::Operand::MemoryMask,
|
||||
0,
|
||||
(base == c->arch->stack())
|
||||
? static_cast<int>(offsetToFrameIndex(c, offset))
|
||||
@ -668,13 +668,13 @@ SiteMask MemorySite::nextWordMask(Context* c, unsigned index)
|
||||
{
|
||||
int frameIndex;
|
||||
if (base == c->arch->stack()) {
|
||||
assertT(c, this->index == lir::NoRegister);
|
||||
assertT(c, this->index == NoRegister);
|
||||
frameIndex = static_cast<int>(offsetToFrameIndex(c, offset))
|
||||
+ ((index == 1) xor c->arch->bigEndian() ? 1 : -1);
|
||||
} else {
|
||||
frameIndex = NoFrameIndex;
|
||||
}
|
||||
return SiteMask(1 << lir::MemoryOperand, 0, frameIndex);
|
||||
return SiteMask(lir::Operand::MemoryMask, 0, frameIndex);
|
||||
}
|
||||
|
||||
bool MemorySite::isVolatile(Context* c)
|
||||
@ -683,9 +683,9 @@ bool MemorySite::isVolatile(Context* c)
|
||||
}
|
||||
|
||||
MemorySite* memorySite(Context* c,
|
||||
int base,
|
||||
Register base,
|
||||
int offset,
|
||||
int index,
|
||||
Register index,
|
||||
unsigned scale)
|
||||
{
|
||||
return new (c->zone) MemorySite(base, offset, index, scale);
|
||||
@ -697,7 +697,7 @@ MemorySite* frameSite(Context* c, int frameIndex)
|
||||
return memorySite(c,
|
||||
c->arch->stack(),
|
||||
frameIndexToOffset(c, frameIndex),
|
||||
lir::NoRegister,
|
||||
NoRegister,
|
||||
0);
|
||||
}
|
||||
|
||||
|
@ -34,30 +34,30 @@ class SiteMask {
|
||||
{
|
||||
}
|
||||
|
||||
SiteMask(uint8_t typeMask, uint32_t registerMask, int frameIndex)
|
||||
SiteMask(uint8_t typeMask, RegisterMask registerMask, int frameIndex)
|
||||
: typeMask(typeMask), registerMask(registerMask), frameIndex(frameIndex)
|
||||
{
|
||||
}
|
||||
|
||||
SiteMask intersectionWith(const SiteMask& b);
|
||||
|
||||
static SiteMask fixedRegisterMask(int number)
|
||||
static SiteMask fixedRegisterMask(Register number)
|
||||
{
|
||||
return SiteMask(1 << lir::RegisterOperand, 1 << number, NoFrameIndex);
|
||||
return SiteMask(lir::Operand::RegisterPairMask, 1 << number.index(), NoFrameIndex);
|
||||
}
|
||||
|
||||
static SiteMask lowPart(const OperandMask& mask)
|
||||
{
|
||||
return SiteMask(mask.typeMask, mask.registerMask, AnyFrameIndex);
|
||||
return SiteMask(mask.typeMask, mask.lowRegisterMask, AnyFrameIndex);
|
||||
}
|
||||
|
||||
static SiteMask highPart(const OperandMask& mask)
|
||||
{
|
||||
return SiteMask(mask.typeMask, mask.registerMask >> 32, AnyFrameIndex);
|
||||
return SiteMask(mask.typeMask, mask.highRegisterMask, AnyFrameIndex);
|
||||
}
|
||||
|
||||
uint8_t typeMask;
|
||||
uint32_t registerMask;
|
||||
RegisterMask registerMask;
|
||||
int frameIndex;
|
||||
};
|
||||
|
||||
@ -103,7 +103,7 @@ class Site {
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual lir::OperandType type(Context*) = 0;
|
||||
virtual lir::Operand::Type type(Context*) = 0;
|
||||
|
||||
virtual void asAssemblerOperand(Context*, Site*, lir::Operand*) = 0;
|
||||
|
||||
@ -121,7 +121,7 @@ class Site {
|
||||
|
||||
virtual unsigned registerSize(Context*);
|
||||
|
||||
virtual unsigned registerMask(Context*)
|
||||
virtual RegisterMask registerMask(Context*)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@ -187,7 +187,7 @@ class ConstantSite : public Site {
|
||||
|
||||
virtual bool match(Context*, const SiteMask& mask)
|
||||
{
|
||||
return mask.typeMask & (1 << lir::ConstantOperand);
|
||||
return mask.typeMask & lir::Operand::ConstantMask;
|
||||
}
|
||||
|
||||
virtual bool loneMatch(Context*, const SiteMask&)
|
||||
@ -197,12 +197,12 @@ class ConstantSite : public Site {
|
||||
|
||||
virtual bool matchNextWord(Context* c, Site* s, unsigned)
|
||||
{
|
||||
return s->type(c) == lir::ConstantOperand;
|
||||
return s->type(c) == lir::Operand::Type::Constant;
|
||||
}
|
||||
|
||||
virtual lir::OperandType type(Context*)
|
||||
virtual lir::Operand::Type type(Context*)
|
||||
{
|
||||
return lir::ConstantOperand;
|
||||
return lir::Operand::Type::Constant;
|
||||
}
|
||||
|
||||
virtual void asAssemblerOperand(Context* c, Site* high, lir::Operand* result)
|
||||
@ -236,12 +236,12 @@ class ConstantSite : public Site {
|
||||
|
||||
virtual SiteMask mask(Context*)
|
||||
{
|
||||
return SiteMask(1 << lir::ConstantOperand, 0, NoFrameIndex);
|
||||
return SiteMask(lir::Operand::ConstantMask, 0, NoFrameIndex);
|
||||
}
|
||||
|
||||
virtual SiteMask nextWordMask(Context*, unsigned)
|
||||
{
|
||||
return SiteMask(1 << lir::ConstantOperand, 0, NoFrameIndex);
|
||||
return SiteMask(lir::Operand::ConstantMask, 0, NoFrameIndex);
|
||||
}
|
||||
|
||||
Promise* value;
|
||||
@ -251,7 +251,7 @@ Site* addressSite(Context* c, Promise* address);
|
||||
|
||||
class RegisterSite : public Site {
|
||||
public:
|
||||
RegisterSite(uint32_t mask, int number);
|
||||
RegisterSite(RegisterMask mask, Register number);
|
||||
|
||||
virtual unsigned toString(Context*, char* buffer, unsigned bufferSize);
|
||||
|
||||
@ -273,7 +273,7 @@ class RegisterSite : public Site {
|
||||
|
||||
virtual bool frozen(Context* c UNUSED);
|
||||
|
||||
virtual lir::OperandType type(Context*);
|
||||
virtual lir::Operand::Type type(Context*);
|
||||
|
||||
virtual void asAssemblerOperand(Context* c UNUSED,
|
||||
Site* high,
|
||||
@ -293,18 +293,18 @@ class RegisterSite : public Site {
|
||||
|
||||
virtual unsigned registerSize(Context* c);
|
||||
|
||||
virtual unsigned registerMask(Context* c UNUSED);
|
||||
virtual RegisterMask registerMask(Context* c UNUSED);
|
||||
|
||||
uint32_t mask_;
|
||||
int number;
|
||||
RegisterMask mask_;
|
||||
Register number;
|
||||
};
|
||||
|
||||
Site* registerSite(Context* c, int number);
|
||||
Site* freeRegisterSite(Context* c, uint32_t mask);
|
||||
Site* registerSite(Context* c, Register number);
|
||||
Site* freeRegisterSite(Context* c, RegisterMask mask);
|
||||
|
||||
class MemorySite : public Site {
|
||||
public:
|
||||
MemorySite(int base, int offset, int index, unsigned scale);
|
||||
MemorySite(Register base, int offset, Register index, unsigned scale);
|
||||
|
||||
virtual unsigned toString(Context*, char* buffer, unsigned bufferSize);
|
||||
|
||||
@ -328,7 +328,7 @@ class MemorySite : public Site {
|
||||
|
||||
virtual bool frozen(Context* c);
|
||||
|
||||
virtual lir::OperandType type(Context*);
|
||||
virtual lir::Operand::Type type(Context*);
|
||||
|
||||
virtual void asAssemblerOperand(Context* c UNUSED,
|
||||
Site* high UNUSED,
|
||||
@ -351,16 +351,16 @@ class MemorySite : public Site {
|
||||
virtual bool isVolatile(Context* c);
|
||||
|
||||
bool acquired;
|
||||
int base;
|
||||
Register base;
|
||||
int offset;
|
||||
int index;
|
||||
Register index;
|
||||
unsigned scale;
|
||||
};
|
||||
|
||||
MemorySite* memorySite(Context* c,
|
||||
int base,
|
||||
Register base,
|
||||
int offset = 0,
|
||||
int index = lir::NoRegister,
|
||||
Register index = NoRegister,
|
||||
unsigned scale = 1);
|
||||
MemorySite* frameSite(Context* c, int frameIndex);
|
||||
|
||||
|
@ -1,35 +0,0 @@
|
||||
/* Copyright (c) 2008-2014, Avian Contributors
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software
|
||||
for any purpose with or without fee is hereby granted, provided
|
||||
that the above copyright notice and this permission notice appear
|
||||
in all copies.
|
||||
|
||||
There is NO WARRANTY for this software. See license.txt for
|
||||
details. */
|
||||
|
||||
#include <avian/codegen/registers.h>
|
||||
|
||||
namespace avian {
|
||||
namespace codegen {
|
||||
|
||||
unsigned RegisterMask::maskStart(uint32_t mask)
|
||||
{
|
||||
for (int i = 0; i <= 31; ++i) {
|
||||
if (mask & (1 << i))
|
||||
return i;
|
||||
}
|
||||
return 32;
|
||||
}
|
||||
|
||||
unsigned RegisterMask::maskLimit(uint32_t mask)
|
||||
{
|
||||
for (int i = 31; i >= 0; --i) {
|
||||
if (mask & (1 << i))
|
||||
return i + 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
} // namespace codegen
|
||||
} // namespace avian
|
@ -52,11 +52,6 @@ bool vfpSupported()
|
||||
}
|
||||
} // namespace isa
|
||||
|
||||
inline unsigned lo8(int64_t i)
|
||||
{
|
||||
return (unsigned)(i & MASK_LO8);
|
||||
}
|
||||
|
||||
const RegisterFile MyRegisterFileWithoutFloats(GPR_MASK, 0);
|
||||
const RegisterFile MyRegisterFileWithFloats(GPR_MASK, FPR_MASK);
|
||||
|
||||
@ -169,39 +164,39 @@ class MyArchitecture : public Architecture {
|
||||
: &MyRegisterFileWithoutFloats;
|
||||
}
|
||||
|
||||
virtual int scratch()
|
||||
virtual Register scratch()
|
||||
{
|
||||
return 5;
|
||||
return Register(5);
|
||||
}
|
||||
|
||||
virtual int stack()
|
||||
virtual Register stack()
|
||||
{
|
||||
return StackRegister;
|
||||
}
|
||||
|
||||
virtual int thread()
|
||||
virtual Register thread()
|
||||
{
|
||||
return ThreadRegister;
|
||||
}
|
||||
|
||||
virtual int returnLow()
|
||||
virtual Register returnLow()
|
||||
{
|
||||
return 0;
|
||||
return Register(0);
|
||||
}
|
||||
|
||||
virtual int returnHigh()
|
||||
virtual Register returnHigh()
|
||||
{
|
||||
return 1;
|
||||
return Register(1);
|
||||
}
|
||||
|
||||
virtual int virtualCallTarget()
|
||||
virtual Register virtualCallTarget()
|
||||
{
|
||||
return 4;
|
||||
return Register(4);
|
||||
}
|
||||
|
||||
virtual int virtualCallIndex()
|
||||
virtual Register virtualCallIndex()
|
||||
{
|
||||
return 3;
|
||||
return Register(3);
|
||||
}
|
||||
|
||||
virtual ir::TargetInfo targetInfo()
|
||||
@ -219,13 +214,13 @@ class MyArchitecture : public Architecture {
|
||||
return 0x1FFFFFF;
|
||||
}
|
||||
|
||||
virtual bool reserved(int register_)
|
||||
virtual bool reserved(Register register_)
|
||||
{
|
||||
switch (register_) {
|
||||
case LinkRegister:
|
||||
case StackRegister:
|
||||
case ThreadRegister:
|
||||
case ProgramCounter:
|
||||
switch (register_.index()) {
|
||||
case LinkRegister.index():
|
||||
case StackRegister.index():
|
||||
case ThreadRegister.index():
|
||||
case ProgramCounter.index():
|
||||
return true;
|
||||
|
||||
default:
|
||||
@ -266,11 +261,11 @@ class MyArchitecture : public Architecture {
|
||||
return 4;
|
||||
}
|
||||
|
||||
virtual int argumentRegister(unsigned index)
|
||||
virtual Register argumentRegister(unsigned index)
|
||||
{
|
||||
assertT(&con, index < argumentRegisterCount());
|
||||
|
||||
return index;
|
||||
return Register(index);
|
||||
}
|
||||
|
||||
virtual bool hasLinkRegister()
|
||||
@ -401,8 +396,8 @@ class MyArchitecture : public Architecture {
|
||||
OperandMask& aMask,
|
||||
bool* thunk)
|
||||
{
|
||||
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
|
||||
aMask.registerMask = ~static_cast<uint64_t>(0);
|
||||
aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
|
||||
aMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
|
||||
*thunk = false;
|
||||
}
|
||||
|
||||
@ -414,12 +409,12 @@ class MyArchitecture : public Architecture {
|
||||
{
|
||||
*thunk = false;
|
||||
aMask.typeMask = ~0;
|
||||
aMask.registerMask = GPR_MASK64;
|
||||
aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
|
||||
switch (op) {
|
||||
case lir::Negate:
|
||||
aMask.typeMask = (1 << lir::RegisterOperand);
|
||||
aMask.registerMask = GPR_MASK64;
|
||||
aMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
break;
|
||||
|
||||
case lir::Absolute:
|
||||
@ -431,8 +426,8 @@ class MyArchitecture : public Architecture {
|
||||
case lir::FloatNegate:
|
||||
case lir::Float2Float:
|
||||
if (vfpSupported()) {
|
||||
aMask.typeMask = (1 << lir::RegisterOperand);
|
||||
aMask.registerMask = FPR_MASK64;
|
||||
aMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
|
||||
} else {
|
||||
*thunk = true;
|
||||
}
|
||||
@ -444,8 +439,8 @@ class MyArchitecture : public Architecture {
|
||||
// thunks or produce inline machine code which handles edge
|
||||
// cases properly.
|
||||
if (false && vfpSupported() && bSize == 4) {
|
||||
aMask.typeMask = (1 << lir::RegisterOperand);
|
||||
aMask.registerMask = FPR_MASK64;
|
||||
aMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
|
||||
} else {
|
||||
*thunk = true;
|
||||
}
|
||||
@ -453,8 +448,8 @@ class MyArchitecture : public Architecture {
|
||||
|
||||
case lir::Int2Float:
|
||||
if (vfpSupported() && aSize == 4) {
|
||||
aMask.typeMask = (1 << lir::RegisterOperand);
|
||||
aMask.registerMask = GPR_MASK64;
|
||||
aMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
} else {
|
||||
*thunk = true;
|
||||
}
|
||||
@ -471,13 +466,13 @@ class MyArchitecture : public Architecture {
|
||||
unsigned,
|
||||
OperandMask& bMask)
|
||||
{
|
||||
bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
|
||||
bMask.registerMask = GPR_MASK64;
|
||||
bMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::MemoryMask;
|
||||
bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
|
||||
switch (op) {
|
||||
case lir::Negate:
|
||||
bMask.typeMask = (1 << lir::RegisterOperand);
|
||||
bMask.registerMask = GPR_MASK64;
|
||||
bMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
break;
|
||||
|
||||
case lir::FloatAbsolute:
|
||||
@ -485,18 +480,18 @@ class MyArchitecture : public Architecture {
|
||||
case lir::FloatNegate:
|
||||
case lir::Float2Float:
|
||||
case lir::Int2Float:
|
||||
bMask.typeMask = (1 << lir::RegisterOperand);
|
||||
bMask.registerMask = FPR_MASK64;
|
||||
bMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
bMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
|
||||
break;
|
||||
|
||||
case lir::Float2Int:
|
||||
bMask.typeMask = (1 << lir::RegisterOperand);
|
||||
bMask.registerMask = GPR_MASK64;
|
||||
bMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
break;
|
||||
|
||||
case lir::Move:
|
||||
if (!(aMask.typeMask & 1 << lir::RegisterOperand)) {
|
||||
bMask.typeMask = 1 << lir::RegisterOperand;
|
||||
if (!(aMask.typeMask & lir::Operand::RegisterPairMask)) {
|
||||
bMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
}
|
||||
break;
|
||||
|
||||
@ -511,21 +506,21 @@ class MyArchitecture : public Architecture {
|
||||
const OperandMask& dstMask)
|
||||
{
|
||||
srcMask.typeMask = ~0;
|
||||
srcMask.registerMask = ~static_cast<uint64_t>(0);
|
||||
srcMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
|
||||
|
||||
tmpMask.typeMask = 0;
|
||||
tmpMask.registerMask = 0;
|
||||
tmpMask.setLowHighRegisterMasks(0, 0);
|
||||
|
||||
if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
|
||||
if (dstMask.typeMask & lir::Operand::MemoryMask) {
|
||||
// can't move directly from memory or constant to memory
|
||||
srcMask.typeMask = 1 << lir::RegisterOperand;
|
||||
tmpMask.typeMask = 1 << lir::RegisterOperand;
|
||||
tmpMask.registerMask = GPR_MASK64;
|
||||
} else if (vfpSupported() && dstMask.typeMask & 1 << lir::RegisterOperand
|
||||
&& dstMask.registerMask & FPR_MASK) {
|
||||
srcMask.typeMask = tmpMask.typeMask = 1 << lir::RegisterOperand
|
||||
| 1 << lir::MemoryOperand;
|
||||
tmpMask.registerMask = ~static_cast<uint64_t>(0);
|
||||
srcMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
tmpMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
tmpMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
} else if (vfpSupported() && dstMask.typeMask & lir::Operand::RegisterPairMask
|
||||
&& dstMask.lowRegisterMask & FPR_MASK) {
|
||||
srcMask.typeMask = tmpMask.typeMask = lir::Operand::RegisterPairMask
|
||||
| lir::Operand::MemoryMask;
|
||||
tmpMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);
|
||||
}
|
||||
}
|
||||
|
||||
@ -537,11 +532,11 @@ class MyArchitecture : public Architecture {
|
||||
unsigned,
|
||||
bool* thunk)
|
||||
{
|
||||
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
|
||||
aMask.registerMask = GPR_MASK64;
|
||||
aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
|
||||
aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
|
||||
bMask.typeMask = (1 << lir::RegisterOperand);
|
||||
bMask.registerMask = GPR_MASK64;
|
||||
bMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK);
|
||||
|
||||
*thunk = false;
|
||||
|
||||
@ -550,7 +545,7 @@ class MyArchitecture : public Architecture {
|
||||
case lir::ShiftRight:
|
||||
case lir::UnsignedShiftRight:
|
||||
if (bSize == 8)
|
||||
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
|
||||
aMask.typeMask = bMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
break;
|
||||
|
||||
case lir::Add:
|
||||
@ -558,7 +553,7 @@ class MyArchitecture : public Architecture {
|
||||
case lir::Or:
|
||||
case lir::Xor:
|
||||
case lir::Multiply:
|
||||
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
|
||||
aMask.typeMask = bMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
break;
|
||||
|
||||
case lir::Divide:
|
||||
@ -572,8 +567,9 @@ class MyArchitecture : public Architecture {
|
||||
case lir::FloatMultiply:
|
||||
case lir::FloatDivide:
|
||||
if (vfpSupported()) {
|
||||
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
|
||||
aMask.registerMask = bMask.registerMask = FPR_MASK64;
|
||||
bMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
|
||||
bMask = aMask;
|
||||
} else {
|
||||
*thunk = true;
|
||||
}
|
||||
@ -590,8 +586,9 @@ class MyArchitecture : public Architecture {
|
||||
case lir::JumpIfFloatLessOrEqualOrUnordered:
|
||||
case lir::JumpIfFloatGreaterOrEqualOrUnordered:
|
||||
if (vfpSupported()) {
|
||||
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
|
||||
aMask.registerMask = bMask.registerMask = FPR_MASK64;
|
||||
aMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK);
|
||||
bMask = aMask;
|
||||
} else {
|
||||
*thunk = true;
|
||||
}
|
||||
@ -611,11 +608,12 @@ class MyArchitecture : public Architecture {
|
||||
OperandMask& cMask)
|
||||
{
|
||||
if (isBranch(op)) {
|
||||
cMask.typeMask = (1 << lir::ConstantOperand);
|
||||
cMask.registerMask = 0;
|
||||
cMask.typeMask = lir::Operand::ConstantMask;
|
||||
cMask.setLowHighRegisterMasks(0, 0);
|
||||
} else {
|
||||
cMask.typeMask = (1 << lir::RegisterOperand);
|
||||
cMask.registerMask = bMask.registerMask;
|
||||
cMask.typeMask = lir::Operand::RegisterPairMask;
|
||||
cMask.lowRegisterMask = bMask.lowRegisterMask;
|
||||
cMask.highRegisterMask = bMask.highRegisterMask;
|
||||
}
|
||||
}
|
||||
|
||||
@ -658,7 +656,7 @@ class MyAssembler : public Assembler {
|
||||
virtual void checkStackOverflow(uintptr_t handler,
|
||||
unsigned stackLimitOffsetFromThread)
|
||||
{
|
||||
lir::Register stack(StackRegister);
|
||||
lir::RegisterPair stack(StackRegister);
|
||||
lir::Memory stackLimit(ThreadRegister, stackLimitOffsetFromThread);
|
||||
lir::Constant handlerConstant(new (con.zone) ResolvedPromise(handler));
|
||||
branchRM(&con,
|
||||
@ -671,11 +669,11 @@ class MyAssembler : public Assembler {
|
||||
|
||||
virtual void saveFrame(unsigned stackOffset, unsigned ipOffset)
|
||||
{
|
||||
lir::Register link(LinkRegister);
|
||||
lir::RegisterPair link(LinkRegister);
|
||||
lir::Memory linkDst(ThreadRegister, ipOffset);
|
||||
moveRM(&con, TargetBytesPerWord, &link, TargetBytesPerWord, &linkDst);
|
||||
|
||||
lir::Register stack(StackRegister);
|
||||
lir::RegisterPair stack(StackRegister);
|
||||
lir::Memory stackDst(ThreadRegister, stackOffset);
|
||||
moveRM(&con, TargetBytesPerWord, &stack, TargetBytesPerWord, &stackDst);
|
||||
}
|
||||
@ -684,7 +682,7 @@ class MyAssembler : public Assembler {
|
||||
{
|
||||
struct Argument {
|
||||
unsigned size;
|
||||
lir::OperandType type;
|
||||
lir::Operand::Type type;
|
||||
lir::Operand* operand;
|
||||
};
|
||||
RUNTIME_ARRAY(Argument, arguments, argumentCount);
|
||||
@ -695,7 +693,7 @@ class MyAssembler : public Assembler {
|
||||
for (unsigned i = 0; i < argumentCount; ++i) {
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned);
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].type
|
||||
= static_cast<lir::OperandType>(va_arg(a, int));
|
||||
= static_cast<lir::Operand::Type>(va_arg(a, int));
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*);
|
||||
footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
TargetBytesPerWord);
|
||||
@ -707,7 +705,7 @@ class MyAssembler : public Assembler {
|
||||
unsigned offset = 0;
|
||||
for (unsigned i = 0; i < argumentCount; ++i) {
|
||||
if (i < arch_->argumentRegisterCount()) {
|
||||
lir::Register dst(arch_->argumentRegister(i));
|
||||
lir::RegisterPair dst(arch_->argumentRegister(i));
|
||||
|
||||
apply(lir::Move,
|
||||
OperandInfo(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
@ -715,7 +713,7 @@ class MyAssembler : public Assembler {
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].operand),
|
||||
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
TargetBytesPerWord),
|
||||
lir::RegisterOperand,
|
||||
lir::Operand::Type::RegisterPair,
|
||||
&dst));
|
||||
|
||||
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
@ -729,7 +727,7 @@ class MyAssembler : public Assembler {
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].operand),
|
||||
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
TargetBytesPerWord),
|
||||
lir::MemoryOperand,
|
||||
lir::Operand::Type::Memory,
|
||||
&dst));
|
||||
|
||||
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
@ -747,12 +745,12 @@ class MyAssembler : public Assembler {
|
||||
// how to handle them:
|
||||
assertT(&con, footprint < 256);
|
||||
|
||||
lir::Register stack(StackRegister);
|
||||
lir::RegisterPair stack(StackRegister);
|
||||
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
|
||||
lir::Constant footprintConstant(&footprintPromise);
|
||||
subC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
|
||||
|
||||
lir::Register returnAddress(LinkRegister);
|
||||
lir::RegisterPair returnAddress(LinkRegister);
|
||||
lir::Memory returnAddressDst(StackRegister,
|
||||
(footprint - 1) * TargetBytesPerWord);
|
||||
moveRM(&con,
|
||||
@ -764,7 +762,7 @@ class MyAssembler : public Assembler {
|
||||
|
||||
virtual void adjustFrame(unsigned difference)
|
||||
{
|
||||
lir::Register stack(StackRegister);
|
||||
lir::RegisterPair stack(StackRegister);
|
||||
ResolvedPromise differencePromise(difference * TargetBytesPerWord);
|
||||
lir::Constant differenceConstant(&differencePromise);
|
||||
subC(&con, TargetBytesPerWord, &differenceConstant, &stack, &stack);
|
||||
@ -774,7 +772,7 @@ class MyAssembler : public Assembler {
|
||||
{
|
||||
footprint += FrameHeaderSize;
|
||||
|
||||
lir::Register returnAddress(LinkRegister);
|
||||
lir::RegisterPair returnAddress(LinkRegister);
|
||||
lir::Memory returnAddressSrc(StackRegister,
|
||||
(footprint - 1) * TargetBytesPerWord);
|
||||
moveMR(&con,
|
||||
@ -783,7 +781,7 @@ class MyAssembler : public Assembler {
|
||||
TargetBytesPerWord,
|
||||
&returnAddress);
|
||||
|
||||
lir::Register stack(StackRegister);
|
||||
lir::RegisterPair stack(StackRegister);
|
||||
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);
|
||||
lir::Constant footprintConstant(&footprintPromise);
|
||||
addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
|
||||
@ -791,16 +789,16 @@ class MyAssembler : public Assembler {
|
||||
|
||||
virtual void popFrameForTailCall(unsigned footprint,
|
||||
int offset,
|
||||
int returnAddressSurrogate,
|
||||
int framePointerSurrogate UNUSED)
|
||||
Register returnAddressSurrogate,
|
||||
Register framePointerSurrogate UNUSED)
|
||||
{
|
||||
assertT(&con, framePointerSurrogate == lir::NoRegister);
|
||||
assertT(&con, framePointerSurrogate == NoRegister);
|
||||
|
||||
if (TailCalls) {
|
||||
if (offset) {
|
||||
footprint += FrameHeaderSize;
|
||||
|
||||
lir::Register link(LinkRegister);
|
||||
lir::RegisterPair link(LinkRegister);
|
||||
lir::Memory returnAddressSrc(StackRegister,
|
||||
(footprint - 1) * TargetBytesPerWord);
|
||||
moveMR(&con,
|
||||
@ -809,16 +807,16 @@ class MyAssembler : public Assembler {
|
||||
TargetBytesPerWord,
|
||||
&link);
|
||||
|
||||
lir::Register stack(StackRegister);
|
||||
lir::RegisterPair stack(StackRegister);
|
||||
ResolvedPromise footprintPromise((footprint - offset)
|
||||
* TargetBytesPerWord);
|
||||
lir::Constant footprintConstant(&footprintPromise);
|
||||
addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);
|
||||
|
||||
if (returnAddressSurrogate != lir::NoRegister) {
|
||||
if (returnAddressSurrogate != NoRegister) {
|
||||
assertT(&con, offset > 0);
|
||||
|
||||
lir::Register ras(returnAddressSurrogate);
|
||||
lir::RegisterPair ras(returnAddressSurrogate);
|
||||
lir::Memory dst(StackRegister, (offset - 1) * TargetBytesPerWord);
|
||||
moveRM(&con, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
|
||||
}
|
||||
@ -842,7 +840,7 @@ class MyAssembler : public Assembler {
|
||||
if (TailCalls and argumentFootprint > StackAlignmentInWords) {
|
||||
offset = argumentFootprint - StackAlignmentInWords;
|
||||
|
||||
lir::Register stack(StackRegister);
|
||||
lir::RegisterPair stack(StackRegister);
|
||||
ResolvedPromise adjustmentPromise(offset * TargetBytesPerWord);
|
||||
lir::Constant adjustment(&adjustmentPromise);
|
||||
addC(&con, TargetBytesPerWord, &adjustment, &stack, &stack);
|
||||
@ -858,7 +856,7 @@ class MyAssembler : public Assembler {
|
||||
{
|
||||
popFrame(frameFootprint);
|
||||
|
||||
lir::Register stack(StackRegister);
|
||||
lir::RegisterPair stack(StackRegister);
|
||||
lir::Memory newStackSrc(ThreadRegister, stackOffsetFromThread);
|
||||
moveMR(&con, TargetBytesPerWord, &newStackSrc, TargetBytesPerWord, &stack);
|
||||
|
||||
@ -890,14 +888,14 @@ class MyAssembler : public Assembler {
|
||||
if (isBranch(op)) {
|
||||
assertT(&con, a.size == b.size);
|
||||
assertT(&con, c.size == TargetBytesPerWord);
|
||||
assertT(&con, c.type == lir::ConstantOperand);
|
||||
assertT(&con, c.type == lir::Operand::Type::Constant);
|
||||
|
||||
arch_->con.branchOperations[branchIndex(&(arch_->con), a.type, b.type)](
|
||||
&con, op, a.size, a.operand, b.operand, c.operand);
|
||||
} else {
|
||||
assertT(&con, b.size == c.size);
|
||||
assertT(&con, b.type == lir::RegisterOperand);
|
||||
assertT(&con, c.type == lir::RegisterOperand);
|
||||
assertT(&con, b.type == lir::Operand::Type::RegisterPair);
|
||||
assertT(&con, c.type == lir::Operand::Type::RegisterPair);
|
||||
|
||||
arch_->con.ternaryOperations[index(&(arch_->con), op, a.type)](
|
||||
&con, b.size, a.operand, b.operand, c.operand);
|
||||
|
@ -85,15 +85,15 @@ class ArchitectureContext {
|
||||
vm::System* s;
|
||||
OperationType operations[lir::OperationCount];
|
||||
UnaryOperationType
|
||||
unaryOperations[lir::UnaryOperationCount * lir::OperandTypeCount];
|
||||
unaryOperations[lir::UnaryOperationCount * lir::Operand::TypeCount];
|
||||
BinaryOperationType binaryOperations[lir::BinaryOperationCount
|
||||
* lir::OperandTypeCount
|
||||
* lir::OperandTypeCount];
|
||||
* lir::Operand::TypeCount
|
||||
* lir::Operand::TypeCount];
|
||||
TernaryOperationType ternaryOperations[lir::NonBranchTernaryOperationCount
|
||||
* lir::OperandTypeCount];
|
||||
* lir::Operand::TypeCount];
|
||||
BranchOperationType branchOperations[lir::BranchOperationCount
|
||||
* lir::OperandTypeCount
|
||||
* lir::OperandTypeCount];
|
||||
* lir::Operand::TypeCount
|
||||
* lir::Operand::TypeCount];
|
||||
};
|
||||
|
||||
inline avian::util::Aborter* getAborter(Context* c)
|
||||
|
@ -46,34 +46,34 @@ enum CONDITION {
|
||||
enum SHIFTOP { LSL, LSR, ASR, ROR };
|
||||
// INSTRUCTION FORMATS
|
||||
inline int
|
||||
DATA(int cond, int opcode, int S, int Rn, int Rd, int shift, int Sh, int Rm)
|
||||
DATA(int cond, int opcode, int S, Register Rn, Register Rd, int shift, int Sh, Register Rm)
|
||||
{
|
||||
return cond << 28 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12 | shift << 7
|
||||
| Sh << 5 | Rm;
|
||||
return cond << 28 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12 | shift << 7
|
||||
| Sh << 5 | Rm.index();
|
||||
}
|
||||
inline int
|
||||
DATAS(int cond, int opcode, int S, int Rn, int Rd, int Rs, int Sh, int Rm)
|
||||
DATAS(int cond, int opcode, int S, Register Rn, Register Rd, Register Rs, int Sh, Register Rm)
|
||||
{
|
||||
return cond << 28 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12 | Rs << 8
|
||||
| Sh << 5 | 1 << 4 | Rm;
|
||||
return cond << 28 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12 | Rs.index() << 8
|
||||
| Sh << 5 | 1 << 4 | Rm.index();
|
||||
}
|
||||
inline int DATAI(int cond, int opcode, int S, int Rn, int Rd, int rot, int imm)
|
||||
inline int DATAI(int cond, int opcode, int S, Register Rn, Register Rd, int rot, int imm)
|
||||
{
|
||||
return cond << 28 | 1 << 25 | opcode << 21 | S << 20 | Rn << 16 | Rd << 12
|
||||
return cond << 28 | 1 << 25 | opcode << 21 | S << 20 | Rn.index() << 16 | Rd.index() << 12
|
||||
| rot << 8 | (imm & 0xff);
|
||||
}
|
||||
inline int BRANCH(int cond, int L, int offset)
|
||||
{
|
||||
return cond << 28 | 5 << 25 | L << 24 | (offset & 0xffffff);
|
||||
}
|
||||
inline int BRANCHX(int cond, int L, int Rm)
|
||||
inline int BRANCHX(int cond, int L, Register Rm)
|
||||
{
|
||||
return cond << 28 | 0x4bffc << 6 | L << 5 | 1 << 4 | Rm;
|
||||
return cond << 28 | 0x4bffc << 6 | L << 5 | 1 << 4 | Rm.index();
|
||||
}
|
||||
inline int MULTIPLY(int cond, int mul, int S, int Rd, int Rn, int Rs, int Rm)
|
||||
inline int MULTIPLY(int cond, int mul, int S, Register Rd, Register Rn, Register Rs, Register Rm)
|
||||
{
|
||||
return cond << 28 | mul << 21 | S << 20 | Rd << 16 | Rn << 12 | Rs << 8
|
||||
| 9 << 4 | Rm;
|
||||
return cond << 28 | mul << 21 | S << 20 | Rd.index() << 16 | Rn.index() << 12 | Rs.index() << 8
|
||||
| 9 << 4 | Rm.index();
|
||||
}
|
||||
inline int XFER(int cond,
|
||||
int P,
|
||||
@ -81,14 +81,14 @@ inline int XFER(int cond,
|
||||
int B,
|
||||
int W,
|
||||
int L,
|
||||
int Rn,
|
||||
int Rd,
|
||||
Register Rn,
|
||||
Register Rd,
|
||||
int shift,
|
||||
int Sh,
|
||||
int Rm)
|
||||
Register Rm)
|
||||
{
|
||||
return cond << 28 | 3 << 25 | P << 24 | U << 23 | B << 22 | W << 21 | L << 20
|
||||
| Rn << 16 | Rd << 12 | shift << 7 | Sh << 5 | Rm;
|
||||
| Rn.index() << 16 | Rd.index() << 12 | shift << 7 | Sh << 5 | Rm.index();
|
||||
}
|
||||
inline int XFERI(int cond,
|
||||
int P,
|
||||
@ -96,41 +96,41 @@ inline int XFERI(int cond,
|
||||
int B,
|
||||
int W,
|
||||
int L,
|
||||
int Rn,
|
||||
int Rd,
|
||||
Register Rn,
|
||||
Register Rd,
|
||||
int offset)
|
||||
{
|
||||
return cond << 28 | 2 << 25 | P << 24 | U << 23 | B << 22 | W << 21 | L << 20
|
||||
| Rn << 16 | Rd << 12 | (offset & 0xfff);
|
||||
| Rn.index() << 16 | Rd.index() << 12 | (offset & 0xfff);
|
||||
}
|
||||
inline int XFER2(int cond,
|
||||
int P,
|
||||
int U,
|
||||
int W,
|
||||
int L,
|
||||
int Rn,
|
||||
int Rd,
|
||||
Register Rn,
|
||||
Register Rd,
|
||||
int S,
|
||||
int H,
|
||||
int Rm)
|
||||
Register Rm)
|
||||
{
|
||||
return cond << 28 | P << 24 | U << 23 | W << 21 | L << 20 | Rn << 16
|
||||
| Rd << 12 | 1 << 7 | S << 6 | H << 5 | 1 << 4 | Rm;
|
||||
return cond << 28 | P << 24 | U << 23 | W << 21 | L << 20 | Rn.index() << 16
|
||||
| Rd.index() << 12 | 1 << 7 | S << 6 | H << 5 | 1 << 4 | Rm.index();
|
||||
}
|
||||
inline int XFER2I(int cond,
|
||||
int P,
|
||||
int U,
|
||||
int W,
|
||||
int L,
|
||||
int Rn,
|
||||
int Rd,
|
||||
Register Rn,
|
||||
Register Rd,
|
||||
int offsetH,
|
||||
int S,
|
||||
int H,
|
||||
int offsetL)
|
||||
{
|
||||
return cond << 28 | P << 24 | U << 23 | 1 << 22 | W << 21 | L << 20 | Rn << 16
|
||||
| Rd << 12 | offsetH << 8 | 1 << 7 | S << 6 | H << 5 | 1 << 4
|
||||
return cond << 28 | P << 24 | U << 23 | 1 << 22 | W << 21 | L << 20 | Rn.index() << 16
|
||||
| Rd.index() << 12 | offsetH << 8 | 1 << 7 | S << 6 | H << 5 | 1 << 4
|
||||
| (offsetL & 0xf);
|
||||
}
|
||||
inline int COOP(int cond,
|
||||
@ -150,30 +150,30 @@ inline int COXFER(int cond,
|
||||
int N,
|
||||
int W,
|
||||
int L,
|
||||
int Rn,
|
||||
Register Rn,
|
||||
int CRd,
|
||||
int cp_num,
|
||||
int offset) // offset is in words, not bytes
|
||||
{
|
||||
return cond << 28 | 0x6 << 25 | P << 24 | U << 23 | N << 22 | W << 21
|
||||
| L << 20 | Rn << 16 | CRd << 12 | cp_num << 8 | (offset & 0xff) >> 2;
|
||||
| L << 20 | Rn.index() << 16 | CRd << 12 | cp_num << 8 | (offset & 0xff) >> 2;
|
||||
}
|
||||
inline int COREG(int cond,
|
||||
int opcode_1,
|
||||
int L,
|
||||
int CRn,
|
||||
int Rd,
|
||||
Register Rd,
|
||||
int cp_num,
|
||||
int opcode_2,
|
||||
int CRm)
|
||||
{
|
||||
return cond << 28 | 0xe << 24 | opcode_1 << 21 | L << 20 | CRn << 16
|
||||
| Rd << 12 | cp_num << 8 | opcode_2 << 5 | 1 << 4 | CRm;
|
||||
| Rd.index() << 12 | cp_num << 8 | opcode_2 << 5 | 1 << 4 | CRm;
|
||||
}
|
||||
inline int
|
||||
COREG2(int cond, int L, int Rn, int Rd, int cp_num, int opcode, int CRm)
|
||||
COREG2(int cond, int L, Register Rn, Register Rd, int cp_num, int opcode, int CRm)
|
||||
{
|
||||
return cond << 28 | 0xc4 << 20 | L << 20 | Rn << 16 | Rd << 12 | cp_num << 8
|
||||
return cond << 28 | 0xc4 << 20 | L << 20 | Rn.index() << 16 | Rd.index() << 12 | cp_num << 8
|
||||
| opcode << 4 | CRm;
|
||||
}
|
||||
// FIELD CALCULATORS
|
||||
@ -191,143 +191,143 @@ inline int bl(int offset)
|
||||
{
|
||||
return BRANCH(AL, 1, offset);
|
||||
}
|
||||
inline int bx(int Rm)
|
||||
inline int bx(Register Rm)
|
||||
{
|
||||
return BRANCHX(AL, 0, Rm);
|
||||
}
|
||||
inline int blx(int Rm)
|
||||
inline int blx(Register Rm)
|
||||
{
|
||||
return BRANCHX(AL, 1, Rm);
|
||||
}
|
||||
inline int and_(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int and_(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0x0, 0, Rn, Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int eor(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int eor(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0x1, 0, Rn, Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int rsb(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int rsb(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0x3, 0, Rn, Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int add(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int add(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0x4, 0, Rn, Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int adc(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int adc(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0x5, 0, Rn, Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int rsc(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int rsc(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0x7, 0, Rn, Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int cmp(int Rn, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int cmp(Register Rn, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0xa, 1, Rn, 0, shift, Sh, Rm);
|
||||
return DATA(AL, 0xa, 1, Rn, Register(0), shift, Sh, Rm);
|
||||
}
|
||||
inline int orr(int Rd, int Rn, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int orr(Register Rd, Register Rn, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0xc, 0, Rn, Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int mov(int Rd, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int mov(Register Rd, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0xd, 0, 0, Rd, shift, Sh, Rm);
|
||||
return DATA(AL, 0xd, 0, Register(0), Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int mvn(int Rd, int Rm, int Sh = 0, int shift = 0)
|
||||
inline int mvn(Register Rd, Register Rm, int Sh = 0, int shift = 0)
|
||||
{
|
||||
return DATA(AL, 0xf, 0, 0, Rd, shift, Sh, Rm);
|
||||
return DATA(AL, 0xf, 0, Register(0), Rd, shift, Sh, Rm);
|
||||
}
|
||||
inline int andi(int Rd, int Rn, int imm, int rot = 0)
|
||||
inline int andi(Register Rd, Register Rn, int imm, int rot = 0)
|
||||
{
|
||||
return DATAI(AL, 0x0, 0, Rn, Rd, rot, imm);
|
||||
}
|
||||
inline int subi(int Rd, int Rn, int imm, int rot = 0)
|
||||
inline int subi(Register Rd, Register Rn, int imm, int rot = 0)
|
||||
{
|
||||
return DATAI(AL, 0x2, 0, Rn, Rd, rot, imm);
|
||||
}
|
||||
inline int rsbi(int Rd, int Rn, int imm, int rot = 0)
|
||||
inline int rsbi(Register Rd, Register Rn, int imm, int rot = 0)
|
||||
{
|
||||
return DATAI(AL, 0x3, 0, Rn, Rd, rot, imm);
|
||||
}
|
||||
inline int addi(int Rd, int Rn, int imm, int rot = 0)
|
||||
inline int addi(Register Rd, Register Rn, int imm, int rot = 0)
|
||||
{
|
||||
return DATAI(AL, 0x4, 0, Rn, Rd, rot, imm);
|
||||
}
|
||||
inline int adci(int Rd, int Rn, int imm, int rot = 0)
|
||||
inline int adci(Register Rd, Register Rn, int imm, int rot = 0)
|
||||
{
|
||||
return DATAI(AL, 0x5, 0, Rn, Rd, rot, imm);
|
||||
}
|
||||
inline int bici(int Rd, int Rn, int imm, int rot = 0)
|
||||
inline int bici(Register Rd, Register Rn, int imm, int rot = 0)
|
||||
{
|
||||
return DATAI(AL, 0xe, 0, Rn, Rd, rot, imm);
|
||||
}
|
||||
inline int cmpi(int Rn, int imm, int rot = 0)
|
||||
inline int cmpi(Register Rn, int imm, int rot = 0)
|
||||
{
|
||||
return DATAI(AL, 0xa, 1, Rn, 0, rot, imm);
|
||||
return DATAI(AL, 0xa, 1, Rn, Register(0), rot, imm);
|
||||
}
|
||||
inline int movi(int Rd, int imm, int rot = 0)
|
||||
inline int movi(Register Rd, int imm, int rot = 0)
|
||||
{
|
||||
return DATAI(AL, 0xd, 0, 0, Rd, rot, imm);
|
||||
return DATAI(AL, 0xd, 0, Register(0), Rd, rot, imm);
|
||||
}
|
||||
inline int orrsh(int Rd, int Rn, int Rm, int Rs, int Sh)
|
||||
inline int orrsh(Register Rd, Register Rn, Register Rm, Register Rs, int Sh)
|
||||
{
|
||||
return DATAS(AL, 0xc, 0, Rn, Rd, Rs, Sh, Rm);
|
||||
}
|
||||
inline int movsh(int Rd, int Rm, int Rs, int Sh)
|
||||
inline int movsh(Register Rd, Register Rm, Register Rs, int Sh)
|
||||
{
|
||||
return DATAS(AL, 0xd, 0, 0, Rd, Rs, Sh, Rm);
|
||||
return DATAS(AL, 0xd, 0, Register(0), Rd, Rs, Sh, Rm);
|
||||
}
|
||||
inline int mul(int Rd, int Rm, int Rs)
|
||||
inline int mul(Register Rd, Register Rm, Register Rs)
|
||||
{
|
||||
return MULTIPLY(AL, 0, 0, Rd, 0, Rs, Rm);
|
||||
return MULTIPLY(AL, 0, 0, Rd, Register(0), Rs, Rm);
|
||||
}
|
||||
inline int mla(int Rd, int Rm, int Rs, int Rn)
|
||||
inline int mla(Register Rd, Register Rm, Register Rs, Register Rn)
|
||||
{
|
||||
return MULTIPLY(AL, 1, 0, Rd, Rn, Rs, Rm);
|
||||
}
|
||||
inline int umull(int RdLo, int RdHi, int Rm, int Rs)
|
||||
inline int umull(Register RdLo, Register RdHi, Register Rm, Register Rs)
|
||||
{
|
||||
return MULTIPLY(AL, 4, 0, RdHi, RdLo, Rs, Rm);
|
||||
}
|
||||
inline int ldr(int Rd, int Rn, int Rm, int W = 0)
|
||||
inline int ldr(Register Rd, Register Rn, Register Rm, int W = 0)
|
||||
{
|
||||
return XFER(AL, 1, 1, 0, W, 1, Rn, Rd, 0, 0, Rm);
|
||||
}
|
||||
inline int ldri(int Rd, int Rn, int imm, int W = 0)
|
||||
inline int ldri(Register Rd, Register Rn, int imm, int W = 0)
|
||||
{
|
||||
return XFERI(AL, 1, calcU(imm), 0, W, 1, Rn, Rd, abs(imm));
|
||||
}
|
||||
inline int ldrb(int Rd, int Rn, int Rm)
|
||||
inline int ldrb(Register Rd, Register Rn, Register Rm)
|
||||
{
|
||||
return XFER(AL, 1, 1, 1, 0, 1, Rn, Rd, 0, 0, Rm);
|
||||
}
|
||||
inline int ldrbi(int Rd, int Rn, int imm)
|
||||
inline int ldrbi(Register Rd, Register Rn, int imm)
|
||||
{
|
||||
return XFERI(AL, 1, calcU(imm), 1, 0, 1, Rn, Rd, abs(imm));
|
||||
}
|
||||
inline int str(int Rd, int Rn, int Rm, int W = 0)
|
||||
inline int str(Register Rd, Register Rn, Register Rm, int W = 0)
|
||||
{
|
||||
return XFER(AL, 1, 1, 0, W, 0, Rn, Rd, 0, 0, Rm);
|
||||
}
|
||||
inline int stri(int Rd, int Rn, int imm, int W = 0)
|
||||
inline int stri(Register Rd, Register Rn, int imm, int W = 0)
|
||||
{
|
||||
return XFERI(AL, 1, calcU(imm), 0, W, 0, Rn, Rd, abs(imm));
|
||||
}
|
||||
inline int strb(int Rd, int Rn, int Rm)
|
||||
inline int strb(Register Rd, Register Rn, Register Rm)
|
||||
{
|
||||
return XFER(AL, 1, 1, 1, 0, 0, Rn, Rd, 0, 0, Rm);
|
||||
}
|
||||
inline int strbi(int Rd, int Rn, int imm)
|
||||
inline int strbi(Register Rd, Register Rn, int imm)
|
||||
{
|
||||
return XFERI(AL, 1, calcU(imm), 1, 0, 0, Rn, Rd, abs(imm));
|
||||
}
|
||||
inline int ldrh(int Rd, int Rn, int Rm)
|
||||
inline int ldrh(Register Rd, Register Rn, Register Rm)
|
||||
{
|
||||
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 0, 1, Rm);
|
||||
}
|
||||
inline int ldrhi(int Rd, int Rn, int imm)
|
||||
inline int ldrhi(Register Rd, Register Rn, int imm)
|
||||
{
|
||||
return XFER2I(AL,
|
||||
1,
|
||||
@ -341,11 +341,11 @@ inline int ldrhi(int Rd, int Rn, int imm)
|
||||
1,
|
||||
abs(imm) & 0xf);
|
||||
}
|
||||
inline int strh(int Rd, int Rn, int Rm)
|
||||
inline int strh(Register Rd, Register Rn, Register Rm)
|
||||
{
|
||||
return XFER2(AL, 1, 1, 0, 0, Rn, Rd, 0, 1, Rm);
|
||||
}
|
||||
inline int strhi(int Rd, int Rn, int imm)
|
||||
inline int strhi(Register Rd, Register Rn, int imm)
|
||||
{
|
||||
return XFER2I(AL,
|
||||
1,
|
||||
@ -359,11 +359,11 @@ inline int strhi(int Rd, int Rn, int imm)
|
||||
1,
|
||||
abs(imm) & 0xf);
|
||||
}
|
||||
inline int ldrsh(int Rd, int Rn, int Rm)
|
||||
inline int ldrsh(Register Rd, Register Rn, Register Rm)
|
||||
{
|
||||
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 1, Rm);
|
||||
}
|
||||
inline int ldrshi(int Rd, int Rn, int imm)
|
||||
inline int ldrshi(Register Rd, Register Rn, int imm)
|
||||
{
|
||||
return XFER2I(AL,
|
||||
1,
|
||||
@ -377,11 +377,11 @@ inline int ldrshi(int Rd, int Rn, int imm)
|
||||
1,
|
||||
abs(imm) & 0xf);
|
||||
}
|
||||
inline int ldrsb(int Rd, int Rn, int Rm)
|
||||
inline int ldrsb(Register Rd, Register Rn, Register Rm)
|
||||
{
|
||||
return XFER2(AL, 1, 1, 0, 1, Rn, Rd, 1, 0, Rm);
|
||||
}
|
||||
inline int ldrsbi(int Rd, int Rn, int imm)
|
||||
inline int ldrsbi(Register Rd, Register Rn, int imm)
|
||||
{
|
||||
return XFER2I(AL,
|
||||
1,
|
||||
@ -403,27 +403,27 @@ inline int bkpt(int16_t immed)
|
||||
// COPROCESSOR INSTRUCTIONS
|
||||
inline int mcr(int coproc,
|
||||
int opcode_1,
|
||||
int Rd,
|
||||
Register Rd,
|
||||
int CRn,
|
||||
int CRm,
|
||||
int opcode_2 = 0)
|
||||
{
|
||||
return COREG(AL, opcode_1, 0, CRn, Rd, coproc, opcode_2, CRm);
|
||||
}
|
||||
inline int mcrr(int coproc, int opcode, int Rd, int Rn, int CRm)
|
||||
inline int mcrr(int coproc, int opcode, Register Rd, Register Rn, int CRm)
|
||||
{
|
||||
return COREG2(AL, 0, Rn, Rd, coproc, opcode, CRm);
|
||||
}
|
||||
inline int mrc(int coproc,
|
||||
int opcode_1,
|
||||
int Rd,
|
||||
Register Rd,
|
||||
int CRn,
|
||||
int CRm,
|
||||
int opcode_2 = 0)
|
||||
{
|
||||
return COREG(AL, opcode_1, 1, CRn, Rd, coproc, opcode_2, CRm);
|
||||
}
|
||||
inline int mrrc(int coproc, int opcode, int Rd, int Rn, int CRm)
|
||||
inline int mrrc(int coproc, int opcode, Register Rd, Register Rn, int CRm)
|
||||
{
|
||||
return COREG2(AL, 1, Rn, Rd, coproc, opcode, CRm);
|
||||
}
|
||||
@ -551,42 +551,42 @@ inline int ftosizd(int Sd, int Dm)
|
||||
return COOP(AL, 0xb | (Sd & 1) << 2, 0xd, Sd >> 1, 11, 6, Dm);
|
||||
}
|
||||
// single load/store instructions for both precision types
|
||||
inline int flds(int Sd, int Rn, int offset = 0)
|
||||
inline int flds(int Sd, Register Rn, int offset = 0)
|
||||
{
|
||||
return COXFER(AL, 1, 1, Sd & 1, 0, 1, Rn, Sd >> 1, 10, offset);
|
||||
};
|
||||
inline int fldd(int Dd, int Rn, int offset = 0)
|
||||
inline int fldd(int Dd, Register Rn, int offset = 0)
|
||||
{
|
||||
return COXFER(AL, 1, 1, 0, 0, 1, Rn, Dd, 11, offset);
|
||||
};
|
||||
inline int fsts(int Sd, int Rn, int offset = 0)
|
||||
inline int fsts(int Sd, Register Rn, int offset = 0)
|
||||
{
|
||||
return COXFER(AL, 1, 1, Sd & 1, 0, 0, Rn, Sd >> 1, 10, offset);
|
||||
};
|
||||
inline int fstd(int Dd, int Rn, int offset = 0)
|
||||
inline int fstd(int Dd, Register Rn, int offset = 0)
|
||||
{
|
||||
return COXFER(AL, 1, 1, 0, 0, 0, Rn, Dd, 11, offset);
|
||||
};
|
||||
// move between GPRs and FPRs
|
||||
inline int fmsr(int Sn, int Rd)
|
||||
inline int fmsr(int Sn, Register Rd)
|
||||
{
|
||||
return mcr(10, 0, Rd, Sn >> 1, 0, (Sn & 1) << 2);
|
||||
}
|
||||
inline int fmrs(int Rd, int Sn)
|
||||
inline int fmrs(Register Rd, int Sn)
|
||||
{
|
||||
return mrc(10, 0, Rd, Sn >> 1, 0, (Sn & 1) << 2);
|
||||
}
|
||||
// move to/from VFP system registers
|
||||
inline int fmrx(int Rd, int reg)
|
||||
inline int fmrx(Register Rd, int reg)
|
||||
{
|
||||
return mrc(10, 7, Rd, reg, 0);
|
||||
}
|
||||
// these move around pairs of single-precision registers
|
||||
inline int fmdrr(int Dm, int Rd, int Rn)
|
||||
inline int fmdrr(int Dm, Register Rd, Register Rn)
|
||||
{
|
||||
return mcrr(11, 1, Rd, Rn, Dm);
|
||||
}
|
||||
inline int fmrrd(int Rd, int Rn, int Dm)
|
||||
inline int fmrrd(Register Rd, Register Rn, int Dm)
|
||||
{
|
||||
return mrrc(11, 1, Rd, Rn, Dm);
|
||||
}
|
||||
@ -600,27 +600,27 @@ inline int SETS(int ins)
|
||||
return ins | 1 << 20;
|
||||
}
|
||||
// PSEUDO-INSTRUCTIONS
|
||||
inline int lsl(int Rd, int Rm, int Rs)
|
||||
inline int lsl(Register Rd, Register Rm, Register Rs)
|
||||
{
|
||||
return movsh(Rd, Rm, Rs, LSL);
|
||||
}
|
||||
inline int lsli(int Rd, int Rm, int imm)
|
||||
inline int lsli(Register Rd, Register Rm, int imm)
|
||||
{
|
||||
return mov(Rd, Rm, LSL, imm);
|
||||
}
|
||||
inline int lsr(int Rd, int Rm, int Rs)
|
||||
inline int lsr(Register Rd, Register Rm, Register Rs)
|
||||
{
|
||||
return movsh(Rd, Rm, Rs, LSR);
|
||||
}
|
||||
inline int lsri(int Rd, int Rm, int imm)
|
||||
inline int lsri(Register Rd, Register Rm, int imm)
|
||||
{
|
||||
return mov(Rd, Rm, LSR, imm);
|
||||
}
|
||||
inline int asr(int Rd, int Rm, int Rs)
|
||||
inline int asr(Register Rd, Register Rm, Register Rs)
|
||||
{
|
||||
return movsh(Rd, Rm, Rs, ASR);
|
||||
}
|
||||
inline int asri(int Rd, int Rm, int imm)
|
||||
inline int asri(Register Rd, Register Rm, int imm)
|
||||
{
|
||||
return mov(Rd, Rm, ASR, imm);
|
||||
}
|
||||
@ -670,7 +670,7 @@ inline int bpl(int offset)
|
||||
}
|
||||
inline int fmstat()
|
||||
{
|
||||
return fmrx(15, FPSCR);
|
||||
return fmrx(Register(15), FPSCR);
|
||||
}
|
||||
// todo: make this pretty:
|
||||
inline int dmb()
|
||||
|
@ -22,35 +22,35 @@ using namespace util;
|
||||
|
||||
unsigned index(ArchitectureContext*,
|
||||
lir::BinaryOperation operation,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2)
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2)
|
||||
{
|
||||
return operation + (lir::BinaryOperationCount * operand1)
|
||||
+ (lir::BinaryOperationCount * lir::OperandTypeCount * operand2);
|
||||
return operation + (lir::BinaryOperationCount * (unsigned)operand1)
|
||||
+ (lir::BinaryOperationCount * lir::Operand::TypeCount * (unsigned)operand2);
|
||||
}
|
||||
|
||||
unsigned index(ArchitectureContext* con UNUSED,
|
||||
lir::TernaryOperation operation,
|
||||
lir::OperandType operand1)
|
||||
lir::Operand::Type operand1)
|
||||
{
|
||||
assertT(con, not isBranch(operation));
|
||||
|
||||
return operation + (lir::NonBranchTernaryOperationCount * operand1);
|
||||
return operation + (lir::NonBranchTernaryOperationCount * (unsigned)operand1);
|
||||
}
|
||||
|
||||
unsigned branchIndex(ArchitectureContext* con UNUSED,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2)
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2)
|
||||
{
|
||||
return operand1 + (lir::OperandTypeCount * operand2);
|
||||
return (unsigned)operand1 + (lir::Operand::TypeCount * (unsigned)operand2);
|
||||
}
|
||||
|
||||
void populateTables(ArchitectureContext* con)
|
||||
{
|
||||
const lir::OperandType C = lir::ConstantOperand;
|
||||
const lir::OperandType A = lir::AddressOperand;
|
||||
const lir::OperandType R = lir::RegisterOperand;
|
||||
const lir::OperandType M = lir::MemoryOperand;
|
||||
const lir::Operand::Type C = lir::Operand::Type::Constant;
|
||||
const lir::Operand::Type A = lir::Operand::Type::Address;
|
||||
const lir::Operand::Type R = lir::Operand::Type::RegisterPair;
|
||||
const lir::Operand::Type M = lir::Operand::Type::Memory;
|
||||
|
||||
OperationType* zo = con->operations;
|
||||
UnaryOperationType* uo = con->unaryOperations;
|
||||
|
@ -25,16 +25,16 @@ namespace arm {
|
||||
|
||||
unsigned index(ArchitectureContext*,
|
||||
lir::BinaryOperation operation,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2);
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2);
|
||||
|
||||
unsigned index(ArchitectureContext* con UNUSED,
|
||||
lir::TernaryOperation operation,
|
||||
lir::OperandType operand1);
|
||||
lir::Operand::Type operand1);
|
||||
|
||||
unsigned branchIndex(ArchitectureContext* con UNUSED,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2);
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2);
|
||||
|
||||
void populateTables(ArchitectureContext* con);
|
||||
|
||||
|
@ -35,20 +35,20 @@ inline unsigned lo8(int64_t i)
|
||||
void andC(Context* con,
|
||||
unsigned size,
|
||||
lir::Constant* a,
|
||||
lir::Register* b,
|
||||
lir::Register* dst);
|
||||
lir::RegisterPair* b,
|
||||
lir::RegisterPair* dst);
|
||||
|
||||
void shiftLeftR(Context* con,
|
||||
unsigned size,
|
||||
lir::Register* a,
|
||||
lir::Register* b,
|
||||
lir::Register* t)
|
||||
lir::RegisterPair* a,
|
||||
lir::RegisterPair* b,
|
||||
lir::RegisterPair* t)
|
||||
{
|
||||
if (size == 8) {
|
||||
int tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
|
||||
Register tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
|
||||
ResolvedPromise maskPromise(0x3F);
|
||||
lir::Constant mask(&maskPromise);
|
||||
lir::Register dst(tmp3);
|
||||
lir::RegisterPair dst(tmp3);
|
||||
andC(con, 4, &mask, a, &dst);
|
||||
emit(con, lsl(tmp1, b->high, tmp3));
emit(con, rsbi(tmp2, tmp3, 32));
@ -61,10 +61,10 @@ void shiftLeftR(Context* con,
freeTemp(con, tmp2);
freeTemp(con, tmp3);
} else {
int tmp = newTemp(con);
Register tmp = newTemp(con);
ResolvedPromise maskPromise(0x1F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmp);
lir::RegisterPair dst(tmp);
andC(con, size, &mask, a, &dst);
emit(con, lsl(t->low, b->low, tmp));
freeTemp(con, tmp);
@ -73,15 +73,15 @@ void shiftLeftR(Context* con,

void moveRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);

void shiftLeftC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* b,
lir::RegisterPair* t)
{
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
@ -93,15 +93,15 @@ void shiftLeftC(Context* con,

void shiftRightR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
int tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
Register tmp1 = newTemp(con), tmp2 = newTemp(con), tmp3 = newTemp(con);
ResolvedPromise maskPromise(0x3F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmp3);
lir::RegisterPair dst(tmp3);
andC(con, 4, &mask, a, &dst);
emit(con, lsr(tmp1, b->low, tmp3));
emit(con, rsbi(tmp2, tmp3, 32));
@ -114,10 +114,10 @@ void shiftRightR(Context* con,
freeTemp(con, tmp2);
freeTemp(con, tmp3);
} else {
int tmp = newTemp(con);
Register tmp = newTemp(con);
ResolvedPromise maskPromise(0x1F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmp);
lir::RegisterPair dst(tmp);
andC(con, size, &mask, a, &dst);
emit(con, asr(t->low, b->low, tmp));
freeTemp(con, tmp);
@ -127,8 +127,8 @@ void shiftRightR(Context* con,
void shiftRightC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* b,
lir::RegisterPair* t)
{
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
@ -140,18 +140,18 @@ void shiftRightC(Context* con,

void unsignedShiftRightR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
int tmpShift = newTemp(con);
Register tmpShift = newTemp(con);
ResolvedPromise maskPromise(size == 8 ? 0x3F : 0x1F);
lir::Constant mask(&maskPromise);
lir::Register dst(tmpShift);
lir::RegisterPair dst(tmpShift);
andC(con, 4, &mask, a, &dst);
emit(con, lsr(t->low, b->low, tmpShift));
if (size == 8) {
int tmpHi = newTemp(con), tmpLo = newTemp(con);
Register tmpHi = newTemp(con), tmpLo = newTemp(con);
emit(con, SETS(rsbi(tmpHi, tmpShift, 32)));
emit(con, lsl(tmpLo, b->high, tmpHi));
emit(con, orr(t->low, t->low, tmpLo));
@ -168,8 +168,8 @@ void unsignedShiftRightR(Context* con,
void unsignedShiftRightC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* b,
lir::RegisterPair* t)
{
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
@ -274,7 +274,7 @@ void resolve(MyBlock* b)
}
}

void jumpR(Context* con, unsigned size UNUSED, lir::Register* target)
void jumpR(Context* con, unsigned size UNUSED, lir::RegisterPair* target)
{
assertT(con, size == vm::TargetBytesPerWord);
emit(con, bx(target->low));
@ -282,14 +282,14 @@ void jumpR(Context* con, unsigned size UNUSED, lir::Register* target)

void swapRR(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(con, aSize == vm::TargetBytesPerWord);
assertT(con, bSize == vm::TargetBytesPerWord);

lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveRR(con, aSize, a, bSize, &tmp);
moveRR(con, bSize, b, aSize, a);
moveRR(con, bSize, &tmp, bSize, b);
@ -298,9 +298,9 @@ void swapRR(Context* con,

void moveRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
bool srcIsFpr = isFpr(src);
bool dstIsFpr = isFpr(dst);
@ -343,8 +343,8 @@ void moveRR(Context* con,
moveRR(con, 4, src, 4, dst);
emit(con, asri(dst->high, src->low, 31));
} else if (srcSize == 8 and dstSize == 8) {
lir::Register srcHigh(src->high);
lir::Register dstHigh(dst->high);
lir::RegisterPair srcHigh(src->high);
lir::RegisterPair dstHigh(dst->high);

if (src->high == dst->low) {
if (src->low == dst->high) {
@ -369,9 +369,9 @@ void moveRR(Context* con,

void moveZRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned,
lir::Register* dst)
lir::RegisterPair* dst)
{
switch (srcSize) {
case 2:
@ -388,16 +388,16 @@ void moveCR(Context* con,
unsigned size,
lir::Constant* src,
unsigned,
lir::Register* dst);
lir::RegisterPair* dst);

void moveCR2(Context* con,
unsigned size,
lir::Constant* src,
lir::Register* dst,
lir::RegisterPair* dst,
Promise* callOffset)
{
if (isFpr(dst)) { // floating-point
lir::Register tmp = size > 4 ? makeTemp64(con) : makeTemp(con);
lir::RegisterPair tmp = size > 4 ? makeTemp64(con) : makeTemp(con);
moveCR(con, size, src, size, &tmp);
moveRR(con, size, &tmp, size, dst);
freeTemp(con, tmp);
@ -407,7 +407,7 @@ void moveCR2(Context* con,
lir::Constant srcLo(&loBits);
ResolvedPromise hiBits(value >> 32);
lir::Constant srcHi(&hiBits);
lir::Register dstHi(dst->high);
lir::RegisterPair dstHi(dst->high);
moveCR(con, 4, &srcLo, 4, dst);
moveCR(con, 4, &srcHi, 4, &dstHi);
} else if (src->value->resolved() and isOfWidth(getValue(src), 8)) {
@ -422,16 +422,16 @@ void moveCR(Context* con,
unsigned size,
lir::Constant* src,
unsigned,
lir::Register* dst)
lir::RegisterPair* dst)
{
moveCR2(con, size, src, dst, 0);
}

void addR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, SETS(add(t->low, a->low, b->low)));
@ -443,9 +443,9 @@ void addR(Context* con,

void subR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, SETS(rsb(t->low, a->low, b->low)));
@ -458,8 +458,8 @@ void subR(Context* con,
void addC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
assertT(con, size == vm::TargetBytesPerWord);

@ -481,8 +481,8 @@ void addC(Context* con,
void subC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
assertT(con, size == vm::TargetBytesPerWord);

@ -503,15 +503,15 @@ void subC(Context* con,

void multiplyR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
bool useTemporaries = b->low == t->low;
int tmpLow = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
Register tmpLow = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
: t->low;
int tmpHigh = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
Register tmpHigh = useTemporaries ? con->client->acquireTemporary(GPR_MASK)
: t->high;

emit(con, umull(tmpLow, tmpHigh, a->low, b->low));
@ -531,9 +531,9 @@ void multiplyR(Context* con,

void floatAbsoluteRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
if (size == 8) {
emit(con, fabsd(fpr64(b), fpr64(a)));
@ -544,9 +544,9 @@ void floatAbsoluteRR(Context* con,

void floatNegateRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
if (size == 8) {
emit(con, fnegd(fpr64(b), fpr64(a)));
@ -557,9 +557,9 @@ void floatNegateRR(Context* con,

void float2FloatRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
if (size == 8) {
emit(con, fcvtsd(fpr32(b), fpr64(a)));
@ -570,11 +570,11 @@ void float2FloatRR(Context* con,

void float2IntRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
int tmp = newTemp(con, FPR_MASK);
Register tmp = newTemp(con, FPR_MASK);
int ftmp = fpr32(tmp);
if (size == 8) { // double to int
emit(con, ftosizd(ftmp, fpr64(a)));
@ -587,9 +587,9 @@ void float2IntRR(Context* con,

void int2FloatRR(Context* con,
unsigned,
lir::Register* a,
lir::RegisterPair* a,
unsigned size,
lir::Register* b)
lir::RegisterPair* b)
{
emit(con, fmsr(fpr32(b), a->low));
if (size == 8) { // int to double
@ -601,9 +601,9 @@ void int2FloatRR(Context* con,

void floatSqrtRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b)
lir::RegisterPair* b)
{
if (size == 8) {
emit(con, fsqrtd(fpr64(b), fpr64(a)));
@ -614,9 +614,9 @@ void floatSqrtRR(Context* con,

void floatAddR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, faddd(fpr64(t), fpr64(a), fpr64(b)));
@ -627,9 +627,9 @@ void floatAddR(Context* con,

void floatSubtractR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, fsubd(fpr64(t), fpr64(b), fpr64(a)));
@ -640,9 +640,9 @@ void floatSubtractR(Context* con,

void floatMultiplyR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, fmuld(fpr64(t), fpr64(a), fpr64(b)));
@ -653,9 +653,9 @@ void floatMultiplyR(Context* con,

void floatDivideR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t)
{
if (size == 8) {
emit(con, fdivd(fpr64(t), fpr64(b), fpr64(a)));
@ -664,15 +664,15 @@ void floatDivideR(Context* con,
}
}

int normalize(Context* con,
Register normalize(Context* con,
int offset,
int index,
Register index,
unsigned scale,
bool* preserveIndex,
bool* release)
{
if (offset != 0 or scale != 1) {
lir::Register normalizedIndex(
lir::RegisterPair normalizedIndex(
*preserveIndex ? con->client->acquireTemporary(GPR_MASK) : index);

if (*preserveIndex) {
@ -682,10 +682,10 @@ int normalize(Context* con,
*release = false;
}

int scaled;
Register scaled;

if (scale != 1) {
lir::Register unscaledIndex(index);
lir::RegisterPair unscaledIndex(index);

ResolvedPromise scalePromise(log(scale));
lir::Constant scaleConstant(&scalePromise);
@ -702,12 +702,12 @@ int normalize(Context* con,
}

if (offset != 0) {
lir::Register untranslatedIndex(scaled);
lir::RegisterPair untranslatedIndex(scaled);

ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise);

lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con,
vm::TargetBytesPerWord,
&offsetConstant,
@ -730,16 +730,16 @@ int normalize(Context* con,

void store(Context* con,
unsigned size,
lir::Register* src,
int base,
lir::RegisterPair* src,
Register base,
int offset,
int index,
Register index,
unsigned scale,
bool preserveIndex)
{
if (index != lir::NoRegister) {
if (index != NoRegister) {
bool release;
int normalized
Register normalized
= normalize(con, offset, index, scale, &preserveIndex, &release);

if (!isFpr(src)) { // GPR store
@ -757,7 +757,7 @@ void store(Context* con,
break;

case 8: { // split into 2 32-bit stores
lir::Register srcHigh(src->high);
lir::RegisterPair srcHigh(src->high);
store(con, 4, &srcHigh, base, 0, normalized, 1, preserveIndex);
store(con, 4, src, base, 4, normalized, 1, preserveIndex);
} break;
@ -766,7 +766,7 @@ void store(Context* con,
abort(con);
}
} else { // FPR store
lir::Register base_(base), normalized_(normalized),
lir::RegisterPair base_(base), normalized_(normalized),
absAddr = makeTemp(con);
// FPR stores have only bases, so we must add the index
addR(con, vm::TargetBytesPerWord, &base_, &normalized_, &absAddr);
@ -798,9 +798,9 @@ void store(Context* con,
break;

case 8: { // split into 2 32-bit stores
lir::Register srcHigh(src->high);
store(con, 4, &srcHigh, base, offset, lir::NoRegister, 1, false);
store(con, 4, src, base, offset + 4, lir::NoRegister, 1, false);
lir::RegisterPair srcHigh(src->high);
store(con, 4, &srcHigh, base, offset, NoRegister, 1, false);
store(con, 4, src, base, offset + 4, NoRegister, 1, false);
} break;

default:
@ -815,7 +815,7 @@ void store(Context* con,
emit(con, fsts(fpr32(src), base, offset));
}
} else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise);
moveCR(con,
@ -832,7 +832,7 @@ void store(Context* con,

void moveRM(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize UNUSED,
lir::Memory* dst)
{
@ -844,18 +844,18 @@ void moveRM(Context* con,

void load(Context* con,
unsigned srcSize,
int base,
Register base,
int offset,
int index,
Register index,
unsigned scale,
unsigned dstSize,
lir::Register* dst,
lir::RegisterPair* dst,
bool preserveIndex,
bool signExtend)
{
if (index != lir::NoRegister) {
if (index != NoRegister) {
bool release;
int normalized
Register normalized
= normalize(con, offset, index, scale, &preserveIndex, &release);

if (!isFpr(dst)) { // GPR load
@ -882,7 +882,7 @@ void load(Context* con,
load(con, 4, base, 0, normalized, 1, 4, dst, preserveIndex, false);
moveRR(con, 4, dst, 8, dst);
} else if (srcSize == 8 and dstSize == 8) {
lir::Register dstHigh(dst->high);
lir::RegisterPair dstHigh(dst->high);
load(con,
4,
base,
@ -903,7 +903,7 @@ void load(Context* con,
abort(con);
}
} else { // FPR load
lir::Register base_(base), normalized_(normalized),
lir::RegisterPair base_(base), normalized_(normalized),
absAddr = makeTemp(con);
// VFP loads only have bases, so we must add the index
addR(con, vm::TargetBytesPerWord, &base_, &normalized_, &absAddr);
@ -946,12 +946,12 @@ void load(Context* con,

case 8: {
if (dstSize == 8) {
lir::Register dstHigh(dst->high);
lir::RegisterPair dstHigh(dst->high);
load(con,
4,
base,
offset,
lir::NoRegister,
NoRegister,
1,
4,
&dstHigh,
@ -961,7 +961,7 @@ void load(Context* con,
4,
base,
offset + 4,
lir::NoRegister,
NoRegister,
1,
4,
dst,
@ -984,7 +984,7 @@ void load(Context* con,
emit(con, flds(fpr32(dst), base, offset));
}
} else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
ResolvedPromise offsetPromise(offset);
lir::Constant offsetConstant(&offsetPromise);
moveCR(con,
@ -1003,7 +1003,7 @@ void moveMR(Context* con,
unsigned srcSize,
lir::Memory* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
load(con,
srcSize,
@ -1021,7 +1021,7 @@ void moveZMR(Context* con,
unsigned srcSize,
lir::Memory* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
load(con,
srcSize,
@ -1037,9 +1037,9 @@ void moveZMR(Context* con,

void andR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
if (size == 8)
emit(con, and_(dst->high, a->high, b->high));
@ -1049,8 +1049,8 @@ void andR(Context* con,
void andC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
int64_t v = a->value->value();

@ -1061,8 +1061,8 @@ void andC(Context* con,
ResolvedPromise low(v & 0xFFFFFFFF);
lir::Constant al(&low);

lir::Register bh(b->high);
lir::Register dh(dst->high);
lir::RegisterPair bh(b->high);
lir::RegisterPair dh(dst->high);

andC(con, 4, &al, b, dst);
andC(con, 4, &ah, &bh, &dh);
@ -1078,7 +1078,7 @@ void andC(Context* con,
// instruction

bool useTemporary = b->low == dst->low;
lir::Register tmp(dst->low);
lir::RegisterPair tmp(dst->low);
if (useTemporary) {
tmp.low = con->client->acquireTemporary(GPR_MASK);
}
@ -1098,9 +1098,9 @@ void andC(Context* con,

void orR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
if (size == 8)
emit(con, orr(dst->high, a->high, b->high));
@ -1109,9 +1109,9 @@ void orR(Context* con,

void xorR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst)
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst)
{
if (size == 8)
emit(con, eor(dst->high, a->high, b->high));
@ -1122,14 +1122,14 @@ void moveAR2(Context* con,
unsigned srcSize,
lir::Address* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
assertT(con, srcSize == 4 and dstSize == 4);

lir::Constant constant(src->address);
moveCR(con, srcSize, &constant, dstSize, dst);

lir::Memory memory(dst->low, 0, -1, 0);
lir::Memory memory(dst->low, 0, NoRegister, 0);
moveMR(con, dstSize, &memory, dstSize, dst);
}

@ -1137,16 +1137,16 @@ void moveAR(Context* con,
unsigned srcSize,
lir::Address* src,
unsigned dstSize,
lir::Register* dst)
lir::RegisterPair* dst)
{
moveAR2(con, srcSize, src, dstSize, dst);
}

void compareRR(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(con, !(isFpr(a) ^ isFpr(b))); // regs must be of the same type

@ -1168,14 +1168,14 @@ void compareCR(Context* con,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b)
lir::RegisterPair* b)
{
assertT(con, aSize == 4 and bSize == 4);

if (!isFpr(b) && a->value->resolved() && isOfWidth(a->value->value(), 8)) {
emit(con, cmpi(b->low, a->value->value()));
} else {
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con, aSize, a, bSize, &tmp);
compareRR(con, bSize, &tmp, bSize, b);
con->client->releaseTemporary(tmp.low);
@ -1190,7 +1190,7 @@ void compareCM(Context* con,
{
assertT(con, aSize == 4 and bSize == 4);

lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveMR(con, bSize, b, bSize, &tmp);
compareCR(con, aSize, a, bSize, &tmp);
con->client->releaseTemporary(tmp.low);
@ -1198,13 +1198,13 @@ void compareCM(Context* con,

void compareRM(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Memory* b)
{
assertT(con, aSize == 4 and bSize == 4);

lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveMR(con, bSize, b, bSize, &tmp);
compareRR(con, aSize, a, bSize, &tmp);
con->client->releaseTemporary(tmp.low);
@ -1352,13 +1352,13 @@ void branchLong(Context* con,
void branchRR(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::Constant* target)
{
if (!isFpr(a) && size > vm::TargetBytesPerWord) {
lir::Register ah(a->high);
lir::Register bh(b->high);
lir::RegisterPair ah(a->high);
lir::RegisterPair bh(b->high);

branchLong(
con, op, a, &ah, b, &bh, target, CAST2(compareRR), CAST2(compareRR));
@ -1372,7 +1372,7 @@ void branchCR(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::RegisterPair* b,
lir::Constant* target)
{
assertT(con, !isFloatBranch(op));
@ -1386,7 +1386,7 @@ void branchCR(Context* con,
ResolvedPromise high((v >> 32) & ~static_cast<vm::target_uintptr_t>(0));
lir::Constant ah(&high);

lir::Register bh(b->high);
lir::RegisterPair bh(b->high);

branchLong(
con, op, &al, &ah, b, &bh, target, CAST2(compareCR), CAST2(compareCR));
@ -1399,7 +1399,7 @@ void branchCR(Context* con,
void branchRM(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
lir::Memory* b,
lir::Constant* target)
{
@ -1450,7 +1450,7 @@ void moveCM(Context* con,
} break;

default:
lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
lir::RegisterPair tmp(con->client->acquireTemporary(GPR_MASK));
moveCR(con, srcSize, src, dstSize, &tmp);
moveRM(con, dstSize, &tmp, dstSize, dst);
con->client->releaseTemporary(tmp.low);
@ -1459,9 +1459,9 @@ void moveCM(Context* con,

void negateRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize UNUSED,
lir::Register* dst)
lir::RegisterPair* dst)
{
assertT(con, srcSize == dstSize);

@ -1473,7 +1473,7 @@ void negateRR(Context* con,
}
}

void callR(Context* con, unsigned size UNUSED, lir::Register* target)
void callR(Context* con, unsigned size UNUSED, lir::RegisterPair* target)
{
assertT(con, size == vm::TargetBytesPerWord);
emit(con, blx(target->low));
@ -1491,7 +1491,7 @@ void longCallC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assertT(con, size == vm::TargetBytesPerWord);

lir::Register tmp(4);
lir::RegisterPair tmp(Register(4));
moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con));
callR(con, vm::TargetBytesPerWord, &tmp);
}
@ -1500,7 +1500,7 @@ void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assertT(con, size == vm::TargetBytesPerWord);

lir::Register tmp(4); // a non-arg reg that we don't mind clobbering
lir::RegisterPair tmp(Register(4)); // a non-arg reg that we don't mind clobbering
moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con));
jumpR(con, vm::TargetBytesPerWord, &tmp);
}
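The hunks above and below rely on the Register and RegisterMask value types this refactoring introduces in the new registers.h header, which is not part of this excerpt. A minimal sketch of the shape these call sites appear to assume (names and members inferred from usage such as .index(), .excluding(), NoRegister, and AnyRegisterMask, not copied from the commit):

#include <stdint.h>

class Register {
 public:
  constexpr explicit Register(int8_t index) : _index(index) { }  // e.g. Register(4) above
  constexpr Register() : _index(-1) { }
  constexpr int index() const { return _index; }                 // used by fpr64(), reserved(), ...
  constexpr bool operator==(Register o) const { return _index == o._index; }
  constexpr bool operator!=(Register o) const { return _index != o._index; }
 private:
  int8_t _index;
};
constexpr Register NoRegister(-1);

class RegisterMask {
 public:
  constexpr RegisterMask(uint64_t mask) : mask(mask) { }          // e.g. GPR_MASK = 0xffff
  constexpr RegisterMask(Register r) : mask(static_cast<uint64_t>(1) << r.index()) { }
  constexpr RegisterMask excluding(Register r) const {            // used by the x86 planners below
    return RegisterMask(mask & ~(static_cast<uint64_t>(1) << r.index()));
  }
  constexpr operator uint64_t() const { return mask; }            // allows masks to be or'd and tested
  uint64_t mask;
};
constexpr RegisterMask AnyRegisterMask(~static_cast<uint64_t>(0));
// The real header presumably also defines the bitwise operators the hunks below
// use directly on registers, e.g. operator| so that rax | rcx | rdx | rbx builds a mask.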
@ -25,17 +25,17 @@ class Context;

// shortcut functions

inline int newTemp(Context* con)
inline Register newTemp(Context* con)
{
return con->client->acquireTemporary(GPR_MASK);
}

inline int newTemp(Context* con, unsigned mask)
inline Register newTemp(Context* con, RegisterMask mask)
{
return con->client->acquireTemporary(mask);
}

inline void freeTemp(Context* con, int r)
inline void freeTemp(Context* con, Register r)
{
con->client->releaseTemporary(r);
}
@ -45,67 +45,67 @@ inline int64_t getValue(lir::Constant* con)
return con->value->value();
}

inline lir::Register makeTemp(Context* con)
inline lir::RegisterPair makeTemp(Context* con)
{
lir::Register tmp(newTemp(con));
lir::RegisterPair tmp(newTemp(con));
return tmp;
}

inline lir::Register makeTemp64(Context* con)
inline lir::RegisterPair makeTemp64(Context* con)
{
lir::Register tmp(newTemp(con), newTemp(con));
lir::RegisterPair tmp(newTemp(con), newTemp(con));
return tmp;
}

inline void freeTemp(Context* con, const lir::Register& tmp)
inline void freeTemp(Context* con, const lir::RegisterPair& tmp)
{
if (tmp.low != lir::NoRegister)
if (tmp.low != NoRegister)
freeTemp(con, tmp.low);
if (tmp.high != lir::NoRegister)
if (tmp.high != NoRegister)
freeTemp(con, tmp.high);
}

void shiftLeftR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void moveRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);

void shiftLeftC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* b,
lir::RegisterPair* t);

void shiftRightR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void shiftRightC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* b,
lir::RegisterPair* t);

void unsignedShiftRightR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void unsignedShiftRightC(Context* con,
unsigned size UNUSED,
lir::Constant* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* b,
lir::RegisterPair* t);

bool needJump(MyBlock* b);

@ -113,133 +113,133 @@ unsigned padding(MyBlock* b, unsigned offset);

void resolve(MyBlock* b);

void jumpR(Context* con, unsigned size UNUSED, lir::Register* target);
void jumpR(Context* con, unsigned size UNUSED, lir::RegisterPair* target);

void swapRR(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);

void moveRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);

void moveZRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned,
lir::Register* dst);
lir::RegisterPair* dst);

void moveCR(Context* con,
unsigned size,
lir::Constant* src,
unsigned,
lir::Register* dst);
lir::RegisterPair* dst);

void moveCR2(Context* con,
unsigned size,
lir::Constant* src,
lir::Register* dst,
lir::RegisterPair* dst,
Promise* callOffset);

void moveCR(Context* con,
unsigned size,
lir::Constant* src,
unsigned,
lir::Register* dst);
lir::RegisterPair* dst);

void addR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void subR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void addC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* b,
lir::RegisterPair* dst);

void subC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* b,
lir::RegisterPair* dst);

void multiplyR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void floatAbsoluteRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);

void floatNegateRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);

void float2FloatRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);

void float2IntRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);

void int2FloatRR(Context* con,
unsigned,
lir::Register* a,
lir::RegisterPair* a,
unsigned size,
lir::Register* b);
lir::RegisterPair* b);

void floatSqrtRR(Context* con,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
unsigned,
lir::Register* b);
lir::RegisterPair* b);

void floatAddR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void floatSubtractR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void floatMultiplyR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

void floatDivideR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* t);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* t);

int normalize(Context* con,
int offset,
@ -250,7 +250,7 @@ int normalize(Context* con,

void store(Context* con,
unsigned size,
lir::Register* src,
lir::RegisterPair* src,
int base,
int offset,
int index,
@ -259,7 +259,7 @@ void store(Context* con,

void moveRM(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize UNUSED,
lir::Memory* dst);

@ -270,7 +270,7 @@ void load(Context* con,
int index,
unsigned scale,
unsigned dstSize,
lir::Register* dst,
lir::RegisterPair* dst,
bool preserveIndex,
bool signExtend);

@ -278,61 +278,61 @@ void moveMR(Context* con,
unsigned srcSize,
lir::Memory* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);

void moveZMR(Context* con,
unsigned srcSize,
lir::Memory* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);

void andR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst);

void andC(Context* con,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* b,
lir::RegisterPair* dst);

void orR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst);

void xorR(Context* con,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::Register* dst);
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::RegisterPair* dst);

void moveAR2(Context* con,
unsigned srcSize,
lir::Address* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);

void moveAR(Context* con,
unsigned srcSize,
lir::Address* src,
unsigned dstSize,
lir::Register* dst);
lir::RegisterPair* dst);

void compareRR(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize UNUSED,
lir::Register* b);
lir::RegisterPair* b);

void compareCR(Context* con,
unsigned aSize,
lir::Constant* a,
unsigned bSize,
lir::Register* b);
lir::RegisterPair* b);

void compareCM(Context* con,
unsigned aSize,
@ -342,7 +342,7 @@ void compareCM(Context* con,

void compareRM(Context* con,
unsigned aSize,
lir::Register* a,
lir::RegisterPair* a,
unsigned bSize,
lir::Memory* b);

@ -365,21 +365,21 @@ void branchLong(Context* con,
void branchRR(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::Register* b,
lir::RegisterPair* a,
lir::RegisterPair* b,
lir::Constant* target);

void branchCR(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Constant* a,
lir::Register* b,
lir::RegisterPair* b,
lir::Constant* target);

void branchRM(Context* con,
lir::TernaryOperation op,
unsigned size,
lir::Register* a,
lir::RegisterPair* a,
lir::Memory* b,
lir::Constant* target);

@ -403,11 +403,11 @@ void moveCM(Context* con,

void negateRR(Context* con,
unsigned srcSize,
lir::Register* src,
lir::RegisterPair* src,
unsigned dstSize UNUSED,
lir::Register* dst);
lir::RegisterPair* dst);

void callR(Context* con, unsigned size UNUSED, lir::Register* target);
void callR(Context* con, unsigned size UNUSED, lir::RegisterPair* target);

void callC(Context* con, unsigned size UNUSED, lir::Constant* target);
@ -19,49 +19,45 @@ namespace codegen {
namespace arm {

const uint64_t MASK_LO32 = 0xffffffff;
const unsigned MASK_LO16 = 0xffff;
const unsigned MASK_LO8 = 0xff;

const int N_GPRS = 16;
const int N_FPRS = 16;
const uint32_t GPR_MASK = 0xffff;
const uint32_t FPR_MASK = 0xffff0000;
const RegisterMask GPR_MASK = 0xffff;
const RegisterMask FPR_MASK = 0xffff0000;

const uint64_t GPR_MASK64 = GPR_MASK | (uint64_t)GPR_MASK << 32;
const uint64_t FPR_MASK64 = FPR_MASK | (uint64_t)FPR_MASK << 32;

inline bool isFpr(lir::Register* reg)
inline bool isFpr(lir::RegisterPair* reg)
{
return reg->low >= N_GPRS;
return reg->low.index() >= N_GPRS;
}

inline int fpr64(int reg)
inline int fpr64(Register reg)
{
return reg - N_GPRS;
return reg.index() - N_GPRS;
}
inline int fpr64(lir::Register* reg)
inline int fpr64(lir::RegisterPair* reg)
{
return fpr64(reg->low);
}
inline int fpr32(int reg)
inline int fpr32(Register reg)
{
return fpr64(reg) << 1;
}
inline int fpr32(lir::Register* reg)
inline int fpr32(lir::RegisterPair* reg)
{
return fpr64(reg) << 1;
}

#ifdef ARCH_arm64
const int ThreadRegister = 19;
const int StackRegister = 31;
const int LinkRegister = 30;
const int ProgramCounter = 0xFF; // i.e. unaddressable
constexpr Register ThreadRegister(19);
constexpr Register StackRegister(31);
constexpr Register LinkRegister(30);
constexpr Register ProgramCounter(0xFE); // i.e. unaddressable
#else
const int ThreadRegister = 8;
const int StackRegister = 13;
const int LinkRegister = 14;
const int ProgramCounter = 15;
constexpr Register ThreadRegister(8);
constexpr Register StackRegister(13);
constexpr Register LinkRegister(14);
constexpr Register ProgramCounter(15);
#endif

} // namespace arm
@ -17,9 +17,9 @@ namespace codegen {
class Multimethod {
public:
inline static unsigned index(lir::UnaryOperation operation,
lir::OperandType operand)
lir::Operand::Type operand)
{
return operation + (lir::UnaryOperationCount * operand);
return operation + (lir::UnaryOperationCount * (unsigned)operand);
}
};
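The added cast is the only functional change in this hunk: once lir::OperandType becomes the nested lir::Operand::Type (presumably a scoped enum), it no longer converts implicitly to an integer, so the multimethod index has to spell the conversion out. A rough illustration under that assumption, with placeholder names:

enum class Type : uint8_t { RegisterPair, Memory };          // hypothetical nested enum
const unsigned UnaryOperationCount = 16;                     // placeholder value
inline unsigned index(unsigned operation, Type operand)
{
  // return operation + (UnaryOperationCount * operand);     // rejected for a scoped enum
  return operation + (UnaryOperationCount * static_cast<unsigned>(operand));
}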
@ -183,37 +183,37 @@ class MyArchitecture : public Architecture {
return &myRegisterFile;
}

virtual int scratch()
virtual Register scratch()
{
return rax;
}

virtual int stack()
virtual Register stack()
{
return rsp;
}

virtual int thread()
virtual Register thread()
{
return rbx;
}

virtual int returnLow()
virtual Register returnLow()
{
return rax;
}

virtual int returnHigh()
virtual Register returnHigh()
{
return (TargetBytesPerWord == 4 ? rdx : lir::NoRegister);
return (TargetBytesPerWord == 4 ? rdx : NoRegister);
}

virtual int virtualCallTarget()
virtual Register virtualCallTarget()
{
return rax;
}

virtual int virtualCallIndex()
virtual Register virtualCallIndex()
{
return rdx;
}
@ -233,14 +233,14 @@ class MyArchitecture : public Architecture {
return 0x7FFFFFFF;
}

virtual bool reserved(int register_)
virtual bool reserved(Register register_)
{
switch (register_) {
case rbp:
switch (register_.index()) {
case rbp.index():
return UseFramePointer;

case rsp:
case rbx:
case rsp.index():
case rbx.index():
return true;

default:
@ -289,7 +289,7 @@ class MyArchitecture : public Architecture {
return 0;
}

virtual int argumentRegister(unsigned index)
virtual Register argumentRegister(unsigned index)
{
assertT(&c, TargetBytesPerWord == 8);
switch (index) {
@ -501,8 +501,8 @@ class MyArchitecture : public Architecture {
OperandMask& aMask,
bool* thunk)
{
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand)
| (1 << lir::ConstantOperand);
aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::MemoryMask
| lir::Operand::ConstantMask;
*thunk = false;
}

@ -512,22 +512,20 @@ class MyArchitecture : public Architecture {
unsigned bSize,
bool* thunk)
{
aMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);

*thunk = false;

switch (op) {
case lir::Negate:
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32))
| (static_cast<uint64_t>(1) << rax);
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(rax, rdx);
break;

case lir::Absolute:
if (aSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(1) << rax);
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(rax, 0);
} else {
*thunk = true;
}
@ -535,9 +533,8 @@ class MyArchitecture : public Architecture {

case lir::FloatAbsolute:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -546,8 +543,8 @@ class MyArchitecture : public Architecture {
case lir::FloatNegate:
// floatNegateRR does not support doubles
if (useSSE(&c) and aSize == 4 and bSize == 4) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, 0);
} else {
*thunk = true;
}
@ -555,10 +552,9 @@ class MyArchitecture : public Architecture {

case lir::FloatSquareRoot:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -566,10 +562,9 @@ class MyArchitecture : public Architecture {

case lir::Float2Float:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -581,10 +576,9 @@ class MyArchitecture : public Architecture {
// thunks or produce inline machine code which handles edge
// cases properly.
if (false and useSSE(&c) and bSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -592,11 +586,9 @@ class MyArchitecture : public Architecture {

case lir::Int2Float:
if (useSSE(&c) and aSize <= TargetBytesPerWord) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
aMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
} else {
*thunk = true;
}
@ -604,21 +596,20 @@ class MyArchitecture : public Architecture {

case lir::Move:
aMask.typeMask = ~0;
aMask.registerMask = ~static_cast<uint64_t>(0);
aMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);

if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
const uint32_t mask = GeneralRegisterMask
& ~((1 << rax) | (1 << rdx));
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
const RegisterMask mask = GeneralRegisterMask
.excluding(rax).excluding(rdx);
aMask.setLowHighRegisterMasks(mask, mask);
} else if (aSize == 1 or bSize == 1) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
const uint32_t mask = (1 << rax) | (1 << rcx) | (1 << rdx)
| (1 << rbx);
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
const RegisterMask mask = rax | rcx | rdx | rbx;
aMask.setLowHighRegisterMasks(mask, mask);
}
}
break;
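Throughout these planner methods the old code packed both halves of a register-pair constraint into a single uint64_t (low half in bits 0-31, high half in bits 32-63); the new code carries the two halves as separate RegisterMask fields so each half can address up to 64 registers. A small sketch of how a legacy packed value maps onto the new split form, using the OperandMask and setLowHighRegisterMasks seen in these hunks (the helper name is illustrative, not part of the commit):

// Sketch: convert an old-style packed 64-bit mask into the new split masks.
void setFromPackedMask(OperandMask& m, uint64_t packed)
{
  RegisterMask low(packed & 0xFFFFFFFF);   // bits 0-31 constrained the low register
  RegisterMask high(packed >> 32);         // bits 32-63 constrained the high register
  m.setLowHighRegisterMasks(low, high);
}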
@ -635,68 +626,62 @@ class MyArchitecture : public Architecture {
OperandMask& bMask)
{
bMask.typeMask = ~0;
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);

switch (op) {
case lir::Absolute:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = (static_cast<uint64_t>(1) << rax);
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(rax, 0);
break;

case lir::FloatAbsolute:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = aMask.registerMask;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
break;

case lir::Negate:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = aMask.registerMask;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
break;

case lir::FloatNegate:
case lir::FloatSquareRoot:
case lir::Float2Float:
case lir::Int2Float:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
break;

case lir::Float2Int:
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.typeMask = lir::Operand::RegisterPairMask;
break;

case lir::Move:
if (aMask.typeMask
& ((1 << lir::MemoryOperand) | 1 << lir::AddressOperand)) {
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask)
<< 32) | FloatRegisterMask;
} else if (aMask.typeMask & (1 << lir::RegisterOperand)) {
bMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
if (aMask.registerMask & FloatRegisterMask) {
bMask.registerMask = FloatRegisterMask;
& (lir::Operand::MemoryMask | lir::Operand::AddressMask)) {
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(GeneralRegisterMask | FloatRegisterMask, GeneralRegisterMask);
} else if (aMask.typeMask & lir::Operand::RegisterPairMask) {
bMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
if (aMask.lowRegisterMask & FloatRegisterMask) {
bMask.setLowHighRegisterMasks(FloatRegisterMask, 0);
} else {
bMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
}
} else {
bMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
bMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
}

if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) {
bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32))
| (static_cast<uint64_t>(1) << rax);
bMask.setLowHighRegisterMasks(rax, rdx);
} else if (aSize == 1 or bSize == 1) {
const uint32_t mask = (1 << rax) | (1 << rcx) | (1 << rdx)
| (1 << rbx);
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
const RegisterMask mask = rax | rcx | rdx | rbx;
bMask.setLowHighRegisterMasks(mask, mask);
}
}
break;
@ -712,46 +697,38 @@ class MyArchitecture : public Architecture {
const OperandMask& dstMask)
{
srcMask.typeMask = ~0;
srcMask.registerMask = ~static_cast<uint64_t>(0);
srcMask.setLowHighRegisterMasks(AnyRegisterMask, AnyRegisterMask);

tmpMask.typeMask = 0;
tmpMask.registerMask = 0;
tmpMask.setLowHighRegisterMasks(0, 0);

if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
if (dstMask.typeMask & lir::Operand::MemoryMask) {
// can't move directly from memory to memory
srcMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::ConstantOperand);
tmpMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} else if (dstMask.typeMask & (1 << lir::RegisterOperand)) {
srcMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::ConstantMask;
tmpMask.typeMask = lir::Operand::RegisterPairMask;
tmpMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
} else if (dstMask.typeMask & lir::Operand::RegisterPairMask) {
if (size > TargetBytesPerWord) {
// can't move directly from FPR to GPR or vice-versa for
// values larger than the GPR size
if (dstMask.registerMask & FloatRegisterMask) {
srcMask.registerMask
= FloatRegisterMask
| (static_cast<uint64_t>(FloatRegisterMask) << 32);
tmpMask.typeMask = 1 << lir::MemoryOperand;
} else if (dstMask.registerMask & GeneralRegisterMask) {
srcMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
tmpMask.typeMask = 1 << lir::MemoryOperand;
if (dstMask.lowRegisterMask & FloatRegisterMask) {
srcMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
tmpMask.typeMask = lir::Operand::MemoryMask;
} else if (dstMask.lowRegisterMask & GeneralRegisterMask) {
srcMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
tmpMask.typeMask = lir::Operand::MemoryMask;
}
}
if (dstMask.registerMask & FloatRegisterMask) {
if (dstMask.lowRegisterMask & FloatRegisterMask) {
// can't move directly from constant to FPR
srcMask.typeMask &= ~(1 << lir::ConstantOperand);
srcMask.typeMask &= ~lir::Operand::ConstantMask;
if (size > TargetBytesPerWord) {
tmpMask.typeMask = 1 << lir::MemoryOperand;
tmpMask.typeMask = lir::Operand::MemoryMask;
} else {
tmpMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
tmpMask.registerMask
= GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
tmpMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
tmpMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);
}
}
}
@ -765,13 +742,11 @@ class MyArchitecture : public Architecture {
unsigned,
bool* thunk)
{
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
aMask.typeMask = lir::Operand::RegisterPairMask | lir::Operand::ConstantMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);

bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
bMask.typeMask = lir::Operand::RegisterPairMask;
bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask);

*thunk = false;

@ -781,14 +756,12 @@ class MyArchitecture : public Architecture {
case lir::FloatMultiply:
case lir::FloatDivide:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand)
| (1 << lir::MemoryOperand);
bMask.typeMask = (1 << lir::RegisterOperand);
aMask.typeMask = lir::Operand::RegisterPairMask
| lir::Operand::MemoryMask;
bMask.typeMask = lir::Operand::RegisterPairMask;

const uint64_t mask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.registerMask = mask;
bMask.registerMask = mask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
bMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
} else {
*thunk = true;
}
@ -800,12 +773,12 @@ class MyArchitecture : public Architecture {

case lir::Multiply:
if (TargetBytesPerWord == 4 and aSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32)) | mask;
const RegisterMask mask = GeneralRegisterMask .excluding(rax).excluding(rdx);
aMask.setLowHighRegisterMasks(mask, mask);
bMask.setLowHighRegisterMasks(mask, rdx);
} else {
aMask.registerMask = GeneralRegisterMask;
bMask.registerMask = GeneralRegisterMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask, 0);
bMask.setLowHighRegisterMasks(GeneralRegisterMask, 0);
}
break;
@ -813,9 +786,9 @@ class MyArchitecture : public Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true;
} else {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
bMask.registerMask = 1 << rax;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask .excluding(rax).excluding(rdx), 0);
bMask.setLowHighRegisterMasks(rax, 0);
}
break;

@ -823,9 +796,9 @@ class MyArchitecture : public Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true;
} else {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
bMask.registerMask = 1 << rax;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(GeneralRegisterMask .excluding(rax).excluding(rdx), 0);
bMask.setLowHighRegisterMasks(rax, 0);
}
break;

@ -833,14 +806,13 @@ class MyArchitecture : public Architecture {
case lir::ShiftRight:
case lir::UnsignedShiftRight: {
if (TargetBytesPerWord == 4 and bSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~(1 << rcx);
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
const RegisterMask mask = GeneralRegisterMask.excluding(rcx);
aMask.setLowHighRegisterMasks(mask, mask);
bMask.setLowHighRegisterMasks(mask, mask);
} else {
aMask.registerMask = (static_cast<uint64_t>(GeneralRegisterMask) << 32)
| (static_cast<uint64_t>(1) << rcx);
const uint32_t mask = GeneralRegisterMask & ~(1 << rcx);
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
aMask.setLowHighRegisterMasks(rcx, GeneralRegisterMask);
const RegisterMask mask = GeneralRegisterMask.excluding(rcx);
bMask.setLowHighRegisterMasks(mask, mask);
}
} break;

@ -855,11 +827,11 @@ class MyArchitecture : public Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered:
if (useSSE(&c)) {
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
aMask.typeMask = lir::Operand::RegisterPairMask;
aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask);
bMask.typeMask = aMask.typeMask;
bMask.registerMask = aMask.registerMask;
bMask.lowRegisterMask = aMask.lowRegisterMask;
bMask.highRegisterMask = aMask.highRegisterMask;
} else {
*thunk = true;
}
@ -879,11 +851,12 @@ class MyArchitecture : public Architecture {
OperandMask& cMask)
{
if (isBranch(op)) {
cMask.typeMask = (1 << lir::ConstantOperand);
cMask.registerMask = 0;
cMask.typeMask = lir::Operand::ConstantMask;
cMask.setLowHighRegisterMasks(0, 0);
} else {
cMask.typeMask = (1 << lir::RegisterOperand);
cMask.registerMask = bMask.registerMask;
cMask.typeMask = lir::Operand::RegisterPairMask;
cMask.lowRegisterMask = bMask.lowRegisterMask;
cMask.highRegisterMask = bMask.highRegisterMask;
}
}

@ -927,7 +900,7 @@ class MyAssembler : public Assembler {
virtual void checkStackOverflow(uintptr_t handler,
unsigned stackLimitOffsetFromThread)
{
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Memory stackLimit(rbx, stackLimitOffsetFromThread);
lir::Constant handlerConstant(resolvedPromise(&c, handler));
branchRM(&c,
@ -940,11 +913,11 @@ class MyAssembler : public Assembler {

virtual void saveFrame(unsigned stackOffset, unsigned)
{
lir::Register stack(rsp);
lir::RegisterPair stack(rsp);
lir::Memory stackDst(rbx, stackOffset);
apply(lir::Move,
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &stackDst));
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &stackDst));
|
||||
}
|
||||
|
||||
virtual void pushFrame(unsigned argumentCount, ...)
|
||||
@ -952,7 +925,7 @@ class MyAssembler : public Assembler {
|
||||
// TODO: Argument should be replaced by OperandInfo...
|
||||
struct Argument {
|
||||
unsigned size;
|
||||
lir::OperandType type;
|
||||
lir::Operand::Type type;
|
||||
lir::Operand* operand;
|
||||
};
|
||||
RUNTIME_ARRAY(Argument, arguments, argumentCount);
|
||||
@ -962,7 +935,7 @@ class MyAssembler : public Assembler {
|
||||
for (unsigned i = 0; i < argumentCount; ++i) {
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned);
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].type
|
||||
= static_cast<lir::OperandType>(va_arg(a, int));
|
||||
= static_cast<lir::Operand::Type>(va_arg(a, int));
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*);
|
||||
footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
TargetBytesPerWord);
|
||||
@ -974,14 +947,14 @@ class MyAssembler : public Assembler {
|
||||
unsigned offset = 0;
|
||||
for (unsigned i = 0; i < argumentCount; ++i) {
|
||||
if (i < arch_->argumentRegisterCount()) {
|
||||
lir::Register dst(arch_->argumentRegister(i));
|
||||
lir::RegisterPair dst(arch_->argumentRegister(i));
|
||||
apply(lir::Move,
|
||||
OperandInfo(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].type,
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].operand),
|
||||
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
TargetBytesPerWord),
|
||||
lir::RegisterOperand,
|
||||
lir::Operand::Type::RegisterPair,
|
||||
&dst));
|
||||
} else {
|
||||
lir::Memory dst(rsp, offset * TargetBytesPerWord);
|
||||
@ -991,7 +964,7 @@ class MyAssembler : public Assembler {
|
||||
RUNTIME_ARRAY_BODY(arguments)[i].operand),
|
||||
OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
TargetBytesPerWord),
|
||||
lir::MemoryOperand,
|
||||
lir::Operand::Type::Memory,
|
||||
&dst));
|
||||
offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size,
|
||||
TargetBytesPerWord);
|
||||
@ -1001,67 +974,67 @@ class MyAssembler : public Assembler {
|
||||
|
||||
virtual void allocateFrame(unsigned footprint)
|
||||
{
|
||||
lir::Register stack(rsp);
|
||||
lir::RegisterPair stack(rsp);
|
||||
|
||||
if (UseFramePointer) {
|
||||
lir::Register base(rbp);
|
||||
lir::RegisterPair base(rbp);
|
||||
pushR(&c, TargetBytesPerWord, &base);
|
||||
|
||||
apply(lir::Move,
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &base));
|
||||
}
|
||||
|
||||
lir::Constant footprintConstant(
|
||||
resolvedPromise(&c, footprint * TargetBytesPerWord));
|
||||
apply(lir::Subtract,
|
||||
OperandInfo(
|
||||
TargetBytesPerWord, lir::ConstantOperand, &footprintConstant),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
|
||||
TargetBytesPerWord, lir::Operand::Type::Constant, &footprintConstant),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
|
||||
}
|
||||
|
||||
virtual void adjustFrame(unsigned difference)
|
||||
{
|
||||
lir::Register stack(rsp);
|
||||
lir::RegisterPair stack(rsp);
|
||||
lir::Constant differenceConstant(
|
||||
resolvedPromise(&c, difference * TargetBytesPerWord));
|
||||
apply(lir::Subtract,
|
||||
OperandInfo(
|
||||
TargetBytesPerWord, lir::ConstantOperand, &differenceConstant),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
|
||||
TargetBytesPerWord, lir::Operand::Type::Constant, &differenceConstant),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
|
||||
}
|
||||
|
||||
virtual void popFrame(unsigned frameFootprint)
|
||||
{
|
||||
if (UseFramePointer) {
|
||||
lir::Register base(rbp);
|
||||
lir::Register stack(rsp);
|
||||
lir::RegisterPair base(rbp);
|
||||
lir::RegisterPair stack(rsp);
|
||||
apply(lir::Move,
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &base),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
|
||||
|
||||
popR(&c, TargetBytesPerWord, &base);
|
||||
} else {
|
||||
lir::Register stack(rsp);
|
||||
lir::RegisterPair stack(rsp);
|
||||
lir::Constant footprint(
|
||||
resolvedPromise(&c, frameFootprint * TargetBytesPerWord));
|
||||
apply(lir::Add,
|
||||
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &footprint),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &footprint),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
|
||||
}
|
||||
}
|
||||
|
||||
virtual void popFrameForTailCall(unsigned frameFootprint,
|
||||
int offset,
|
||||
int returnAddressSurrogate,
|
||||
int framePointerSurrogate)
|
||||
Register returnAddressSurrogate,
|
||||
Register framePointerSurrogate)
|
||||
{
|
||||
if (TailCalls) {
|
||||
if (offset) {
|
||||
lir::Register tmp(c.client->acquireTemporary());
|
||||
lir::RegisterPair tmp(c.client->acquireTemporary());
|
||||
|
||||
unsigned baseSize = UseFramePointer ? 1 : 0;
|
||||
|
||||
@ -1085,28 +1058,28 @@ class MyAssembler : public Assembler {
|
||||
|
||||
if (UseFramePointer) {
|
||||
lir::Memory baseSrc(rsp, frameFootprint * TargetBytesPerWord);
|
||||
lir::Register base(rbp);
|
||||
lir::RegisterPair base(rbp);
|
||||
moveMR(&c, TargetBytesPerWord, &baseSrc, TargetBytesPerWord, &base);
|
||||
}
|
||||
|
||||
lir::Register stack(rsp);
|
||||
lir::RegisterPair stack(rsp);
|
||||
lir::Constant footprint(resolvedPromise(
|
||||
&c, (frameFootprint - offset + baseSize) * TargetBytesPerWord));
|
||||
|
||||
addCR(&c, TargetBytesPerWord, &footprint, TargetBytesPerWord, &stack);
|
||||
|
||||
if (returnAddressSurrogate != lir::NoRegister) {
|
||||
if (returnAddressSurrogate != NoRegister) {
|
||||
assertT(&c, offset > 0);
|
||||
|
||||
lir::Register ras(returnAddressSurrogate);
|
||||
lir::RegisterPair ras(returnAddressSurrogate);
|
||||
lir::Memory dst(rsp, offset * TargetBytesPerWord);
|
||||
moveRM(&c, TargetBytesPerWord, &ras, TargetBytesPerWord, &dst);
|
||||
}
|
||||
|
||||
if (framePointerSurrogate != lir::NoRegister) {
|
||||
if (framePointerSurrogate != NoRegister) {
|
||||
assertT(&c, offset > 0);
|
||||
|
||||
lir::Register fps(framePointerSurrogate);
|
||||
lir::RegisterPair fps(framePointerSurrogate);
|
||||
lir::Memory dst(rsp, (offset - 1) * TargetBytesPerWord);
|
||||
moveRM(&c, TargetBytesPerWord, &fps, TargetBytesPerWord, &dst);
|
||||
}
|
||||
@ -1127,10 +1100,10 @@ class MyAssembler : public Assembler {
|
||||
assertT(&c, (argumentFootprint % StackAlignmentInWords) == 0);
|
||||
|
||||
if (TailCalls and argumentFootprint > StackAlignmentInWords) {
|
||||
lir::Register returnAddress(rcx);
|
||||
lir::RegisterPair returnAddress(rcx);
|
||||
popR(&c, TargetBytesPerWord, &returnAddress);
|
||||
|
||||
lir::Register stack(rsp);
|
||||
lir::RegisterPair stack(rsp);
|
||||
lir::Constant adjustment(resolvedPromise(
|
||||
&c,
|
||||
(argumentFootprint - StackAlignmentInWords) * TargetBytesPerWord));
|
||||
@ -1147,10 +1120,10 @@ class MyAssembler : public Assembler {
|
||||
{
|
||||
popFrame(frameFootprint);
|
||||
|
||||
lir::Register returnAddress(rcx);
|
||||
lir::RegisterPair returnAddress(rcx);
|
||||
popR(&c, TargetBytesPerWord, &returnAddress);
|
||||
|
||||
lir::Register stack(rsp);
|
||||
lir::RegisterPair stack(rsp);
|
||||
lir::Memory stackSrc(rbx, stackOffsetFromThread);
|
||||
moveMR(&c, TargetBytesPerWord, &stackSrc, TargetBytesPerWord, &stack);
|
||||
|
||||
@ -1182,7 +1155,7 @@ class MyAssembler : public Assembler {
|
||||
if (isBranch(op)) {
|
||||
assertT(&this->c, a.size == b.size);
|
||||
assertT(&this->c, c.size == TargetBytesPerWord);
|
||||
assertT(&this->c, c.type == lir::ConstantOperand);
|
||||
assertT(&this->c, c.type == lir::Operand::Type::Constant);
|
||||
|
||||
arch_->c.branchOperations[branchIndex(&(arch_->c), a.type, b.type)](
|
||||
&this->c, op, a.size, a.operand, b.operand, c.operand);
|
||||
|
@ -68,13 +68,13 @@ class ArchitectureContext {
|
||||
bool useNativeFeatures;
|
||||
OperationType operations[lir::OperationCount];
|
||||
UnaryOperationType
|
||||
unaryOperations[lir::UnaryOperationCount * lir::OperandTypeCount];
|
||||
unaryOperations[lir::UnaryOperationCount * lir::Operand::TypeCount];
|
||||
BinaryOperationType binaryOperations
|
||||
[(lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
|
||||
* lir::OperandTypeCount * lir::OperandTypeCount];
|
||||
* lir::Operand::TypeCount * lir::Operand::TypeCount];
|
||||
BranchOperationType branchOperations[lir::BranchOperationCount
|
||||
* lir::OperandTypeCount
|
||||
* lir::OperandTypeCount];
|
||||
* lir::Operand::TypeCount
|
||||
* lir::Operand::TypeCount];
|
||||
};
|
||||
|
||||
class Context {
|
||||
|
@ -53,9 +53,9 @@ namespace x86 {
|
||||
|
||||
void maybeRex(Context* c,
|
||||
unsigned size,
|
||||
int a,
|
||||
int index,
|
||||
int base,
|
||||
Register a,
|
||||
Register index,
|
||||
Register base,
|
||||
bool always)
|
||||
{
|
||||
if (vm::TargetBytesPerWord == 8) {
|
||||
@ -65,63 +65,63 @@ void maybeRex(Context* c,
|
||||
} else {
|
||||
byte = REX_NONE;
|
||||
}
|
||||
if (a != lir::NoRegister and (a & 8))
|
||||
if (a != NoRegister and (a.index() & 8))
|
||||
byte |= REX_R;
|
||||
if (index != lir::NoRegister and (index & 8))
|
||||
if (index != NoRegister and (index.index() & 8))
|
||||
byte |= REX_X;
|
||||
if (base != lir::NoRegister and (base & 8))
|
||||
if (base != NoRegister and (base.index() & 8))
|
||||
byte |= REX_B;
|
||||
if (always or byte != REX_NONE)
|
||||
c->code.append(byte);
|
||||
}
|
||||
}
|
||||
|
||||
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b)
|
||||
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b)
|
||||
{
|
||||
maybeRex(c, size, a->low, lir::NoRegister, b->low, false);
|
||||
maybeRex(c, size, a->low, NoRegister, b->low, false);
|
||||
}
|
||||
|
||||
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b)
|
||||
void alwaysRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b)
|
||||
{
|
||||
maybeRex(c, size, a->low, lir::NoRegister, b->low, true);
|
||||
maybeRex(c, size, a->low, NoRegister, b->low, true);
|
||||
}
|
||||
|
||||
void maybeRex(Context* c, unsigned size, lir::Register* a)
|
||||
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a)
|
||||
{
|
||||
maybeRex(c, size, lir::NoRegister, lir::NoRegister, a->low, false);
|
||||
maybeRex(c, size, NoRegister, NoRegister, a->low, false);
|
||||
}
|
||||
|
||||
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b)
|
||||
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::Memory* b)
|
||||
{
|
||||
maybeRex(c, size, a->low, b->index, b->base, size == 1 and (a->low & 4));
|
||||
maybeRex(c, size, a->low, b->index, b->base, size == 1 and (a->low.index() & 4));
|
||||
}
|
||||
|
||||
void maybeRex(Context* c, unsigned size, lir::Memory* a)
|
||||
{
|
||||
maybeRex(c, size, lir::NoRegister, a->index, a->base, false);
|
||||
maybeRex(c, size, NoRegister, a->index, a->base, false);
|
||||
}
|
||||
|
||||
void modrm(Context* c, uint8_t mod, int a, int b)
|
||||
void modrm(Context* c, uint8_t mod, Register a, Register b)
|
||||
{
|
||||
c->code.append(mod | (regCode(b) << 3) | regCode(a));
|
||||
}
|
||||
|
||||
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b)
|
||||
void modrm(Context* c, uint8_t mod, lir::RegisterPair* a, lir::RegisterPair* b)
|
||||
{
|
||||
modrm(c, mod, a->low, b->low);
|
||||
}
|
||||
|
||||
void sib(Context* c, unsigned scale, int index, int base)
|
||||
void sib(Context* c, unsigned scale, Register index, Register base)
|
||||
{
|
||||
c->code.append((util::log(scale) << 6) | (regCode(index) << 3)
|
||||
| regCode(base));
|
||||
}
|
||||
|
||||
void modrmSib(Context* c, int width, int a, int scale, int index, int base)
|
||||
void modrmSib(Context* c, int width, Register a, int scale, Register index, Register base)
|
||||
{
|
||||
if (index == lir::NoRegister) {
|
||||
if (index == NoRegister) {
|
||||
modrm(c, width, base, a);
|
||||
if (regCode(base) == rsp) {
|
||||
if (regCode(base) == rsp.index()) {
|
||||
sib(c, 0x00, rsp, rsp);
|
||||
}
|
||||
} else {
|
||||
@ -130,9 +130,9 @@ void modrmSib(Context* c, int width, int a, int scale, int index, int base)
|
||||
}
|
||||
}
|
||||
|
||||
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset)
|
||||
void modrmSibImm(Context* c, Register a, int scale, Register index, Register base, int offset)
|
||||
{
|
||||
if (offset == 0 and regCode(base) != rbp) {
|
||||
if (offset == 0 and regCode(base) != rbp.index()) {
|
||||
modrmSib(c, 0x00, a, scale, index, base);
|
||||
} else if (vm::fitsInInt8(offset)) {
|
||||
modrmSib(c, 0x40, a, scale, index, base);
|
||||
@ -143,7 +143,7 @@ void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset)
|
||||
}
|
||||
}
|
||||
|
||||
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b)
|
||||
void modrmSibImm(Context* c, lir::RegisterPair* a, lir::Memory* b)
|
||||
{
|
||||
modrmSibImm(c, a->low, b->scale, b->index, b->base, b->offset);
|
||||
}
|
||||
@ -177,9 +177,9 @@ void conditional(Context* c, unsigned condition, lir::Constant* a)
|
||||
|
||||
void sseMoveRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b)
|
||||
lir::RegisterPair* b)
|
||||
{
|
||||
assertT(c, aSize >= 4);
|
||||
assertT(c, aSize == bSize);
|
||||
@ -213,10 +213,10 @@ void sseMoveCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b)
|
||||
lir::RegisterPair* b)
|
||||
{
|
||||
assertT(c, aSize <= vm::TargetBytesPerWord);
|
||||
lir::Register tmp(c->client->acquireTemporary(GeneralRegisterMask));
|
||||
lir::RegisterPair tmp(c->client->acquireTemporary(GeneralRegisterMask));
|
||||
moveCR2(c, aSize, a, aSize, &tmp, 0);
|
||||
sseMoveRR(c, aSize, &tmp, bSize, b);
|
||||
c->client->releaseTemporary(tmp.low);
|
||||
@ -226,7 +226,7 @@ void sseMoveMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b)
|
||||
lir::RegisterPair* b)
|
||||
{
|
||||
assertT(c, aSize >= 4);
|
||||
|
||||
@ -244,7 +244,7 @@ void sseMoveMR(Context* c,
|
||||
|
||||
void sseMoveRM(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
UNUSED unsigned bSize,
|
||||
lir::Memory* b)
|
||||
{
|
||||
@ -353,9 +353,9 @@ void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target)
|
||||
|
||||
void floatRegOp(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b,
|
||||
lir::RegisterPair* b,
|
||||
uint8_t op,
|
||||
uint8_t mod)
|
||||
{
|
||||
@ -373,7 +373,7 @@ void floatMemOp(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b,
|
||||
lir::RegisterPair* b,
|
||||
uint8_t op)
|
||||
{
|
||||
if (aSize == 4) {
|
||||
@ -390,13 +390,13 @@ void moveCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void moveCR2(Context* c,
|
||||
UNUSED unsigned aSize,
|
||||
lir::Constant* a,
|
||||
UNUSED unsigned bSize,
|
||||
lir::Register* b,
|
||||
lir::RegisterPair* b,
|
||||
unsigned promiseOffset)
|
||||
{
|
||||
if (vm::TargetBytesPerWord == 4 and bSize == 8) {
|
||||
@ -408,7 +408,7 @@ void moveCR2(Context* c,
|
||||
ResolvedPromise low(v & 0xFFFFFFFF);
|
||||
lir::Constant al(&low);
|
||||
|
||||
lir::Register bh(b->high);
|
||||
lir::RegisterPair bh(b->high);
|
||||
|
||||
moveCR(c, 4, &al, 4, b);
|
||||
moveCR(c, 4, &ah, 4, &bh);
|
||||
|
@ -32,42 +32,42 @@ void maybeRex(Context* c,
|
||||
int base,
|
||||
bool always);
|
||||
|
||||
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Register* b);
|
||||
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
|
||||
|
||||
void alwaysRex(Context* c, unsigned size, lir::Register* a, lir::Register* b);
|
||||
void alwaysRex(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
|
||||
|
||||
void maybeRex(Context* c, unsigned size, lir::Register* a);
|
||||
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a);
|
||||
|
||||
void maybeRex(Context* c, unsigned size, lir::Register* a, lir::Memory* b);
|
||||
void maybeRex(Context* c, unsigned size, lir::RegisterPair* a, lir::Memory* b);
|
||||
|
||||
void maybeRex(Context* c, unsigned size, lir::Memory* a);
|
||||
|
||||
inline int regCode(int a)
|
||||
inline int regCode(Register a)
|
||||
{
|
||||
return a & 7;
|
||||
return a.index() & 7;
|
||||
}
|
||||
|
||||
inline int regCode(lir::Register* a)
|
||||
inline int regCode(lir::RegisterPair* a)
|
||||
{
|
||||
return regCode(a->low);
|
||||
}
|
||||
|
||||
inline bool isFloatReg(lir::Register* a)
|
||||
inline bool isFloatReg(lir::RegisterPair* a)
|
||||
{
|
||||
return a->low >= xmm0;
|
||||
}
|
||||
|
||||
void modrm(Context* c, uint8_t mod, int a, int b);
|
||||
void modrm(Context* c, uint8_t mod, Register a, Register b);
|
||||
|
||||
void modrm(Context* c, uint8_t mod, lir::Register* a, lir::Register* b);
|
||||
void modrm(Context* c, uint8_t mod, lir::RegisterPair* a, lir::RegisterPair* b);
|
||||
|
||||
void sib(Context* c, unsigned scale, int index, int base);
|
||||
void sib(Context* c, unsigned scale, Register index, Register base);
|
||||
|
||||
void modrmSib(Context* c, int width, int a, int scale, int index, int base);
|
||||
void modrmSib(Context* c, int width, Register a, int scale, Register index, Register base);
|
||||
|
||||
void modrmSibImm(Context* c, int a, int scale, int index, int base, int offset);
|
||||
void modrmSibImm(Context* c, Register a, int scale, Register index, Register base, int offset);
|
||||
|
||||
void modrmSibImm(Context* c, lir::Register* a, lir::Memory* b);
|
||||
void modrmSibImm(Context* c, lir::RegisterPair* a, lir::Memory* b);
|
||||
|
||||
void opcode(Context* c, uint8_t op);
|
||||
|
||||
@ -79,25 +79,25 @@ void conditional(Context* c, unsigned condition, lir::Constant* a);
|
||||
|
||||
void sseMoveRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void sseMoveCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void sseMoveMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void sseMoveRM(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
UNUSED unsigned bSize,
|
||||
lir::Memory* b);
|
||||
|
||||
@ -107,9 +107,9 @@ void branchFloat(Context* c, lir::TernaryOperation op, lir::Constant* target);
|
||||
|
||||
void floatRegOp(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b,
|
||||
lir::RegisterPair* b,
|
||||
uint8_t op,
|
||||
uint8_t mod = 0xc0);
|
||||
|
||||
@ -117,14 +117,14 @@ void floatMemOp(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b,
|
||||
lir::RegisterPair* b,
|
||||
uint8_t op);
|
||||
|
||||
void moveCR2(Context* c,
|
||||
UNUSED unsigned aSize,
|
||||
lir::Constant* a,
|
||||
UNUSED unsigned bSize,
|
||||
lir::Register* b,
|
||||
lir::RegisterPair* b,
|
||||
unsigned promiseOffset);
|
||||
|
||||
} // namespace x86
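The regCode() and maybeRex() changes above keep the existing x86-64 encoding rules and only re-express them through Register: the low three bits of a register index land in the ModRM/SIB fields, while the fourth bit selects a REX extension bit. A standalone illustration of that split, assuming plain ints in place of Register::index() and REX constants that merely mirror the ones used above:

#include <cstdint>
#include <cstdio>

int main()
{
  // Assumed values; they mirror the conventional REX_NONE/REX_R/REX_B encoding.
  const uint8_t REX_NONE = 0x40, REX_R = 0x44, REX_B = 0x41;

  int reg = 9;  // e.g. r9 in the ModRM reg field
  int rm = 0;   // e.g. rax in the ModRM r/m field

  uint8_t rex = REX_NONE;
  if (reg & 8) rex |= REX_R;  // indices 8-15 need the REX.R extension
  if (rm & 8) rex |= REX_B;   // indices 8-15 need the REX.B extension

  // regCode() keeps only the low three bits for the ModRM byte.
  uint8_t modrm = 0xC0 | ((reg & 7) << 3) | (rm & 7);

  printf("rex=%02x modrm=%02x\n", (unsigned)rex, (unsigned)modrm);  // rex=44 modrm=c8
  return 0;
}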
@ -28,42 +28,42 @@ using namespace util;
|
||||
|
||||
unsigned index(ArchitectureContext*,
|
||||
lir::BinaryOperation operation,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2)
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2)
|
||||
{
|
||||
return operation + ((lir::BinaryOperationCount
|
||||
+ lir::NonBranchTernaryOperationCount) * operand1)
|
||||
+ lir::NonBranchTernaryOperationCount) * (unsigned)operand1)
|
||||
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
|
||||
* lir::OperandTypeCount * operand2);
|
||||
* lir::Operand::TypeCount * (unsigned)operand2);
|
||||
}
|
||||
|
||||
unsigned index(ArchitectureContext* c UNUSED,
|
||||
lir::TernaryOperation operation,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2)
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2)
|
||||
{
|
||||
assertT(c, not isBranch(operation));
|
||||
|
||||
return lir::BinaryOperationCount + operation
|
||||
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
|
||||
* operand1)
|
||||
* (unsigned)operand1)
|
||||
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
|
||||
* lir::OperandTypeCount * operand2);
|
||||
* lir::Operand::TypeCount * (unsigned)operand2);
|
||||
}
|
||||
|
||||
unsigned branchIndex(ArchitectureContext* c UNUSED,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2)
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2)
|
||||
{
|
||||
return operand1 + (lir::OperandTypeCount * operand2);
|
||||
return (unsigned)operand1 + (lir::Operand::TypeCount * (unsigned)operand2);
|
||||
}
|
||||
|
||||
void populateTables(ArchitectureContext* c)
|
||||
{
|
||||
const lir::OperandType C = lir::ConstantOperand;
|
||||
const lir::OperandType A = lir::AddressOperand;
|
||||
const lir::OperandType R = lir::RegisterOperand;
|
||||
const lir::OperandType M = lir::MemoryOperand;
|
||||
const lir::Operand::Type C = lir::Operand::Type::Constant;
|
||||
const lir::Operand::Type A = lir::Operand::Type::Address;
|
||||
const lir::Operand::Type R = lir::Operand::Type::RegisterPair;
|
||||
const lir::Operand::Type M = lir::Operand::Type::Memory;
|
||||
|
||||
OperationType* zo = c->operations;
|
||||
UnaryOperationType* uo = c->unaryOperations;
|
||||
|
@ -23,17 +23,17 @@ class ArchitectureContext;
|
||||
|
||||
unsigned index(ArchitectureContext*,
|
||||
lir::BinaryOperation operation,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2);
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2);
|
||||
|
||||
unsigned index(ArchitectureContext* c UNUSED,
|
||||
lir::TernaryOperation operation,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2);
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2);
|
||||
|
||||
unsigned branchIndex(ArchitectureContext* c UNUSED,
|
||||
lir::OperandType operand1,
|
||||
lir::OperandType operand2);
|
||||
lir::Operand::Type operand1,
|
||||
lir::Operand::Type operand2);
|
||||
|
||||
void populateTables(ArchitectureContext* c);
|
||||
|
||||
|
File diff suppressed because it is too large
@ -33,7 +33,7 @@ void callC(Context* c, unsigned size UNUSED, lir::Constant* a);
|
||||
|
||||
void longCallC(Context* c, unsigned size, lir::Constant* a);
|
||||
|
||||
void jumpR(Context* c, unsigned size UNUSED, lir::Register* a);
|
||||
void jumpR(Context* c, unsigned size UNUSED, lir::RegisterPair* a);
|
||||
|
||||
void jumpC(Context* c, unsigned size UNUSED, lir::Constant* a);
|
||||
|
||||
@ -41,7 +41,7 @@ void jumpM(Context* c, unsigned size UNUSED, lir::Memory* a);
|
||||
|
||||
void longJumpC(Context* c, unsigned size, lir::Constant* a);
|
||||
|
||||
void callR(Context* c, unsigned size UNUSED, lir::Register* a);
|
||||
void callR(Context* c, unsigned size UNUSED, lir::RegisterPair* a);
|
||||
|
||||
void callM(Context* c, unsigned size UNUSED, lir::Memory* a);
|
||||
|
||||
@ -53,51 +53,51 @@ void alignedJumpC(Context* c, unsigned size, lir::Constant* a);
|
||||
|
||||
void alignedLongJumpC(Context* c, unsigned size, lir::Constant* a);
|
||||
|
||||
void pushR(Context* c, unsigned size, lir::Register* a);
|
||||
void pushR(Context* c, unsigned size, lir::RegisterPair* a);
|
||||
|
||||
void popR(Context* c, unsigned size, lir::Register* a);
|
||||
void popR(Context* c, unsigned size, lir::RegisterPair* a);
|
||||
|
||||
void negateR(Context* c, unsigned size, lir::Register* a);
|
||||
void negateR(Context* c, unsigned size, lir::RegisterPair* a);
|
||||
|
||||
void negateRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b UNUSED);
|
||||
lir::RegisterPair* b UNUSED);
|
||||
|
||||
void moveCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void moveZCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void swapRR(Context* c,
|
||||
unsigned aSize UNUSED,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void moveRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
UNUSED unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void moveMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void moveRM(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Memory* b);
|
||||
|
||||
@ -105,7 +105,7 @@ void moveAR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Address* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void moveCM(Context* c,
|
||||
unsigned aSize UNUSED,
|
||||
@ -115,111 +115,111 @@ void moveCM(Context* c,
|
||||
|
||||
void moveZRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void moveZMR(Context* c,
|
||||
unsigned aSize UNUSED,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void addCarryRR(Context* c, unsigned size, lir::Register* a, lir::Register* b);
|
||||
void addCarryRR(Context* c, unsigned size, lir::RegisterPair* a, lir::RegisterPair* b);
|
||||
|
||||
void addRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void addCarryCR(Context* c, unsigned size, lir::Constant* a, lir::Register* b);
|
||||
void addCarryCR(Context* c, unsigned size, lir::Constant* a, lir::RegisterPair* b);
|
||||
|
||||
void addCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void subtractBorrowCR(Context* c,
|
||||
unsigned size UNUSED,
|
||||
lir::Constant* a,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void subtractCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void subtractBorrowRR(Context* c,
|
||||
unsigned size,
|
||||
lir::Register* a,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* a,
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void subtractRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void andRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void andCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void orRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void orCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void xorRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void xorCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void multiplyRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void compareRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void compareCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void compareRM(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Memory* b);
|
||||
|
||||
@ -231,9 +231,9 @@ void compareCM(Context* c,
|
||||
|
||||
void compareFloatRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void branchLong(Context* c,
|
||||
lir::TernaryOperation op,
|
||||
@ -247,21 +247,21 @@ void branchLong(Context* c,
|
||||
void branchRR(Context* c,
|
||||
lir::TernaryOperation op,
|
||||
unsigned size,
|
||||
lir::Register* a,
|
||||
lir::Register* b,
|
||||
lir::RegisterPair* a,
|
||||
lir::RegisterPair* b,
|
||||
lir::Constant* target);
|
||||
|
||||
void branchCR(Context* c,
|
||||
lir::TernaryOperation op,
|
||||
unsigned size,
|
||||
lir::Constant* a,
|
||||
lir::Register* b,
|
||||
lir::RegisterPair* b,
|
||||
lir::Constant* target);
|
||||
|
||||
void branchRM(Context* c,
|
||||
lir::TernaryOperation op,
|
||||
unsigned size,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
lir::Memory* b,
|
||||
lir::Constant* target);
|
||||
|
||||
@ -276,181 +276,181 @@ void multiplyCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void divideRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b UNUSED);
|
||||
lir::RegisterPair* b UNUSED);
|
||||
|
||||
void remainderRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void doShift(Context* c,
|
||||
UNUSED void (*shift)(Context*,
|
||||
unsigned,
|
||||
lir::Register*,
|
||||
lir::RegisterPair*,
|
||||
unsigned,
|
||||
lir::Register*),
|
||||
lir::RegisterPair*),
|
||||
int type,
|
||||
UNUSED unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void shiftLeftRR(Context* c,
|
||||
UNUSED unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void shiftLeftCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void shiftRightRR(Context* c,
|
||||
UNUSED unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void shiftRightCR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void unsignedShiftRightRR(Context* c,
|
||||
UNUSED unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void unsignedShiftRightCR(Context* c,
|
||||
unsigned aSize UNUSED,
|
||||
lir::Constant* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatSqrtRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatSqrtMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatAddRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatAddMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatSubtractRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatSubtractMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatMultiplyRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatMultiplyMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatDivideRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatDivideMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void float2FloatRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void float2FloatMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void float2IntRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void float2IntMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void int2FloatRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void int2FloatMR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Memory* a,
|
||||
unsigned bSize,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatNegateRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void floatAbsoluteRR(Context* c,
|
||||
unsigned aSize UNUSED,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b);
|
||||
lir::RegisterPair* b);
|
||||
|
||||
void absoluteRR(Context* c,
|
||||
unsigned aSize,
|
||||
lir::Register* a,
|
||||
lir::RegisterPair* a,
|
||||
unsigned bSize UNUSED,
|
||||
lir::Register* b UNUSED);
|
||||
lir::RegisterPair* b UNUSED);
|
||||
|
||||
} // namespace x86
|
||||
} // namespace codegen
|
||||
|
@ -15,50 +15,45 @@ namespace avian {
namespace codegen {
namespace x86 {

enum {
rax = 0,
rcx = 1,
rdx = 2,
rbx = 3,
rsp = 4,
rbp = 5,
rsi = 6,
rdi = 7,
r8 = 8,
r9 = 9,
r10 = 10,
r11 = 11,
r12 = 12,
r13 = 13,
r14 = 14,
r15 = 15,
};
constexpr Register rax((int)0);
constexpr Register rcx(1);
constexpr Register rdx(2);
constexpr Register rbx(3);
constexpr Register rsp(4);
constexpr Register rbp(5);
constexpr Register rsi(6);
constexpr Register rdi(7);
constexpr Register r8(8);
constexpr Register r9(9);
constexpr Register r10(10);
constexpr Register r11(11);
constexpr Register r12(12);
constexpr Register r13(13);
constexpr Register r14(14);
constexpr Register r15(15);
constexpr Register xmm0(16);
constexpr Register xmm1(16 + 1);
constexpr Register xmm2(16 + 2);
constexpr Register xmm3(16 + 3);
constexpr Register xmm4(16 + 4);
constexpr Register xmm5(16 + 5);
constexpr Register xmm6(16 + 6);
constexpr Register xmm7(16 + 7);
constexpr Register xmm8(16 + 8);
constexpr Register xmm9(16 + 9);
constexpr Register xmm10(16 + 10);
constexpr Register xmm11(16 + 11);
constexpr Register xmm12(16 + 12);
constexpr Register xmm13(16 + 13);
constexpr Register xmm14(16 + 14);
constexpr Register xmm15(16 + 15);

enum {
xmm0 = r15 + 1,
xmm1,
xmm2,
xmm3,
xmm4,
xmm5,
xmm6,
xmm7,
xmm8,
xmm9,
xmm10,
xmm11,
xmm12,
xmm13,
xmm14,
xmm15,
};
constexpr Register LongJumpRegister = r10;

const int LongJumpRegister = r10;

const unsigned GeneralRegisterMask = vm::TargetBytesPerWord == 4 ? 0x000000ff
constexpr RegisterMask GeneralRegisterMask = vm::TargetBytesPerWord == 4 ? 0x000000ff
: 0x0000ffff;

const unsigned FloatRegisterMask = vm::TargetBytesPerWord == 4 ? 0x00ff0000
constexpr RegisterMask FloatRegisterMask = vm::TargetBytesPerWord == 4 ? 0x00ff0000
: 0xffff0000;

} // namespace x86
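The registers.h hunk above replaces the plain register enums with constexpr Register values and widens the masks to RegisterMask, but the definitions of Register and RegisterMask themselves (include/avian/codegen/registers.h) are not part of the excerpt shown on this page. The following is only a sketch of the interface implied by their use elsewhere in the commit (index(), excluding(), comparison with NoRegister, constructing a mask from a single register); the implementation details are assumptions, not the commit's actual code:

#include <cstdint>

class Register {
 public:
  constexpr explicit Register(int index) : _index(index) {}
  constexpr Register() : _index(-1) {}

  constexpr int index() const { return _index; }
  constexpr bool operator==(Register o) const { return _index == o._index; }
  constexpr bool operator!=(Register o) const { return _index != o._index; }

 private:
  int _index;
};

constexpr Register NoRegister(-1);

class RegisterMask {
 public:
  constexpr RegisterMask(uint64_t mask) : mask(mask) {}
  constexpr RegisterMask(Register r)  // a mask containing exactly one register
      : mask(static_cast<uint64_t>(1) << r.index()) {}

  constexpr RegisterMask excluding(Register r) const {
    return RegisterMask(mask & ~(static_cast<uint64_t>(1) << r.index()));
  }
  constexpr bool contains(Register r) const {
    return (mask >> r.index()) & 1;
  }
  constexpr operator uint64_t() const { return mask; }

  uint64_t mask;
};

constexpr RegisterMask AnyRegisterMask(~static_cast<uint64_t>(0));

With something along these lines, expressions used in the diff such as GeneralRegisterMask.excluding(rax).excluding(rdx) and setLowHighRegisterMasks(rax, 0) type-check naturally, and a mask can cover up to 64 registers.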
@ -9783,22 +9783,22 @@ void compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
|
||||
|
||||
if (processor(t)->bootImage) {
|
||||
lir::Memory table(t->arch->thread(), TARGET_THREAD_THUNKTABLE);
|
||||
lir::Register scratch(t->arch->scratch());
|
||||
lir::RegisterPair scratch(t->arch->scratch());
|
||||
a->apply(lir::Move,
|
||||
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &table),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &table),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
|
||||
lir::Memory proc(scratch.low, index * TargetBytesPerWord);
|
||||
a->apply(lir::Move,
|
||||
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &proc),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &proc),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
|
||||
a->apply(call ? lir::Call : lir::Jump,
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
|
||||
} else {
|
||||
lir::Constant proc(new (&c->zone) avian::codegen::ResolvedPromise(
|
||||
reinterpret_cast<intptr_t>(t->thunkTable[index])));
|
||||
|
||||
a->apply(call ? lir::LongCall : lir::LongJump,
|
||||
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &proc));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &proc));
|
||||
}
|
||||
}
|
||||
|
||||
@ -9814,16 +9814,16 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
|
||||
|
||||
p->thunks.default_.frameSavedOffset = a->length();
|
||||
|
||||
lir::Register thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
|
||||
lir::RegisterPair thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
|
||||
|
||||
compileCall(t, &context, compileMethodIndex);
|
||||
|
||||
a->popFrame(t->arch->alignFrameSize(1));
|
||||
|
||||
lir::Register result(t->arch->returnLow());
|
||||
lir::RegisterPair result(t->arch->returnLow());
|
||||
a->apply(lir::Jump,
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &result));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &result));
|
||||
|
||||
p->thunks.default_.length = a->endBlock(false)->resolve(0, 0);
|
||||
|
||||
@ -9835,7 +9835,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
|
||||
Context context(t);
|
||||
avian::codegen::Assembler* a = context.assembler;
|
||||
|
||||
lir::Register class_(t->arch->virtualCallTarget());
|
||||
lir::RegisterPair class_(t->arch->virtualCallTarget());
|
||||
lir::Memory virtualCallTargetSrc(
|
||||
t->arch->stack(),
|
||||
(t->arch->frameFooterSize() + t->arch->frameReturnAddressSize())
|
||||
@ -9843,41 +9843,41 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
|
||||
|
||||
a->apply(lir::Move,
|
||||
OperandInfo(
|
||||
TargetBytesPerWord, lir::MemoryOperand, &virtualCallTargetSrc),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &class_));
|
||||
TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallTargetSrc),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &class_));
|
||||
|
||||
lir::Memory virtualCallTargetDst(t->arch->thread(),
|
||||
TARGET_THREAD_VIRTUALCALLTARGET);
|
||||
|
||||
a->apply(
|
||||
lir::Move,
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &class_),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &class_),
|
||||
OperandInfo(
|
||||
TargetBytesPerWord, lir::MemoryOperand, &virtualCallTargetDst));
|
||||
TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallTargetDst));
|
||||
|
||||
lir::Register index(t->arch->virtualCallIndex());
|
||||
lir::RegisterPair index(t->arch->virtualCallIndex());
|
||||
lir::Memory virtualCallIndex(t->arch->thread(),
|
||||
TARGET_THREAD_VIRTUALCALLINDEX);
|
||||
|
||||
a->apply(
|
||||
lir::Move,
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &index),
|
||||
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &virtualCallIndex));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &index),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallIndex));
|
||||
|
||||
a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);
|
||||
|
||||
p->thunks.defaultVirtual.frameSavedOffset = a->length();
|
||||
|
||||
lir::Register thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
|
||||
lir::RegisterPair thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
|
||||
|
||||
compileCall(t, &context, compileVirtualMethodIndex);
|
||||
|
||||
a->popFrame(t->arch->alignFrameSize(1));
|
||||
|
||||
lir::Register result(t->arch->returnLow());
|
||||
lir::RegisterPair result(t->arch->returnLow());
|
||||
a->apply(lir::Jump,
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &result));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &result));
|
||||
|
||||
p->thunks.defaultVirtual.length = a->endBlock(false)->resolve(0, 0);
|
||||
|
||||
@ -9893,8 +9893,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
|
||||
|
||||
p->thunks.native.frameSavedOffset = a->length();
|
||||
|
||||
lir::Register thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
|
||||
lir::RegisterPair thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
|
||||
|
||||
compileCall(t, &context, invokeNativeIndex);
|
||||
|
||||
@ -9915,8 +9915,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
|
||||
|
||||
p->thunks.aioob.frameSavedOffset = a->length();
|
||||
|
||||
lir::Register thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
|
||||
lir::RegisterPair thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
|
||||
|
||||
compileCall(t, &context, throwArrayIndexOutOfBoundsIndex);
|
||||
|
||||
@ -9934,8 +9934,8 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
|
||||
|
||||
p->thunks.stackOverflow.frameSavedOffset = a->length();
|
||||
|
||||
lir::Register thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
|
||||
lir::RegisterPair thread(t->arch->thread());
|
||||
a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
|
||||
|
||||
compileCall(t, &context, throwStackOverflowIndex);
|
||||
|
||||
@ -10058,17 +10058,17 @@ uintptr_t compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
|
||||
|
||||
avian::codegen::ResolvedPromise indexPromise(index);
|
||||
lir::Constant indexConstant(&indexPromise);
|
||||
lir::Register indexRegister(t->arch->virtualCallIndex());
|
||||
lir::RegisterPair indexRegister(t->arch->virtualCallIndex());
|
||||
a->apply(
|
||||
lir::Move,
|
||||
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &indexConstant),
|
||||
OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &indexRegister));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &indexConstant),
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &indexRegister));
|
||||
|
||||
avian::codegen::ResolvedPromise defaultVirtualThunkPromise(
|
||||
defaultVirtualThunk(t));
|
||||
lir::Constant thunk(&defaultVirtualThunkPromise);
|
||||
a->apply(lir::Jump,
|
||||
OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &thunk));
|
||||
OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &thunk));
|
||||
|
||||
*size = a->endBlock(false)->resolve(0, 0);
|
||||
|
||||
|
@ -79,6 +79,6 @@ TEST(ArchitecturePlan)
(lir::UnaryOperation)op, vm::TargetBytesPerWord, mask, &thunk);
assertFalse(thunk);
assertNotEqual(static_cast<uint8_t>(0), mask.typeMask);
assertNotEqual(static_cast<uint64_t>(0), mask.registerMask);
assertNotEqual(static_cast<uint64_t>(0), (uint64_t)mask.lowRegisterMask);
}
}

@ -19,18 +19,29 @@ using namespace vm;

TEST(RegisterIterator)
{
RegisterMask regs(0x55);
BoundedRegisterMask regs(0x55);
assertEqual<unsigned>(0, regs.start);
assertEqual<unsigned>(7, regs.limit);

RegisterIterator it(regs);
assertTrue(it.hasNext());
assertEqual<unsigned>(0, it.next());
assertTrue(it.hasNext());
assertEqual<unsigned>(2, it.next());
assertTrue(it.hasNext());
assertEqual<unsigned>(4, it.next());
assertTrue(it.hasNext());
assertEqual<unsigned>(6, it.next());
assertFalse(it.hasNext());
for(int i = 0; i < 64; i++) {
assertEqual<unsigned>(i, BoundedRegisterMask(static_cast<uint64_t>(1) << i).start);
assertEqual<unsigned>(i + 1, BoundedRegisterMask(static_cast<uint64_t>(1) << i).limit);
}

auto it = regs.begin();
auto end = regs.end();

assertTrue(it != end);
assertEqual<unsigned>(6, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(4, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(2, (*it).index());
++it;
assertTrue(it != end);
assertEqual<unsigned>(0, (*it).index());
++it;
assertFalse(it != end);
}
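The rewritten RegisterIterator test above also pins down the iteration order: a BoundedRegisterMask is walked from the highest set bit down to the lowest. A minimal usage sketch, assuming the begin()/end() iterators and Register::index() exercised by the test, and assuming the avian::codegen namespace and header path suggested by the includes earlier in the commit:

#include <avian/codegen/registers.h>
#include <cstdio>

void dumpMask(avian::codegen::BoundedRegisterMask regs)
{
  for (auto it = regs.begin(); it != regs.end(); ++it) {
    // For BoundedRegisterMask(0x55) this prints 6 4 2 0: highest bit first.
    printf("%d ", (*it).index());
  }
  printf("\n");
}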