group typeMask and registerMask into OperandMask, for Architecture::plan

This commit is contained in:
Joshua Warner
2013-02-15 20:04:30 -07:00
committed by Joshua Warner
parent 5a5b9248e6
commit 4462b87f10
8 changed files with 529 additions and 523 deletions

View File

@ -2254,27 +2254,27 @@ class MyArchitecture: public Assembler::Architecture {
virtual void plan virtual void plan
(lir::UnaryOperation, (lir::UnaryOperation,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned, OperandMask& aMask,
bool* thunk) bool* thunk)
{ {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = ~static_cast<uint64_t>(0); aMask.registerMask = ~static_cast<uint64_t>(0);
*thunk = false; *thunk = false;
} }
virtual void planSource virtual void planSource
(lir::BinaryOperation op, (lir::BinaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned aSize, OperandMask& aMask,
unsigned bSize, bool* thunk) unsigned bSize, bool* thunk)
{ {
*thunk = false; *thunk = false;
*aTypeMask = ~0; aMask.typeMask = ~0;
*aRegisterMask = GPR_MASK64; aMask.registerMask = GPR_MASK64;
switch (op) { switch (op) {
case lir::Negate: case lir::Negate:
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = GPR_MASK64; aMask.registerMask = GPR_MASK64;
break; break;
case lir::Absolute: case lir::Absolute:
@ -2286,8 +2286,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatNegate: case lir::FloatNegate:
case lir::Float2Float: case lir::Float2Float:
if (vfpSupported()) { if (vfpSupported()) {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = FPR_MASK64; aMask.registerMask = FPR_MASK64;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -2299,8 +2299,8 @@ class MyArchitecture: public Assembler::Architecture {
// thunks or produce inline machine code which handles edge // thunks or produce inline machine code which handles edge
// cases properly. // cases properly.
if (false && vfpSupported() && bSize == 4) { if (false && vfpSupported() && bSize == 4) {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = FPR_MASK64; aMask.registerMask = FPR_MASK64;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -2308,8 +2308,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Int2Float: case lir::Int2Float:
if (vfpSupported() && aSize == 4) { if (vfpSupported() && aSize == 4) {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = GPR_MASK64; aMask.registerMask = GPR_MASK64;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -2322,16 +2322,16 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planDestination virtual void planDestination
(lir::BinaryOperation op, (lir::BinaryOperation op,
unsigned, uint8_t aTypeMask, uint64_t, unsigned, const OperandMask& aMask,
unsigned , uint8_t* bTypeMask, uint64_t* bRegisterMask) unsigned, OperandMask& bMask)
{ {
*bTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*bRegisterMask = GPR_MASK64; bMask.registerMask = GPR_MASK64;
switch (op) { switch (op) {
case lir::Negate: case lir::Negate:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GPR_MASK64; bMask.registerMask = GPR_MASK64;
break; break;
case lir::FloatAbsolute: case lir::FloatAbsolute:
@ -2339,18 +2339,18 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatNegate: case lir::FloatNegate:
case lir::Float2Float: case lir::Float2Float:
case lir::Int2Float: case lir::Int2Float:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = FPR_MASK64; bMask.registerMask = FPR_MASK64;
break; break;
case lir::Float2Int: case lir::Float2Int:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GPR_MASK64; bMask.registerMask = GPR_MASK64;
break; break;
case lir::Move: case lir::Move:
if (!(aTypeMask & 1 << lir::RegisterOperand)) { if (!(aMask.typeMask & 1 << lir::RegisterOperand)) {
*bTypeMask = 1 << lir::RegisterOperand; bMask.typeMask = 1 << lir::RegisterOperand;
} }
break; break;
@ -2360,41 +2360,41 @@ class MyArchitecture: public Assembler::Architecture {
} }
virtual void planMove virtual void planMove
(unsigned, uint8_t* srcTypeMask, uint64_t* srcRegisterMask, (unsigned, OperandMask& srcMask,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask, OperandMask& tmpMask,
uint8_t dstTypeMask, uint64_t dstRegisterMask) const OperandMask& dstMask)
{ {
*srcTypeMask = ~0; srcMask.typeMask = ~0;
*srcRegisterMask = ~static_cast<uint64_t>(0); srcMask.registerMask = ~static_cast<uint64_t>(0);
*tmpTypeMask = 0; tmpMask.typeMask = 0;
*tmpRegisterMask = 0; tmpMask.registerMask = 0;
if (dstTypeMask & (1 << lir::MemoryOperand)) { if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
// can't move directly from memory or constant to memory // can't move directly from memory or constant to memory
*srcTypeMask = 1 << lir::RegisterOperand; srcMask.typeMask = 1 << lir::RegisterOperand;
*tmpTypeMask = 1 << lir::RegisterOperand; tmpMask.typeMask = 1 << lir::RegisterOperand;
*tmpRegisterMask = GPR_MASK64; tmpMask.registerMask = GPR_MASK64;
} else if (vfpSupported() && } else if (vfpSupported() &&
dstTypeMask & 1 << lir::RegisterOperand && dstMask.typeMask & 1 << lir::RegisterOperand &&
dstRegisterMask & FPR_MASK) { dstMask.registerMask & FPR_MASK) {
*srcTypeMask = *tmpTypeMask = 1 << lir::RegisterOperand | srcMask.typeMask = tmpMask.typeMask = 1 << lir::RegisterOperand |
1 << lir::MemoryOperand; 1 << lir::MemoryOperand;
*tmpRegisterMask = ~static_cast<uint64_t>(0); tmpMask.registerMask = ~static_cast<uint64_t>(0);
} }
} }
virtual void planSource virtual void planSource
(lir::TernaryOperation op, (lir::TernaryOperation op,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned, OperandMask& aMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask, unsigned bSize, OperandMask& bMask,
unsigned, bool* thunk) unsigned, bool* thunk)
{ {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = GPR_MASK64; aMask.registerMask = GPR_MASK64;
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GPR_MASK64; bMask.registerMask = GPR_MASK64;
*thunk = false; *thunk = false;
@ -2402,7 +2402,7 @@ class MyArchitecture: public Assembler::Architecture {
case lir::ShiftLeft: case lir::ShiftLeft:
case lir::ShiftRight: case lir::ShiftRight:
case lir::UnsignedShiftRight: case lir::UnsignedShiftRight:
if (bSize == 8) *aTypeMask = *bTypeMask = (1 << lir::RegisterOperand); if (bSize == 8) aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
break; break;
case lir::Add: case lir::Add:
@ -2410,7 +2410,7 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Or: case lir::Or:
case lir::Xor: case lir::Xor:
case lir::Multiply: case lir::Multiply:
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
break; break;
case lir::Divide: case lir::Divide:
@ -2424,8 +2424,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatMultiply: case lir::FloatMultiply:
case lir::FloatDivide: case lir::FloatDivide:
if (vfpSupported()) { if (vfpSupported()) {
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = *bRegisterMask = FPR_MASK64; aMask.registerMask = bMask.registerMask = FPR_MASK64;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -2442,8 +2442,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered: case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered: case lir::JumpIfFloatGreaterOrEqualOrUnordered:
if (vfpSupported()) { if (vfpSupported()) {
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = *bRegisterMask = FPR_MASK64; aMask.registerMask = bMask.registerMask = FPR_MASK64;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -2456,16 +2456,16 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planDestination virtual void planDestination
(lir::TernaryOperation op, (lir::TernaryOperation op,
unsigned, uint8_t, uint64_t, unsigned, const OperandMask& aMask UNUSED,
unsigned, uint8_t, const uint64_t bRegisterMask, unsigned, const OperandMask& bMask,
unsigned, uint8_t* cTypeMask, uint64_t* cRegisterMask) unsigned, OperandMask& cMask)
{ {
if (isBranch(op)) { if (isBranch(op)) {
*cTypeMask = (1 << lir::ConstantOperand); cMask.typeMask = (1 << lir::ConstantOperand);
*cRegisterMask = 0; cMask.registerMask = 0;
} else { } else {
*cTypeMask = (1 << lir::RegisterOperand); cMask.typeMask = (1 << lir::RegisterOperand);
*cRegisterMask = bRegisterMask; cMask.registerMask = bMask.registerMask;
} }
} }

View File

@ -35,6 +35,22 @@ public:
{ } { }
}; };
class OperandMask {
public:
uint8_t typeMask;
uint64_t registerMask;
OperandMask(uint8_t typeMask, uint64_t registerMask):
typeMask(typeMask),
registerMask(registerMask)
{ }
OperandMask():
typeMask(~0),
registerMask(~static_cast<uint64_t>(0))
{ }
};
#ifdef AVIAN_TAILS #ifdef AVIAN_TAILS
const bool TailCalls = true; const bool TailCalls = true;
#else #else
@ -120,35 +136,35 @@ class Assembler {
virtual void plan virtual void plan
(lir::UnaryOperation op, (lir::UnaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned aSize, OperandMask& aMask,
bool* thunk) = 0; bool* thunk) = 0;
virtual void planSource virtual void planSource
(lir::BinaryOperation op, (lir::BinaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned aSize, OperandMask& aMask,
unsigned bSize, bool* thunk) = 0; unsigned bSize, bool* thunk) = 0;
virtual void planDestination virtual void planDestination
(lir::BinaryOperation op, (lir::BinaryOperation op,
unsigned aSize, uint8_t aTypeMask, uint64_t aRegisterMask, unsigned aSize, const OperandMask& aMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask) = 0; unsigned bSize, OperandMask& bMask) = 0;
virtual void planMove virtual void planMove
(unsigned size, uint8_t* srcTypeMask, uint64_t* srcRegisterMask, (unsigned size, OperandMask& src,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask, OperandMask& tmp,
uint8_t dstTypeMask, uint64_t dstRegisterMask) = 0; const OperandMask& dst) = 0;
virtual void planSource virtual void planSource
(lir::TernaryOperation op, (lir::TernaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned aSize, OperandMask& aMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask, unsigned bSize, OperandMask& bMask,
unsigned cSize, bool* thunk) = 0; unsigned cSize, bool* thunk) = 0;
virtual void planDestination virtual void planDestination
(lir::TernaryOperation op, (lir::TernaryOperation op,
unsigned aSize, uint8_t aTypeMask, uint64_t aRegisterMask, unsigned aSize, const OperandMask& aMask,
unsigned bSize, uint8_t bTypeMask, uint64_t bRegisterMask, unsigned bSize, const OperandMask& bMask,
unsigned cSize, uint8_t* cTypeMask, uint64_t* cRegisterMask) = 0; unsigned cSize, OperandMask& cMask) = 0;
virtual Assembler* makeAssembler(vm::Allocator*, vm::Zone*) = 0; virtual Assembler* makeAssembler(vm::Allocator*, vm::Zone*) = 0;

View File

@ -334,16 +334,13 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
virtual unsigned cost(Context* c, SiteMask dstMask) virtual unsigned cost(Context* c, SiteMask dstMask)
{ {
uint8_t srcTypeMask; OperandMask src;
uint64_t srcRegisterMask; OperandMask tmp;
uint8_t tmpTypeMask;
uint64_t tmpRegisterMask;
c->arch->planMove c->arch->planMove
(size, &srcTypeMask, &srcRegisterMask, (size, src, tmp,
&tmpTypeMask, &tmpRegisterMask, OperandMask(dstMask.typeMask, dstMask.registerMask));
dstMask.typeMask, dstMask.registerMask);
SiteMask srcMask(srcTypeMask, srcRegisterMask, AnyFrameIndex); SiteMask srcMask(src.typeMask, src.registerMask, AnyFrameIndex);
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) { for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
Site* s = it.next(); Site* s = it.next();
if (s->match(c, srcMask) or s->match(c, dstMask)) { if (s->match(c, srcMask) or s->match(c, dstMask)) {
@ -359,26 +356,23 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
bool includeNextWord; bool includeNextWord;
} costCalculator(value, size, includeNextWord); } costCalculator(value, size, includeNextWord);
Site* dst = pickTargetSite Site* dstSite = pickTargetSite
(c, read, intersectRead, registerReserveCount, &costCalculator); (c, read, intersectRead, registerReserveCount, &costCalculator);
uint8_t srcTypeMask; OperandMask src;
uint64_t srcRegisterMask; OperandMask tmp;
uint8_t tmpTypeMask;
uint64_t tmpRegisterMask;
c->arch->planMove c->arch->planMove
(size, &srcTypeMask, &srcRegisterMask, (size, src, tmp,
&tmpTypeMask, &tmpRegisterMask, OperandMask(1 << dstSite->type(c), dstSite->registerMask(c)));
1 << dst->type(c), dst->registerMask(c));
SiteMask srcMask(srcTypeMask, srcRegisterMask, AnyFrameIndex); SiteMask srcMask(src.typeMask, src.registerMask, AnyFrameIndex);
unsigned cost = 0xFFFFFFFF; unsigned cost = 0xFFFFFFFF;
Site* src = 0; Site* srcSite = 0;
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) { for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
Site* s = it.next(); Site* s = it.next();
unsigned v = s->copyCost(c, dst); unsigned v = s->copyCost(c, dstSite);
if (v == 0) { if (v == 0) {
src = s; srcSite = s;
cost = 0; cost = 0;
break; break;
} }
@ -386,50 +380,50 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
v += CopyPenalty; v += CopyPenalty;
} }
if (v < cost) { if (v < cost) {
src = s; srcSite = s;
cost = v; cost = v;
} }
} }
if (cost) { if (cost) {
if (DebugMoves) { if (DebugMoves) {
char srcb[256]; src->toString(c, srcb, 256); char srcb[256]; srcSite->toString(c, srcb, 256);
char dstb[256]; dst->toString(c, dstb, 256); char dstb[256]; dstSite->toString(c, dstb, 256);
fprintf(stderr, "maybe move %s to %s for %p to %p\n", fprintf(stderr, "maybe move %s to %s for %p to %p\n",
srcb, dstb, value, value); srcb, dstb, value, value);
} }
src->freeze(c, value); srcSite->freeze(c, value);
value->addSite(c, dst); value->addSite(c, dstSite);
src->thaw(c, value); srcSite->thaw(c, value);
if (not src->match(c, srcMask)) { if (not srcSite->match(c, srcMask)) {
src->freeze(c, value); srcSite->freeze(c, value);
dst->freeze(c, value); dstSite->freeze(c, value);
SiteMask tmpMask(tmpTypeMask, tmpRegisterMask, AnyFrameIndex); SiteMask tmpMask(tmp.typeMask, tmp.registerMask, AnyFrameIndex);
SingleRead tmpRead(tmpMask, 0); SingleRead tmpRead(tmpMask, 0);
tmpRead.value = value; tmpRead.value = value;
tmpRead.successor_ = value; tmpRead.successor_ = value;
Site* tmp = pickTargetSite(c, &tmpRead, true); Site* tmpSite = pickTargetSite(c, &tmpRead, true);
value->addSite(c, tmp); value->addSite(c, tmpSite);
move(c, value, src, tmp); move(c, value, srcSite, tmpSite);
dst->thaw(c, value); dstSite->thaw(c, value);
src->thaw(c, value); srcSite->thaw(c, value);
src = tmp; srcSite = tmpSite;
} }
move(c, value, src, dst); move(c, value, srcSite, dstSite);
} }
return dst; return dstSite;
} }
Site* Site*
@ -757,145 +751,143 @@ saveLocals(Context* c, Event* e)
void void
maybeMove(Context* c, lir::BinaryOperation type, unsigned srcSize, maybeMove(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst, unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue,
const SiteMask& dstMask) const SiteMask& dstMask)
{ {
Read* read = live(c, dst); Read* read = live(c, dstValue);
bool isStore = read == 0; bool isStore = read == 0;
Site* target; Site* target;
if (dst->target) { if (dstValue->target) {
target = dst->target; target = dstValue->target;
} else if (isStore) { } else if (isStore) {
return; return;
} else { } else {
target = pickTargetSite(c, read); target = pickTargetSite(c, read);
} }
unsigned cost = src->source->copyCost(c, target); unsigned cost = srcValue->source->copyCost(c, target);
if (srcSelectSize < dstSize) cost = 1; if (srcSelectSize < dstSize) cost = 1;
if (cost) { if (cost) {
// todo: let c->arch->planMove decide this: // todo: let c->arch->planMove decide this:
bool useTemporary = ((target->type(c) == lir::MemoryOperand bool useTemporary = ((target->type(c) == lir::MemoryOperand
and src->source->type(c) == lir::MemoryOperand) and srcValue->source->type(c) == lir::MemoryOperand)
or (srcSelectSize < dstSize or (srcSelectSize < dstSize
and target->type(c) != lir::RegisterOperand)); and target->type(c) != lir::RegisterOperand));
src->source->freeze(c, src); srcValue->source->freeze(c, srcValue);
dst->addSite(c, target); dstValue->addSite(c, target);
src->source->thaw(c, src); srcValue->source->thaw(c, srcValue);
bool addOffset = srcSize != srcSelectSize bool addOffset = srcSize != srcSelectSize
and c->arch->bigEndian() and c->arch->bigEndian()
and src->source->type(c) == lir::MemoryOperand; and srcValue->source->type(c) == lir::MemoryOperand;
if (addOffset) { if (addOffset) {
static_cast<MemorySite*>(src->source)->offset static_cast<MemorySite*>(srcValue->source)->offset
+= (srcSize - srcSelectSize); += (srcSize - srcSelectSize);
} }
target->freeze(c, dst); target->freeze(c, dstValue);
if (target->match(c, dstMask) and not useTemporary) { if (target->match(c, dstMask) and not useTemporary) {
if (DebugMoves) { if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256); char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; target->toString(c, dstb, 256); char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n", fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst); srcb, dstb, srcValue, dstValue);
} }
src->source->freeze(c, src); srcValue->source->freeze(c, srcValue);
apply(c, type, min(srcSelectSize, dstSize), src->source, src->source, apply(c, type, min(srcSelectSize, dstSize), srcValue->source, srcValue->source,
dstSize, target, target); dstSize, target, target);
src->source->thaw(c, src); srcValue->source->thaw(c, srcValue);
} else { } else {
// pick a temporary register which is valid as both a // pick a temporary register which is valid as both a
// destination and a source for the moves we need to perform: // destination and a source for the moves we need to perform:
dst->removeSite(c, target); dstValue->removeSite(c, target);
bool thunk; bool thunk;
uint8_t srcTypeMask; OperandMask src;
uint64_t srcRegisterMask;
c->arch->planSource(type, dstSize, &srcTypeMask, &srcRegisterMask, c->arch->planSource(type, dstSize, src, dstSize, &thunk);
dstSize, &thunk);
if (src->type == lir::ValueGeneral) { if (srcValue->type == lir::ValueGeneral) {
srcRegisterMask &= c->regFile->generalRegisters.mask; src.registerMask &= c->regFile->generalRegisters.mask;
} }
assert(c, thunk == 0); assert(c, thunk == 0);
assert(c, dstMask.typeMask & srcTypeMask & (1 << lir::RegisterOperand)); assert(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand));
Site* tmpTarget = freeRegisterSite Site* tmpTarget = freeRegisterSite
(c, dstMask.registerMask & srcRegisterMask); (c, dstMask.registerMask & src.registerMask);
src->source->freeze(c, src); srcValue->source->freeze(c, srcValue);
dst->addSite(c, tmpTarget); dstValue->addSite(c, tmpTarget);
tmpTarget->freeze(c, dst); tmpTarget->freeze(c, dstValue);
if (DebugMoves) { if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256); char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; tmpTarget->toString(c, dstb, 256); char dstb[256]; tmpTarget->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n", fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst); srcb, dstb, srcValue, dstValue);
} }
apply(c, type, srcSelectSize, src->source, src->source, apply(c, type, srcSelectSize, srcValue->source, srcValue->source,
dstSize, tmpTarget, tmpTarget); dstSize, tmpTarget, tmpTarget);
tmpTarget->thaw(c, dst); tmpTarget->thaw(c, dstValue);
src->source->thaw(c, src); srcValue->source->thaw(c, srcValue);
if (useTemporary or isStore) { if (useTemporary or isStore) {
if (DebugMoves) { if (DebugMoves) {
char srcb[256]; tmpTarget->toString(c, srcb, 256); char srcb[256]; tmpTarget->toString(c, srcb, 256);
char dstb[256]; target->toString(c, dstb, 256); char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n", fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst); srcb, dstb, srcValue, dstValue);
} }
dst->addSite(c, target); dstValue->addSite(c, target);
tmpTarget->freeze(c, dst); tmpTarget->freeze(c, dstValue);
apply(c, lir::Move, dstSize, tmpTarget, tmpTarget, dstSize, target, target); apply(c, lir::Move, dstSize, tmpTarget, tmpTarget, dstSize, target, target);
tmpTarget->thaw(c, dst); tmpTarget->thaw(c, dstValue);
if (isStore) { if (isStore) {
dst->removeSite(c, tmpTarget); dstValue->removeSite(c, tmpTarget);
} }
} }
} }
target->thaw(c, dst); target->thaw(c, dstValue);
if (addOffset) { if (addOffset) {
static_cast<MemorySite*>(src->source)->offset static_cast<MemorySite*>(srcValue->source)->offset
-= (srcSize - srcSelectSize); -= (srcSize - srcSelectSize);
} }
} else { } else {
target = src->source; target = srcValue->source;
if (DebugMoves) { if (DebugMoves) {
char dstb[256]; target->toString(c, dstb, 256); char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "null move in %s for %p to %p\n", dstb, src, dst); fprintf(stderr, "null move in %s for %p to %p\n", dstb, srcValue, dstValue);
} }
} }
if (isStore) { if (isStore) {
dst->removeSite(c, target); dstValue->removeSite(c, target);
} }
} }

View File

@ -264,16 +264,15 @@ class CallEvent: public Event {
} }
{ bool thunk; { bool thunk;
uint8_t typeMask; OperandMask op;
uint64_t planRegisterMask;
c->arch->plan c->arch->plan
((flags & Compiler::Aligned) ? lir::AlignedCall : lir::Call, vm::TargetBytesPerWord, ((flags & Compiler::Aligned) ? lir::AlignedCall : lir::Call, vm::TargetBytesPerWord,
&typeMask, &planRegisterMask, &thunk); op, &thunk);
assert(c, not thunk); assert(c, not thunk);
this->addRead(c, address, SiteMask this->addRead(c, address, SiteMask
(typeMask, registerMask & planRegisterMask, AnyFrameIndex)); (op.typeMask, registerMask & op.registerMask, AnyFrameIndex));
} }
Stack* stack = stackBefore; Stack* stack = stackBefore;
@ -543,26 +542,26 @@ void appendReturn(Context* c, unsigned size, Value* value) {
class MoveEvent: public Event { class MoveEvent: public Event {
public: public:
MoveEvent(Context* c, lir::BinaryOperation type, unsigned srcSize, MoveEvent(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst, unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue,
const SiteMask& srcLowMask, const SiteMask& srcHighMask): const SiteMask& srcLowMask, const SiteMask& srcHighMask):
Event(c), type(type), srcSize(srcSize), srcSelectSize(srcSelectSize), Event(c), type(type), srcSize(srcSize), srcSelectSize(srcSelectSize),
src(src), dstSize(dstSize), dst(dst) srcValue(srcValue), dstSize(dstSize), dstValue(dstValue)
{ {
assert(c, srcSelectSize <= srcSize); assert(c, srcSelectSize <= srcSize);
bool noop = srcSelectSize >= dstSize; bool noop = srcSelectSize >= dstSize;
if (dstSize > vm::TargetBytesPerWord) { if (dstSize > vm::TargetBytesPerWord) {
dst->grow(c); dstValue->grow(c);
} }
if (srcSelectSize > vm::TargetBytesPerWord) { if (srcSelectSize > vm::TargetBytesPerWord) {
src->maybeSplit(c); srcValue->maybeSplit(c);
} }
this->addReads(c, src, srcSelectSize, srcLowMask, noop ? dst : 0, this->addReads(c, srcValue, srcSelectSize, srcLowMask, noop ? dstValue : 0,
srcHighMask, srcHighMask,
noop and dstSize > vm::TargetBytesPerWord ? dst->nextWord : 0); noop and dstSize > vm::TargetBytesPerWord ? dstValue->nextWord : 0);
} }
virtual const char* name() { virtual const char* name() {
@ -570,118 +569,116 @@ class MoveEvent: public Event {
} }
virtual void compile(Context* c) { virtual void compile(Context* c) {
uint8_t dstTypeMask; OperandMask dst;
uint64_t dstRegisterMask;
c->arch->planDestination c->arch->planDestination
(type, (type,
srcSelectSize, srcSelectSize,
1 << src->source->type(c), OperandMask(
(static_cast<uint64_t>(src->nextWord->source->registerMask(c)) << 32) 1 << srcValue->source->type(c),
| static_cast<uint64_t>(src->source->registerMask(c)), (static_cast<uint64_t>(srcValue->nextWord->source->registerMask(c)) << 32)
dstSize, | static_cast<uint64_t>(srcValue->source->registerMask(c))),
&dstTypeMask, dstSize, dst);
&dstRegisterMask);
SiteMask dstLowMask(dstTypeMask, dstRegisterMask, AnyFrameIndex); SiteMask dstLowMask(dst.typeMask, dst.registerMask, AnyFrameIndex);
SiteMask dstHighMask(dstTypeMask, dstRegisterMask >> 32, AnyFrameIndex); SiteMask dstHighMask(dst.typeMask, dst.registerMask >> 32, AnyFrameIndex);
if (srcSelectSize >= vm::TargetBytesPerWord if (srcSelectSize >= vm::TargetBytesPerWord
and dstSize >= vm::TargetBytesPerWord and dstSize >= vm::TargetBytesPerWord
and srcSelectSize >= dstSize) and srcSelectSize >= dstSize)
{ {
if (dst->target) { if (dstValue->target) {
if (dstSize > vm::TargetBytesPerWord) { if (dstSize > vm::TargetBytesPerWord) {
if (src->source->registerSize(c) > vm::TargetBytesPerWord) { if (srcValue->source->registerSize(c) > vm::TargetBytesPerWord) {
apply(c, lir::Move, srcSelectSize, src->source, src->source, apply(c, lir::Move, srcSelectSize, srcValue->source, srcValue->source,
dstSize, dst->target, dst->target); dstSize, dstValue->target, dstValue->target);
if (live(c, dst) == 0) { if (live(c, dstValue) == 0) {
dst->removeSite(c, dst->target); dstValue->removeSite(c, dstValue->target);
if (dstSize > vm::TargetBytesPerWord) { if (dstSize > vm::TargetBytesPerWord) {
dst->nextWord->removeSite(c, dst->nextWord->target); dstValue->nextWord->removeSite(c, dstValue->nextWord->target);
} }
} }
} else { } else {
src->nextWord->source->freeze(c, src->nextWord); srcValue->nextWord->source->freeze(c, srcValue->nextWord);
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src, maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue,
vm::TargetBytesPerWord, dst, dstLowMask); vm::TargetBytesPerWord, dstValue, dstLowMask);
src->nextWord->source->thaw(c, src->nextWord); srcValue->nextWord->source->thaw(c, srcValue->nextWord);
maybeMove maybeMove
(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src->nextWord, (c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue->nextWord,
vm::TargetBytesPerWord, dst->nextWord, dstHighMask); vm::TargetBytesPerWord, dstValue->nextWord, dstHighMask);
} }
} else { } else {
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src, maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue,
vm::TargetBytesPerWord, dst, dstLowMask); vm::TargetBytesPerWord, dstValue, dstLowMask);
} }
} else { } else {
Site* low = pickSiteOrMove(c, src, dst, 0, 0); Site* low = pickSiteOrMove(c, srcValue, dstValue, 0, 0);
if (dstSize > vm::TargetBytesPerWord) { if (dstSize > vm::TargetBytesPerWord) {
pickSiteOrMove(c, src->nextWord, dst->nextWord, low, 1); pickSiteOrMove(c, srcValue->nextWord, dstValue->nextWord, low, 1);
} }
} }
} else if (srcSelectSize <= vm::TargetBytesPerWord } else if (srcSelectSize <= vm::TargetBytesPerWord
and dstSize <= vm::TargetBytesPerWord) and dstSize <= vm::TargetBytesPerWord)
{ {
maybeMove(c, type, srcSize, srcSelectSize, src, dstSize, dst, maybeMove(c, type, srcSize, srcSelectSize, srcValue, dstSize, dstValue,
dstLowMask); dstLowMask);
} else { } else {
assert(c, srcSize == vm::TargetBytesPerWord); assert(c, srcSize == vm::TargetBytesPerWord);
assert(c, srcSelectSize == vm::TargetBytesPerWord); assert(c, srcSelectSize == vm::TargetBytesPerWord);
if (dst->nextWord->target or live(c, dst->nextWord)) { if (dstValue->nextWord->target or live(c, dstValue->nextWord)) {
assert(c, dstLowMask.typeMask & (1 << lir::RegisterOperand)); assert(c, dstLowMask.typeMask & (1 << lir::RegisterOperand));
Site* low = freeRegisterSite(c, dstLowMask.registerMask); Site* low = freeRegisterSite(c, dstLowMask.registerMask);
src->source->freeze(c, src); srcValue->source->freeze(c, srcValue);
dst->addSite(c, low); dstValue->addSite(c, low);
low->freeze(c, dst); low->freeze(c, dstValue);
if (DebugMoves) { if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256); char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; low->toString(c, dstb, 256); char dstb[256]; low->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p\n", fprintf(stderr, "move %s to %s for %p\n",
srcb, dstb, src); srcb, dstb, srcValue);
} }
apply(c, lir::Move, vm::TargetBytesPerWord, src->source, src->source, apply(c, lir::Move, vm::TargetBytesPerWord, srcValue->source, srcValue->source,
vm::TargetBytesPerWord, low, low); vm::TargetBytesPerWord, low, low);
low->thaw(c, dst); low->thaw(c, dstValue);
src->source->thaw(c, src); srcValue->source->thaw(c, srcValue);
assert(c, dstHighMask.typeMask & (1 << lir::RegisterOperand)); assert(c, dstHighMask.typeMask & (1 << lir::RegisterOperand));
Site* high = freeRegisterSite(c, dstHighMask.registerMask); Site* high = freeRegisterSite(c, dstHighMask.registerMask);
low->freeze(c, dst); low->freeze(c, dstValue);
dst->nextWord->addSite(c, high); dstValue->nextWord->addSite(c, high);
high->freeze(c, dst->nextWord); high->freeze(c, dstValue->nextWord);
if (DebugMoves) { if (DebugMoves) {
char srcb[256]; low->toString(c, srcb, 256); char srcb[256]; low->toString(c, srcb, 256);
char dstb[256]; high->toString(c, dstb, 256); char dstb[256]; high->toString(c, dstb, 256);
fprintf(stderr, "extend %s to %s for %p %p\n", fprintf(stderr, "extend %s to %s for %p %p\n",
srcb, dstb, dst, dst->nextWord); srcb, dstb, dstValue, dstValue->nextWord);
} }
apply(c, lir::Move, vm::TargetBytesPerWord, low, low, dstSize, low, high); apply(c, lir::Move, vm::TargetBytesPerWord, low, low, dstSize, low, high);
high->thaw(c, dst->nextWord); high->thaw(c, dstValue->nextWord);
low->thaw(c, dst); low->thaw(c, dstValue);
} else { } else {
pickSiteOrMove(c, src, dst, 0, 0); pickSiteOrMove(c, srcValue, dstValue, 0, 0);
} }
} }
@ -693,29 +690,28 @@ class MoveEvent: public Event {
lir::BinaryOperation type; lir::BinaryOperation type;
unsigned srcSize; unsigned srcSize;
unsigned srcSelectSize; unsigned srcSelectSize;
Value* src; Value* srcValue;
unsigned dstSize; unsigned dstSize;
Value* dst; Value* dstValue;
}; };
void void
appendMove(Context* c, lir::BinaryOperation type, unsigned srcSize, appendMove(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst) unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue)
{ {
bool thunk; bool thunk;
uint8_t srcTypeMask; OperandMask src;
uint64_t srcRegisterMask;
c->arch->planSource c->arch->planSource
(type, srcSelectSize, &srcTypeMask, &srcRegisterMask, dstSize, &thunk); (type, srcSelectSize, src, dstSize, &thunk);
assert(c, not thunk); assert(c, not thunk);
append(c, new(c->zone) append(c, new(c->zone)
MoveEvent MoveEvent
(c, type, srcSize, srcSelectSize, src, dstSize, dst, (c, type, srcSize, srcSelectSize, srcValue, dstSize, dstValue,
SiteMask(srcTypeMask, srcRegisterMask, AnyFrameIndex), SiteMask(src.typeMask, src.registerMask, AnyFrameIndex),
SiteMask(srcTypeMask, srcRegisterMask >> 32, AnyFrameIndex))); SiteMask(src.typeMask, src.registerMask >> 32, AnyFrameIndex)));
} }
@ -791,28 +787,28 @@ Site* getTarget(Context* c, Value* value, Value* result, const SiteMask& resultM
class CombineEvent: public Event { class CombineEvent: public Event {
public: public:
CombineEvent(Context* c, lir::TernaryOperation type, CombineEvent(Context* c, lir::TernaryOperation type,
unsigned firstSize, Value* first, unsigned firstSize, Value* firstValue,
unsigned secondSize, Value* second, unsigned secondSize, Value* secondValue,
unsigned resultSize, Value* result, unsigned resultSize, Value* resultValue,
const SiteMask& firstLowMask, const SiteMask& firstLowMask,
const SiteMask& firstHighMask, const SiteMask& firstHighMask,
const SiteMask& secondLowMask, const SiteMask& secondLowMask,
const SiteMask& secondHighMask): const SiteMask& secondHighMask):
Event(c), type(type), firstSize(firstSize), first(first), Event(c), type(type), firstSize(firstSize), firstValue(firstValue),
secondSize(secondSize), second(second), resultSize(resultSize), secondSize(secondSize), secondValue(secondValue), resultSize(resultSize),
result(result) resultValue(resultValue)
{ {
this->addReads(c, first, firstSize, firstLowMask, firstHighMask); this->addReads(c, firstValue, firstSize, firstLowMask, firstHighMask);
if (resultSize > vm::TargetBytesPerWord) { if (resultSize > vm::TargetBytesPerWord) {
result->grow(c); resultValue->grow(c);
} }
bool condensed = c->arch->alwaysCondensed(type); bool condensed = c->arch->alwaysCondensed(type);
this->addReads(c, second, secondSize, this->addReads(c, secondValue, secondSize,
secondLowMask, condensed ? result : 0, secondLowMask, condensed ? resultValue : 0,
secondHighMask, condensed ? result->nextWord : 0); secondHighMask, condensed ? resultValue->nextWord : 0);
} }
virtual const char* name() { virtual const char* name() {
@ -820,99 +816,99 @@ class CombineEvent: public Event {
} }
virtual void compile(Context* c) { virtual void compile(Context* c) {
assert(c, first->source->type(c) == first->nextWord->source->type(c)); assert(c, firstValue->source->type(c) == firstValue->nextWord->source->type(c));
// if (second->source->type(c) != second->nextWord->source->type(c)) { // if (secondValue->source->type(c) != secondValue->nextWord->source->type(c)) {
// fprintf(stderr, "%p %p %d : %p %p %d\n", // fprintf(stderr, "%p %p %d : %p %p %d\n",
// second, second->source, second->source->type(c), // secondValue, secondValue->source, secondValue->source->type(c),
// second->nextWord, second->nextWord->source, // secondValue->nextWord, secondValue->nextWord->source,
// second->nextWord->source->type(c)); // secondValue->nextWord->source->type(c));
// } // }
assert(c, second->source->type(c) == second->nextWord->source->type(c)); assert(c, secondValue->source->type(c) == secondValue->nextWord->source->type(c));
freezeSource(c, firstSize, first); freezeSource(c, firstSize, firstValue);
uint8_t cTypeMask; OperandMask cMask;
uint64_t cRegisterMask;
c->arch->planDestination c->arch->planDestination
(type, (type,
firstSize, firstSize,
1 << first->source->type(c), OperandMask(
(static_cast<uint64_t>(first->nextWord->source->registerMask(c)) << 32) 1 << firstValue->source->type(c),
| static_cast<uint64_t>(first->source->registerMask(c)), (static_cast<uint64_t>(firstValue->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
secondSize, secondSize,
1 << second->source->type(c), OperandMask(
(static_cast<uint64_t>(second->nextWord->source->registerMask(c)) << 32) 1 << secondValue->source->type(c),
| static_cast<uint64_t>(second->source->registerMask(c)), (static_cast<uint64_t>(secondValue->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(secondValue->source->registerMask(c))),
resultSize, resultSize,
&cTypeMask, cMask);
&cRegisterMask);
SiteMask resultLowMask(cTypeMask, cRegisterMask, AnyFrameIndex); SiteMask resultLowMask(cMask.typeMask, cMask.registerMask, AnyFrameIndex);
SiteMask resultHighMask(cTypeMask, cRegisterMask >> 32, AnyFrameIndex); SiteMask resultHighMask(cMask.typeMask, cMask.registerMask >> 32, AnyFrameIndex);
Site* low = getTarget(c, second, result, resultLowMask); Site* low = getTarget(c, secondValue, resultValue, resultLowMask);
unsigned lowSize = low->registerSize(c); unsigned lowSize = low->registerSize(c);
Site* high Site* high
= (resultSize > lowSize = (resultSize > lowSize
? getTarget(c, second->nextWord, result->nextWord, resultHighMask) ? getTarget(c, secondValue->nextWord, resultValue->nextWord, resultHighMask)
: low); : low);
// fprintf(stderr, "combine %p:%p and %p:%p into %p:%p\n", // fprintf(stderr, "combine %p:%p and %p:%p into %p:%p\n",
// first, first->nextWord, // firstValue, firstValue->nextWord,
// second, second->nextWord, // secondValue, secondValue->nextWord,
// result, result->nextWord); // resultValue, resultValue->nextWord);
apply(c, type, apply(c, type,
firstSize, first->source, first->nextWord->source, firstSize, firstValue->source, firstValue->nextWord->source,
secondSize, second->source, second->nextWord->source, secondSize, secondValue->source, secondValue->nextWord->source,
resultSize, low, high); resultSize, low, high);
thawSource(c, firstSize, first); thawSource(c, firstSize, firstValue);
for (Read* r = reads; r; r = r->eventNext) { for (Read* r = reads; r; r = r->eventNext) {
popRead(c, this, r->value); popRead(c, this, r->value);
} }
low->thaw(c, second); low->thaw(c, secondValue);
if (resultSize > lowSize) { if (resultSize > lowSize) {
high->thaw(c, second->nextWord); high->thaw(c, secondValue->nextWord);
} }
if (live(c, result)) { if (live(c, resultValue)) {
result->addSite(c, low); resultValue->addSite(c, low);
if (resultSize > lowSize and live(c, result->nextWord)) { if (resultSize > lowSize and live(c, resultValue->nextWord)) {
result->nextWord->addSite(c, high); resultValue->nextWord->addSite(c, high);
} }
} }
} }
lir::TernaryOperation type; lir::TernaryOperation type;
unsigned firstSize; unsigned firstSize;
Value* first; Value* firstValue;
unsigned secondSize; unsigned secondSize;
Value* second; Value* secondValue;
unsigned resultSize; unsigned resultSize;
Value* result; Value* resultValue;
}; };
void void
appendCombine(Context* c, lir::TernaryOperation type, appendCombine(Context* c, lir::TernaryOperation type,
unsigned firstSize, Value* first, unsigned firstSize, Value* firstValue,
unsigned secondSize, Value* second, unsigned secondSize, Value* secondValue,
unsigned resultSize, Value* result) unsigned resultSize, Value* resultValue)
{ {
bool thunk; bool thunk;
uint8_t firstTypeMask; OperandMask firstMask;
uint64_t firstRegisterMask; OperandMask secondMask;
uint8_t secondTypeMask;
uint64_t secondRegisterMask;
c->arch->planSource(type, firstSize, &firstTypeMask, &firstRegisterMask, c->arch->planSource(type,
secondSize, &secondTypeMask, &secondRegisterMask, firstSize, firstMask,
resultSize, &thunk); secondSize, secondMask,
resultSize,
&thunk);
if (thunk) { if (thunk) {
Stack* oldStack = c->stack; Stack* oldStack = c->stack;
@ -924,8 +920,8 @@ appendCombine(Context* c, lir::TernaryOperation type,
unsigned stackSize = vm::ceilingDivide(secondSize, vm::TargetBytesPerWord) unsigned stackSize = vm::ceilingDivide(secondSize, vm::TargetBytesPerWord)
+ vm::ceilingDivide(firstSize, vm::TargetBytesPerWord); + vm::ceilingDivide(firstSize, vm::TargetBytesPerWord);
compiler::push(c, vm::ceilingDivide(secondSize, vm::TargetBytesPerWord), second); compiler::push(c, vm::ceilingDivide(secondSize, vm::TargetBytesPerWord), secondValue);
compiler::push(c, vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), first); compiler::push(c, vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), firstValue);
if (threadParameter) { if (threadParameter) {
++ stackSize; ++ stackSize;
@ -937,40 +933,40 @@ appendCombine(Context* c, lir::TernaryOperation type,
c->stack = oldStack; c->stack = oldStack;
appendCall appendCall
(c, value(c, lir::ValueGeneral, constantSite(c, handler)), 0, 0, result, (c, value(c, lir::ValueGeneral, constantSite(c, handler)), 0, 0, resultValue,
resultSize, argumentStack, stackSize, 0); resultSize, argumentStack, stackSize, 0);
} else { } else {
append append
(c, new(c->zone) (c, new(c->zone)
CombineEvent CombineEvent
(c, type, (c, type,
firstSize, first, firstSize, firstValue,
secondSize, second, secondSize, secondValue,
resultSize, result, resultSize, resultValue,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex), SiteMask(firstMask.typeMask, firstMask.registerMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex), SiteMask(firstMask.typeMask, firstMask.registerMask >> 32, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask, AnyFrameIndex), SiteMask(secondMask.typeMask, secondMask.registerMask, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask >> 32, AnyFrameIndex))); SiteMask(secondMask.typeMask, secondMask.registerMask >> 32, AnyFrameIndex)));
} }
} }
class TranslateEvent: public Event { class TranslateEvent: public Event {
public: public:
TranslateEvent(Context* c, lir::BinaryOperation type, unsigned valueSize, TranslateEvent(Context* c, lir::BinaryOperation type, unsigned valueSize,
Value* value, unsigned resultSize, Value* result, Value* value, unsigned resultSize, Value* resultValue,
const SiteMask& valueLowMask, const SiteMask& valueLowMask,
const SiteMask& valueHighMask): const SiteMask& valueHighMask):
Event(c), type(type), valueSize(valueSize), resultSize(resultSize), Event(c), type(type), valueSize(valueSize), resultSize(resultSize),
value(value), result(result) value(value), resultValue(resultValue)
{ {
bool condensed = c->arch->alwaysCondensed(type); bool condensed = c->arch->alwaysCondensed(type);
if (resultSize > vm::TargetBytesPerWord) { if (resultSize > vm::TargetBytesPerWord) {
result->grow(c); resultValue->grow(c);
} }
this->addReads(c, value, valueSize, valueLowMask, condensed ? result : 0, this->addReads(c, value, valueSize, valueLowMask, condensed ? resultValue : 0,
valueHighMask, condensed ? result->nextWord : 0); valueHighMask, condensed ? resultValue->nextWord : 0);
} }
virtual const char* name() { virtual const char* name() {
@ -980,27 +976,26 @@ class TranslateEvent: public Event {
virtual void compile(Context* c) { virtual void compile(Context* c) {
assert(c, value->source->type(c) == value->nextWord->source->type(c)); assert(c, value->source->type(c) == value->nextWord->source->type(c));
uint8_t bTypeMask; OperandMask bMask;
uint64_t bRegisterMask;
c->arch->planDestination c->arch->planDestination
(type, (type,
valueSize, valueSize,
OperandMask(
1 << value->source->type(c), 1 << value->source->type(c),
(static_cast<uint64_t>(value->nextWord->source->registerMask(c)) << 32) (static_cast<uint64_t>(value->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(value->source->registerMask(c)), | static_cast<uint64_t>(value->source->registerMask(c))),
resultSize, resultSize,
&bTypeMask, bMask);
&bRegisterMask);
SiteMask resultLowMask(bTypeMask, bRegisterMask, AnyFrameIndex); SiteMask resultLowMask(bMask.typeMask, bMask.registerMask, AnyFrameIndex);
SiteMask resultHighMask(bTypeMask, bRegisterMask >> 32, AnyFrameIndex); SiteMask resultHighMask(bMask.typeMask, bMask.registerMask >> 32, AnyFrameIndex);
Site* low = getTarget(c, value, result, resultLowMask); Site* low = getTarget(c, value, resultValue, resultLowMask);
unsigned lowSize = low->registerSize(c); unsigned lowSize = low->registerSize(c);
Site* high Site* high
= (resultSize > lowSize = (resultSize > lowSize
? getTarget(c, value->nextWord, result->nextWord, resultHighMask) ? getTarget(c, value->nextWord, resultValue->nextWord, resultHighMask)
: low); : low);
apply(c, type, valueSize, value->source, value->nextWord->source, apply(c, type, valueSize, value->source, value->nextWord->source,
@ -1015,10 +1010,10 @@ class TranslateEvent: public Event {
high->thaw(c, value->nextWord); high->thaw(c, value->nextWord);
} }
if (live(c, result)) { if (live(c, resultValue)) {
result->addSite(c, low); resultValue->addSite(c, low);
if (resultSize > lowSize and live(c, result->nextWord)) { if (resultSize > lowSize and live(c, resultValue->nextWord)) {
result->nextWord->addSite(c, high); resultValue->nextWord->addSite(c, high);
} }
} }
} }
@ -1027,7 +1022,7 @@ class TranslateEvent: public Event {
unsigned valueSize; unsigned valueSize;
unsigned resultSize; unsigned resultSize;
Value* value; Value* value;
Value* result; Value* resultValue;
Read* resultRead; Read* resultRead;
SiteMask resultLowMask; SiteMask resultLowMask;
SiteMask resultHighMask; SiteMask resultHighMask;
@ -1035,19 +1030,18 @@ class TranslateEvent: public Event {
void void
appendTranslate(Context* c, lir::BinaryOperation type, unsigned firstSize, appendTranslate(Context* c, lir::BinaryOperation type, unsigned firstSize,
Value* first, unsigned resultSize, Value* result) Value* firstValue, unsigned resultSize, Value* resultValue)
{ {
bool thunk; bool thunk;
uint8_t firstTypeMask; OperandMask first;
uint64_t firstRegisterMask;
c->arch->planSource(type, firstSize, &firstTypeMask, &firstRegisterMask, c->arch->planSource(type, firstSize, first,
resultSize, &thunk); resultSize, &thunk);
if (thunk) { if (thunk) {
Stack* oldStack = c->stack; Stack* oldStack = c->stack;
compiler::push(c, vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), first); compiler::push(c, vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), firstValue);
Stack* argumentStack = c->stack; Stack* argumentStack = c->stack;
c->stack = oldStack; c->stack = oldStack;
@ -1056,14 +1050,14 @@ appendTranslate(Context* c, lir::BinaryOperation type, unsigned firstSize,
(c, value (c, value
(c, lir::ValueGeneral, constantSite (c, lir::ValueGeneral, constantSite
(c, c->client->getThunk(type, firstSize, resultSize))), (c, c->client->getThunk(type, firstSize, resultSize))),
0, 0, result, resultSize, argumentStack, 0, 0, resultValue, resultSize, argumentStack,
vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), 0); vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), 0);
} else { } else {
append(c, new(c->zone) append(c, new(c->zone)
TranslateEvent TranslateEvent
(c, type, firstSize, first, resultSize, result, (c, type, firstSize, firstValue, resultSize, resultValue,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex), SiteMask(first.typeMask, first.registerMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex))); SiteMask(first.typeMask, first.registerMask >> 32, AnyFrameIndex)));
} }
} }
@ -1306,23 +1300,24 @@ thunkBranch(Context* c, lir::TernaryOperation type)
class BranchEvent: public Event { class BranchEvent: public Event {
public: public:
BranchEvent(Context* c, lir::TernaryOperation type, unsigned size, BranchEvent(Context* c, lir::TernaryOperation type, unsigned size,
Value* first, Value* second, Value* address, Value* firstValue, Value* secondValue, Value* addressValue,
const SiteMask& firstLowMask, const SiteMask& firstLowMask,
const SiteMask& firstHighMask, const SiteMask& firstHighMask,
const SiteMask& secondLowMask, const SiteMask& secondLowMask,
const SiteMask& secondHighMask): const SiteMask& secondHighMask):
Event(c), type(type), size(size), first(first), second(second), Event(c), type(type), size(size), firstValue(firstValue), secondValue(secondValue),
address(address) addressValue(addressValue)
{ {
this->addReads(c, first, size, firstLowMask, firstHighMask); this->addReads(c, firstValue, size, firstLowMask, firstHighMask);
this->addReads(c, second, size, secondLowMask, secondHighMask); this->addReads(c, secondValue, size, secondLowMask, secondHighMask);
uint8_t typeMask; OperandMask dstMask;
uint64_t registerMask; c->arch->planDestination(type,
c->arch->planDestination(type, size, 0, 0, size, 0, 0, vm::TargetBytesPerWord, size, OperandMask(0, 0),
&typeMask, &registerMask); size, OperandMask(0, 0),
vm::TargetBytesPerWord, dstMask);
this->addRead(c, address, SiteMask(typeMask, registerMask, AnyFrameIndex)); this->addRead(c, addressValue, SiteMask(dstMask.typeMask, dstMask.registerMask, AnyFrameIndex));
} }
virtual const char* name() { virtual const char* name() {
@ -1330,8 +1325,8 @@ class BranchEvent: public Event {
} }
virtual void compile(Context* c) { virtual void compile(Context* c) {
ConstantSite* firstConstant = findConstantSite(c, first); ConstantSite* firstConstant = findConstantSite(c, firstValue);
ConstantSite* secondConstant = findConstantSite(c, second); ConstantSite* secondConstant = findConstantSite(c, secondValue);
if (not this->isUnreachable()) { if (not this->isUnreachable()) {
if (firstConstant if (firstConstant
@ -1339,31 +1334,31 @@ class BranchEvent: public Event {
and firstConstant->value->resolved() and firstConstant->value->resolved()
and secondConstant->value->resolved()) and secondConstant->value->resolved())
{ {
int64_t firstValue = firstConstant->value->value(); int64_t firstConstVal = firstConstant->value->value();
int64_t secondValue = secondConstant->value->value(); int64_t secondConstVal = secondConstant->value->value();
if (size > vm::TargetBytesPerWord) { if (size > vm::TargetBytesPerWord) {
firstValue |= findConstantSite firstConstVal |= findConstantSite
(c, first->nextWord)->value->value() << 32; (c, firstValue->nextWord)->value->value() << 32;
secondValue |= findConstantSite secondConstVal |= findConstantSite
(c, second->nextWord)->value->value() << 32; (c, secondValue->nextWord)->value->value() << 32;
} }
if (shouldJump(c, type, size, firstValue, secondValue)) { if (shouldJump(c, type, size, firstConstVal, secondConstVal)) {
apply(c, lir::Jump, vm::TargetBytesPerWord, address->source, address->source); apply(c, lir::Jump, vm::TargetBytesPerWord, addressValue->source, addressValue->source);
} }
} else { } else {
freezeSource(c, size, first); freezeSource(c, size, firstValue);
freezeSource(c, size, second); freezeSource(c, size, secondValue);
freezeSource(c, vm::TargetBytesPerWord, address); freezeSource(c, vm::TargetBytesPerWord, addressValue);
apply(c, type, size, first->source, first->nextWord->source, apply(c, type, size, firstValue->source, firstValue->nextWord->source,
size, second->source, second->nextWord->source, size, secondValue->source, secondValue->nextWord->source,
vm::TargetBytesPerWord, address->source, address->source); vm::TargetBytesPerWord, addressValue->source, addressValue->source);
thawSource(c, vm::TargetBytesPerWord, address); thawSource(c, vm::TargetBytesPerWord, addressValue);
thawSource(c, size, second); thawSource(c, size, secondValue);
thawSource(c, size, first); thawSource(c, size, firstValue);
} }
} }
@ -1376,23 +1371,22 @@ class BranchEvent: public Event {
lir::TernaryOperation type; lir::TernaryOperation type;
unsigned size; unsigned size;
Value* first; Value* firstValue;
Value* second; Value* secondValue;
Value* address; Value* addressValue;
}; };
void void
appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first, appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* firstValue,
Value* second, Value* address) Value* secondValue, Value* addressValue)
{ {
bool thunk; bool thunk;
uint8_t firstTypeMask; OperandMask firstMask;
uint64_t firstRegisterMask; OperandMask secondMask;
uint8_t secondTypeMask;
uint64_t secondRegisterMask;
c->arch->planSource(type, size, &firstTypeMask, &firstRegisterMask, c->arch->planSource(type,
size, &secondTypeMask, &secondRegisterMask, size, firstMask,
size, secondMask,
vm::TargetBytesPerWord, &thunk); vm::TargetBytesPerWord, &thunk);
if (thunk) { if (thunk) {
@ -1404,8 +1398,8 @@ appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first
assert(c, not threadParameter); assert(c, not threadParameter);
compiler::push(c, vm::ceilingDivide(size, vm::TargetBytesPerWord), second); compiler::push(c, vm::ceilingDivide(size, vm::TargetBytesPerWord), secondValue);
compiler::push(c, vm::ceilingDivide(size, vm::TargetBytesPerWord), first); compiler::push(c, vm::ceilingDivide(size, vm::TargetBytesPerWord), firstValue);
Stack* argumentStack = c->stack; Stack* argumentStack = c->stack;
c->stack = oldStack; c->stack = oldStack;
@ -1418,16 +1412,16 @@ appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first
appendBranch(c, thunkBranch(c, type), 4, value appendBranch(c, thunkBranch(c, type), 4, value
(c, lir::ValueGeneral, constantSite(c, static_cast<int64_t>(0))), (c, lir::ValueGeneral, constantSite(c, static_cast<int64_t>(0))),
result, address); result, addressValue);
} else { } else {
append append
(c, new(c->zone) (c, new(c->zone)
BranchEvent BranchEvent
(c, type, size, first, second, address, (c, type, size, firstValue, secondValue, addressValue,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex), SiteMask(firstMask.typeMask, firstMask.registerMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex), SiteMask(firstMask.typeMask, firstMask.registerMask >> 32, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask, AnyFrameIndex), SiteMask(secondMask.typeMask, secondMask.registerMask, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask >> 32, AnyFrameIndex))); SiteMask(secondMask.typeMask, secondMask.registerMask >> 32, AnyFrameIndex)));
} }
} }
@ -1475,13 +1469,12 @@ class JumpEvent: public Event {
cleanLocals(cleanLocals) cleanLocals(cleanLocals)
{ {
bool thunk; bool thunk;
uint8_t typeMask; OperandMask mask;
uint64_t registerMask; c->arch->plan(type, vm::TargetBytesPerWord, mask, &thunk);
c->arch->plan(type, vm::TargetBytesPerWord, &typeMask, &registerMask, &thunk);
assert(c, not thunk); assert(c, not thunk);
this->addRead(c, address, SiteMask(typeMask, registerMask, AnyFrameIndex)); this->addRead(c, address, SiteMask(mask.typeMask, mask.registerMask, AnyFrameIndex));
} }
virtual const char* name() { virtual const char* name() {

View File

@ -40,6 +40,14 @@ class SiteMask {
return SiteMask(1 << lir::RegisterOperand, 1 << number, NoFrameIndex); return SiteMask(1 << lir::RegisterOperand, 1 << number, NoFrameIndex);
} }
static SiteMask lowPart(const OperandMask& mask) {
return SiteMask(mask.typeMask, mask.registerMask, AnyFrameIndex);
}
static SiteMask highPart(const OperandMask& mask) {
return SiteMask(mask.typeMask, mask.registerMask >> 32, AnyFrameIndex);
}
uint8_t typeMask; uint8_t typeMask;
uint32_t registerMask; uint32_t registerMask;
int frameIndex; int frameIndex;

View File

@ -2258,27 +2258,27 @@ class MyArchitecture: public Assembler::Architecture {
virtual void plan virtual void plan
(lir::UnaryOperation, (lir::UnaryOperation,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned, OperandMask& aMask,
bool* thunk) bool* thunk)
{ {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = ~static_cast<uint64_t>(0); aMask.registerMask = ~static_cast<uint64_t>(0);
*thunk = false; *thunk = false;
} }
virtual void planSource virtual void planSource
(lir::BinaryOperation op, (lir::BinaryOperation op,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned, OperandMask& aMask,
unsigned, bool* thunk) unsigned, bool* thunk)
{ {
*aTypeMask = ~0; aMask.typeMask = ~0;
*aRegisterMask = ~static_cast<uint64_t>(0); aMask.registerMask = ~static_cast<uint64_t>(0);
*thunk = false; *thunk = false;
switch (op) { switch (op) {
case lir::Negate: case lir::Negate:
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
break; break;
case lir::Absolute: case lir::Absolute:
@ -2298,15 +2298,15 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planDestination virtual void planDestination
(lir::BinaryOperation op, (lir::BinaryOperation op,
unsigned, uint8_t, uint64_t, unsigned, const OperandMask& aMask UNUSED,
unsigned, uint8_t* bTypeMask, uint64_t* bRegisterMask) unsigned, OperandMask& bMask)
{ {
*bTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*bRegisterMask = ~static_cast<uint64_t>(0); bMask.registerMask = ~static_cast<uint64_t>(0);
switch (op) { switch (op) {
case lir::Negate: case lir::Negate:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
break; break;
default: default:
@ -2315,35 +2315,35 @@ class MyArchitecture: public Assembler::Architecture {
} }
virtual void planMove virtual void planMove
(unsigned, uint8_t* srcTypeMask, uint64_t* srcRegisterMask, (unsigned, OperandMask& srcMask,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask, OperandMask& tmpMask,
uint8_t dstTypeMask, uint64_t) const OperandMask& dstMask)
{ {
*srcTypeMask = ~0; srcMask.typeMask = ~0;
*srcRegisterMask = ~static_cast<uint64_t>(0); srcMask.registerMask = ~static_cast<uint64_t>(0);
*tmpTypeMask = 0; tmpMask.typeMask = 0;
*tmpRegisterMask = 0; tmpMask.registerMask = 0;
if (dstTypeMask & (1 << lir::MemoryOperand)) { if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
// can't move directly from memory or constant to memory // can't move directly from memory or constant to memory
*srcTypeMask = 1 << lir::RegisterOperand; srcMask.typeMask = 1 << lir::RegisterOperand;
*tmpTypeMask = 1 << lir::RegisterOperand; tmpMask.typeMask = 1 << lir::RegisterOperand;
*tmpRegisterMask = ~static_cast<uint64_t>(0); tmpMask.registerMask = ~static_cast<uint64_t>(0);
} }
} }
virtual void planSource virtual void planSource
(lir::TernaryOperation op, (lir::TernaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned aSize, OperandMask& aMask,
unsigned, uint8_t* bTypeMask, uint64_t* bRegisterMask, unsigned, OperandMask& bMask,
unsigned, bool* thunk) unsigned, bool* thunk)
{ {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = ~static_cast<uint64_t>(0); aMask.registerMask = ~static_cast<uint64_t>(0);
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = ~static_cast<uint64_t>(0); bMask.registerMask = ~static_cast<uint64_t>(0);
*thunk = false; *thunk = false;
@ -2351,12 +2351,12 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Add: case lir::Add:
case lir::Subtract: case lir::Subtract:
if (aSize == 8) { if (aSize == 8) {
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
} }
break; break;
case lir::Multiply: case lir::Multiply:
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
break; break;
case lir::Divide: case lir::Divide:
@ -2370,7 +2370,7 @@ class MyArchitecture: public Assembler::Architecture {
if (true) {//if (TargetBytesPerWord == 4 and aSize == 8) { if (true) {//if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true; *thunk = true;
} else { } else {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
} }
break; break;
@ -2399,16 +2399,16 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planDestination virtual void planDestination
(lir::TernaryOperation op, (lir::TernaryOperation op,
unsigned, uint8_t, uint64_t, unsigned, const OperandMask& aMask UNUSED,
unsigned, uint8_t, const uint64_t, unsigned, const OperandMask& bMask UNUSED,
unsigned, uint8_t* cTypeMask, uint64_t* cRegisterMask) unsigned, OperandMask& cMask)
{ {
if (isBranch(op)) { if (isBranch(op)) {
*cTypeMask = (1 << lir::ConstantOperand); cMask.typeMask = (1 << lir::ConstantOperand);
*cRegisterMask = 0; cMask.registerMask = 0;
} else { } else {
*cTypeMask = (1 << lir::RegisterOperand); cMask.typeMask = (1 << lir::RegisterOperand);
*cRegisterMask = ~static_cast<uint64_t>(0); cMask.registerMask = ~static_cast<uint64_t>(0);
} }
} }
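On the compiler side, the two-phase planning query now threads OperandMask objects through both calls instead of four scalar out-parameters. A hypothetical, simplified caller (compiler.cpp is only partially shown in this diff) would look roughly like:

    bool thunk;
    OperandMask src, dst;
    c->arch->planSource(op, srcSize, src, dstSize, &thunk);
    if (not thunk) {
      c->arch->planDestination(op, srcSize, src, dstSize, dst);
      // src and dst now carry both the allowed operand types and register sets
    }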

View File

@ -2992,37 +2992,34 @@ class MyArchitecture: public Assembler::Architecture {
virtual void plan virtual void plan
(lir::UnaryOperation, (lir::UnaryOperation,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned, OperandMask& aMask,
bool* thunk) bool* thunk)
{ {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand) aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand) | (1 << lir::ConstantOperand);
| (1 << lir::ConstantOperand);
*aRegisterMask = ~static_cast<uint64_t>(0);
*thunk = false; *thunk = false;
} }
virtual void planSource virtual void planSource
(lir::BinaryOperation op, (lir::BinaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask, unsigned aSize, OperandMask& aMask,
unsigned bSize, bool* thunk) unsigned bSize, bool* thunk)
{ {
*aTypeMask = ~0; aMask.registerMask = GeneralRegisterMask |
*aRegisterMask = GeneralRegisterMask |
(static_cast<uint64_t>(GeneralRegisterMask) << 32); (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*thunk = false; *thunk = false;
switch (op) { switch (op) {
case lir::Negate: case lir::Negate:
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = (static_cast<uint64_t>(1) << (rdx + 32)) aMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32))
| (static_cast<uint64_t>(1) << rax); | (static_cast<uint64_t>(1) << rax);
break; break;
case lir::Absolute: case lir::Absolute:
if (aSize <= TargetBytesPerWord) { if (aSize <= TargetBytesPerWord) {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = (static_cast<uint64_t>(1) << rax); aMask.registerMask = (static_cast<uint64_t>(1) << rax);
} else { } else {
*thunk = true; *thunk = true;
} }
@ -3030,8 +3027,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatAbsolute: case lir::FloatAbsolute:
if (useSSE(&c)) { if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask; | FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
@ -3041,8 +3038,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatNegate: case lir::FloatNegate:
// floatNegateRR does not support doubles // floatNegateRR does not support doubles
if (useSSE(&c) and aSize == 4 and bSize == 4) { if (useSSE(&c) and aSize == 4 and bSize == 4) {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = FloatRegisterMask; aMask.registerMask = FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -3050,8 +3047,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatSquareRoot: case lir::FloatSquareRoot:
if (useSSE(&c)) { if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask; | FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
@ -3060,8 +3057,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Float2Float: case lir::Float2Float:
if (useSSE(&c)) { if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask; | FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
@ -3074,8 +3071,8 @@ class MyArchitecture: public Assembler::Architecture {
// thunks or produce inline machine code which handles edge // thunks or produce inline machine code which handles edge
// cases properly. // cases properly.
if (false and useSSE(&c) and bSize <= TargetBytesPerWord) { if (false and useSSE(&c) and bSize <= TargetBytesPerWord) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask; | FloatRegisterMask;
} else { } else {
*thunk = true; *thunk = true;
@ -3084,8 +3081,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Int2Float: case lir::Int2Float:
if (useSSE(&c) and aSize <= TargetBytesPerWord) { if (useSSE(&c) and aSize <= TargetBytesPerWord) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*aRegisterMask = GeneralRegisterMask aMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32); | (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} else { } else {
*thunk = true; *thunk = true;
@ -3093,20 +3090,20 @@ class MyArchitecture: public Assembler::Architecture {
break; break;
case lir::Move: case lir::Move:
*aTypeMask = ~0; aMask.typeMask = ~0;
*aRegisterMask = ~static_cast<uint64_t>(0); aMask.registerMask = ~static_cast<uint64_t>(0);
if (TargetBytesPerWord == 4) { if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) { if (aSize == 4 and bSize == 8) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
const uint32_t mask const uint32_t mask
= GeneralRegisterMask & ~((1 << rax) | (1 << rdx)); = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
*aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask; aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} else if (aSize == 1 or bSize == 1) { } else if (aSize == 1 or bSize == 1) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
const uint32_t mask const uint32_t mask
= (1 << rax) | (1 << rcx) | (1 << rdx) | (1 << rbx); = (1 << rax) | (1 << rcx) | (1 << rdx) | (1 << rbx);
*aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask; aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} }
} }
break; break;
@ -3117,69 +3114,69 @@ class MyArchitecture: public Assembler::Architecture {
} }
virtual void planDestination virtual void planDestination
(lir::BinaryOperation op, unsigned aSize, uint8_t aTypeMask, (lir::BinaryOperation op,
uint64_t aRegisterMask, unsigned bSize, uint8_t* bTypeMask, unsigned aSize, const OperandMask& aMask,
uint64_t* bRegisterMask) unsigned bSize, OperandMask& bMask)
{ {
*bTypeMask = ~0; bMask.typeMask = ~0;
*bRegisterMask = GeneralRegisterMask bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32); | (static_cast<uint64_t>(GeneralRegisterMask) << 32);
switch (op) { switch (op) {
case lir::Absolute: case lir::Absolute:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = (static_cast<uint64_t>(1) << rax); bMask.registerMask = (static_cast<uint64_t>(1) << rax);
break; break;
case lir::FloatAbsolute: case lir::FloatAbsolute:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = aRegisterMask; bMask.registerMask = aMask.registerMask;
break; break;
case lir::Negate: case lir::Negate:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = aRegisterMask; bMask.registerMask = aMask.registerMask;
break; break;
case lir::FloatNegate: case lir::FloatNegate:
case lir::FloatSquareRoot: case lir::FloatSquareRoot:
case lir::Float2Float: case lir::Float2Float:
case lir::Int2Float: case lir::Int2Float:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) bMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask; | FloatRegisterMask;
break; break;
case lir::Float2Int: case lir::Float2Int:
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
break; break;
case lir::Move: case lir::Move:
if (aTypeMask & ((1 << lir::MemoryOperand) | 1 << lir::AddressOperand)) { if (aMask.typeMask & ((1 << lir::MemoryOperand) | 1 << lir::AddressOperand)) {
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GeneralRegisterMask bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32) | (static_cast<uint64_t>(GeneralRegisterMask) << 32)
| FloatRegisterMask; | FloatRegisterMask;
} else if (aTypeMask & (1 << lir::RegisterOperand)) { } else if (aMask.typeMask & (1 << lir::RegisterOperand)) {
*bTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
if (aRegisterMask & FloatRegisterMask) { if (aMask.registerMask & FloatRegisterMask) {
*bRegisterMask = FloatRegisterMask; bMask.registerMask = FloatRegisterMask;
} else { } else {
*bRegisterMask = GeneralRegisterMask bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32); | (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} }
} else { } else {
*bTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
} }
if (TargetBytesPerWord == 4) { if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) { if (aSize == 4 and bSize == 8) {
*bRegisterMask = (static_cast<uint64_t>(1) << (rdx + 32)) bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32))
| (static_cast<uint64_t>(1) << rax); | (static_cast<uint64_t>(1) << rax);
} else if (aSize == 1 or bSize == 1) { } else if (aSize == 1 or bSize == 1) {
const uint32_t mask const uint32_t mask
= (1 << rax) | (1 << rcx) | (1 << rdx) | (1 << rbx); = (1 << rax) | (1 << rcx) | (1 << rdx) | (1 << rbx);
*bRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask; bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} }
} }
break; break;
@ -3190,44 +3187,44 @@ class MyArchitecture: public Assembler::Architecture {
} }
virtual void planMove virtual void planMove
(unsigned size, uint8_t* srcTypeMask, uint64_t* srcRegisterMask, (unsigned size, OperandMask& srcMask,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask, OperandMask& tmpMask,
uint8_t dstTypeMask, uint64_t dstRegisterMask) const OperandMask& dstMask)
{ {
*srcTypeMask = ~0; srcMask.typeMask = ~0;
*srcRegisterMask = ~static_cast<uint64_t>(0); srcMask.registerMask = ~static_cast<uint64_t>(0);
*tmpTypeMask = 0; tmpMask.typeMask = 0;
*tmpRegisterMask = 0; tmpMask.registerMask = 0;
if (dstTypeMask & (1 << lir::MemoryOperand)) { if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
// can't move directly from memory to memory // can't move directly from memory to memory
*srcTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); srcMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*tmpTypeMask = 1 << lir::RegisterOperand; tmpMask.typeMask = 1 << lir::RegisterOperand;
*tmpRegisterMask = GeneralRegisterMask tmpMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32); | (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} else if (dstTypeMask & (1 << lir::RegisterOperand)) { } else if (dstMask.typeMask & (1 << lir::RegisterOperand)) {
if (size > TargetBytesPerWord) { if (size > TargetBytesPerWord) {
// can't move directly from FPR to GPR or vice-versa for // can't move directly from FPR to GPR or vice-versa for
// values larger than the GPR size // values larger than the GPR size
if (dstRegisterMask & FloatRegisterMask) { if (dstMask.registerMask & FloatRegisterMask) {
*srcRegisterMask = FloatRegisterMask srcMask.registerMask = FloatRegisterMask
| (static_cast<uint64_t>(FloatRegisterMask) << 32); | (static_cast<uint64_t>(FloatRegisterMask) << 32);
*tmpTypeMask = 1 << lir::MemoryOperand; tmpMask.typeMask = 1 << lir::MemoryOperand;
} else if (dstRegisterMask & GeneralRegisterMask) { } else if (dstMask.registerMask & GeneralRegisterMask) {
*srcRegisterMask = GeneralRegisterMask srcMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32); | (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*tmpTypeMask = 1 << lir::MemoryOperand; tmpMask.typeMask = 1 << lir::MemoryOperand;
} }
} }
if (dstRegisterMask & FloatRegisterMask) { if (dstMask.registerMask & FloatRegisterMask) {
// can't move directly from constant to FPR // can't move directly from constant to FPR
*srcTypeMask &= ~(1 << lir::ConstantOperand); srcMask.typeMask &= ~(1 << lir::ConstantOperand);
if (size > TargetBytesPerWord) { if (size > TargetBytesPerWord) {
*tmpTypeMask = 1 << lir::MemoryOperand; tmpMask.typeMask = 1 << lir::MemoryOperand;
} else { } else {
*tmpTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); tmpMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*tmpRegisterMask = GeneralRegisterMask tmpMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32); | (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} }
} }
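The 64-bit registerMask convention itself is unchanged by this commit: bits 0-31 constrain the register holding the low word of a multi-word operand and bits 32-63 the high word, which is why SiteMask::highPart shifts by 32. The 32-bit x86 rdx:rax pair used for 8-byte results above encodes as:

    // low word must land in rax, high word in rdx
    uint64_t pair = (static_cast<uint64_t>(1) << (rdx + 32))
                    | (static_cast<uint64_t>(1) << rax);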
@ -3236,16 +3233,16 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planSource virtual void planSource
(lir::TernaryOperation op, (lir::TernaryOperation op,
unsigned aSize, uint8_t *aTypeMask, uint64_t *aRegisterMask, unsigned aSize, OperandMask& aMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask, unsigned bSize, OperandMask& bMask,
unsigned, bool* thunk) unsigned, bool* thunk)
{ {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = GeneralRegisterMask aMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32); | (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GeneralRegisterMask bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32); | (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*thunk = false; *thunk = false;
@ -3256,14 +3253,14 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatMultiply: case lir::FloatMultiply:
case lir::FloatDivide: case lir::FloatDivide:
if (useSSE(&c)) { if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*bTypeMask = (1 << lir::RegisterOperand); bMask.typeMask = (1 << lir::RegisterOperand);
const uint64_t mask const uint64_t mask
= (static_cast<uint64_t>(FloatRegisterMask) << 32) = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask; | FloatRegisterMask;
*aRegisterMask = mask; aMask.registerMask = mask;
*bRegisterMask = mask; bMask.registerMask = mask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -3276,11 +3273,11 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Multiply: case lir::Multiply:
if (TargetBytesPerWord == 4 and aSize == 8) { if (TargetBytesPerWord == 4 and aSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx)); const uint32_t mask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
*aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask; aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
*bRegisterMask = (static_cast<uint64_t>(1) << (rdx + 32)) | mask; bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32)) | mask;
} else { } else {
*aRegisterMask = GeneralRegisterMask; aMask.registerMask = GeneralRegisterMask;
*bRegisterMask = GeneralRegisterMask; bMask.registerMask = GeneralRegisterMask;
} }
break; break;
@ -3288,9 +3285,9 @@ class MyArchitecture: public Assembler::Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) { if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true; *thunk = true;
} else { } else {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx)); aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
*bRegisterMask = 1 << rax; bMask.registerMask = 1 << rax;
} }
break; break;
@ -3298,9 +3295,9 @@ class MyArchitecture: public Assembler::Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) { if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true; *thunk = true;
} else { } else {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx)); aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
*bRegisterMask = 1 << rax; bMask.registerMask = 1 << rax;
} }
break; break;
@ -3309,13 +3306,13 @@ class MyArchitecture: public Assembler::Architecture {
case lir::UnsignedShiftRight: { case lir::UnsignedShiftRight: {
if (TargetBytesPerWord == 4 and bSize == 8) { if (TargetBytesPerWord == 4 and bSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~(1 << rcx); const uint32_t mask = GeneralRegisterMask & ~(1 << rcx);
*aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask; aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
*bRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask; bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} else { } else {
*aRegisterMask = (static_cast<uint64_t>(GeneralRegisterMask) << 32) aMask.registerMask = (static_cast<uint64_t>(GeneralRegisterMask) << 32)
| (static_cast<uint64_t>(1) << rcx); | (static_cast<uint64_t>(1) << rcx);
const uint32_t mask = GeneralRegisterMask & ~(1 << rcx); const uint32_t mask = GeneralRegisterMask & ~(1 << rcx);
*bRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask; bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} }
} break; } break;
@ -3330,11 +3327,11 @@ class MyArchitecture: public Assembler::Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered: case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered: case lir::JumpIfFloatGreaterOrEqualOrUnordered:
if (useSSE(&c)) { if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand); aMask.typeMask = (1 << lir::RegisterOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32) aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask; | FloatRegisterMask;
*bTypeMask = *aTypeMask; bMask.typeMask = aMask.typeMask;
*bRegisterMask = *aRegisterMask; bMask.registerMask = aMask.registerMask;
} else { } else {
*thunk = true; *thunk = true;
} }
@ -3346,16 +3343,17 @@ class MyArchitecture: public Assembler::Architecture {
} }
virtual void planDestination virtual void planDestination
(lir::TernaryOperation op, unsigned, uint8_t, uint64_t, unsigned, uint8_t, (lir::TernaryOperation op,
uint64_t bRegisterMask, unsigned, uint8_t* cTypeMask, unsigned, const OperandMask&,
uint64_t* cRegisterMask) unsigned, const OperandMask& bMask,
unsigned, OperandMask& cMask)
{ {
if (isBranch(op)) { if (isBranch(op)) {
*cTypeMask = (1 << lir::ConstantOperand); cMask.typeMask = (1 << lir::ConstantOperand);
*cRegisterMask = 0; cMask.registerMask = 0;
} else { } else {
*cTypeMask = (1 << lir::RegisterOperand); cMask.typeMask = (1 << lir::RegisterOperand);
*cRegisterMask = bRegisterMask; cMask.registerMask = bMask.registerMask;
} }
} }
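For ternary operations the c operand plays two roles: a branch target, which this backend requires to be an inline constant, or the result of a two-address arithmetic op, in which case it inherits the b operand's register constraints. A hedged sketch of what a caller would observe (arch pointer and sizes are illustrative only):

    bool thunk;
    OperandMask aMask, bMask, cMask;
    arch->planSource(lir::Add, 4, aMask, 4, bMask, 4, &thunk);
    arch->planDestination(lir::Add, 4, aMask, 4, bMask, 4, cMask);
    // cMask.typeMask == (1 << lir::RegisterOperand)
    // cMask.registerMask == bMask.registerMask, i.e. the result shares b's registers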

View File

@ -84,12 +84,11 @@ public:
for(int op = (int)lir::Call; op < (int)lir::AlignedJump; op++) { for(int op = (int)lir::Call; op < (int)lir::AlignedJump; op++) {
bool thunk; bool thunk;
uint8_t typeMask; OperandMask mask;
uint64_t registerMask; env.arch->plan((lir::UnaryOperation)op, vm::TargetBytesPerWord, mask, &thunk);
env.arch->plan((lir::UnaryOperation)op, vm::TargetBytesPerWord, &typeMask, &registerMask, &thunk);
assertFalse(thunk); assertFalse(thunk);
assertNotEqual(static_cast<uint8_t>(0), typeMask); assertNotEqual(static_cast<uint8_t>(0), mask.typeMask);
assertNotEqual(static_cast<uint64_t>(0), registerMask); assertNotEqual(static_cast<uint64_t>(0), mask.registerMask);
} }
} }