group typeMask and registerMask into OperandMask, for Architecture::plan

This commit is contained in:
Joshua Warner 2013-02-15 20:04:30 -07:00 committed by Joshua Warner
parent 5a5b9248e6
commit 4462b87f10
8 changed files with 529 additions and 523 deletions

View File

@ -2254,27 +2254,27 @@ class MyArchitecture: public Assembler::Architecture {
virtual void plan
(lir::UnaryOperation,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned, OperandMask& aMask,
bool* thunk)
{
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = ~static_cast<uint64_t>(0);
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = ~static_cast<uint64_t>(0);
*thunk = false;
}
virtual void planSource
(lir::BinaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned aSize, OperandMask& aMask,
unsigned bSize, bool* thunk)
{
*thunk = false;
*aTypeMask = ~0;
*aRegisterMask = GPR_MASK64;
aMask.typeMask = ~0;
aMask.registerMask = GPR_MASK64;
switch (op) {
case lir::Negate:
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = GPR_MASK64;
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GPR_MASK64;
break;
case lir::Absolute:
@ -2286,8 +2286,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatNegate:
case lir::Float2Float:
if (vfpSupported()) {
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = FPR_MASK64;
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = FPR_MASK64;
} else {
*thunk = true;
}
@ -2299,8 +2299,8 @@ class MyArchitecture: public Assembler::Architecture {
// thunks or produce inline machine code which handles edge
// cases properly.
if (false && vfpSupported() && bSize == 4) {
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = FPR_MASK64;
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = FPR_MASK64;
} else {
*thunk = true;
}
@ -2308,8 +2308,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Int2Float:
if (vfpSupported() && aSize == 4) {
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = GPR_MASK64;
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GPR_MASK64;
} else {
*thunk = true;
}
@ -2322,16 +2322,16 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planDestination
(lir::BinaryOperation op,
unsigned, uint8_t aTypeMask, uint64_t,
unsigned , uint8_t* bTypeMask, uint64_t* bRegisterMask)
unsigned, const OperandMask& aMask,
unsigned, OperandMask& bMask)
{
*bTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*bRegisterMask = GPR_MASK64;
bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
bMask.registerMask = GPR_MASK64;
switch (op) {
case lir::Negate:
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GPR_MASK64;
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GPR_MASK64;
break;
case lir::FloatAbsolute:
@ -2339,18 +2339,18 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatNegate:
case lir::Float2Float:
case lir::Int2Float:
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = FPR_MASK64;
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = FPR_MASK64;
break;
case lir::Float2Int:
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GPR_MASK64;
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GPR_MASK64;
break;
case lir::Move:
if (!(aTypeMask & 1 << lir::RegisterOperand)) {
*bTypeMask = 1 << lir::RegisterOperand;
if (!(aMask.typeMask & 1 << lir::RegisterOperand)) {
bMask.typeMask = 1 << lir::RegisterOperand;
}
break;
@ -2360,41 +2360,41 @@ class MyArchitecture: public Assembler::Architecture {
}
virtual void planMove
(unsigned, uint8_t* srcTypeMask, uint64_t* srcRegisterMask,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask,
uint8_t dstTypeMask, uint64_t dstRegisterMask)
(unsigned, OperandMask& srcMask,
OperandMask& tmpMask,
const OperandMask& dstMask)
{
*srcTypeMask = ~0;
*srcRegisterMask = ~static_cast<uint64_t>(0);
srcMask.typeMask = ~0;
srcMask.registerMask = ~static_cast<uint64_t>(0);
*tmpTypeMask = 0;
*tmpRegisterMask = 0;
tmpMask.typeMask = 0;
tmpMask.registerMask = 0;
if (dstTypeMask & (1 << lir::MemoryOperand)) {
if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
// can't move directly from memory or constant to memory
*srcTypeMask = 1 << lir::RegisterOperand;
*tmpTypeMask = 1 << lir::RegisterOperand;
*tmpRegisterMask = GPR_MASK64;
srcMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.registerMask = GPR_MASK64;
} else if (vfpSupported() &&
dstTypeMask & 1 << lir::RegisterOperand &&
dstRegisterMask & FPR_MASK) {
*srcTypeMask = *tmpTypeMask = 1 << lir::RegisterOperand |
dstMask.typeMask & 1 << lir::RegisterOperand &&
dstMask.registerMask & FPR_MASK) {
srcMask.typeMask = tmpMask.typeMask = 1 << lir::RegisterOperand |
1 << lir::MemoryOperand;
*tmpRegisterMask = ~static_cast<uint64_t>(0);
tmpMask.registerMask = ~static_cast<uint64_t>(0);
}
}
virtual void planSource
(lir::TernaryOperation op,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask,
unsigned, OperandMask& aMask,
unsigned bSize, OperandMask& bMask,
unsigned, bool* thunk)
{
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = GPR_MASK64;
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = GPR_MASK64;
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GPR_MASK64;
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GPR_MASK64;
*thunk = false;
@ -2402,7 +2402,7 @@ class MyArchitecture: public Assembler::Architecture {
case lir::ShiftLeft:
case lir::ShiftRight:
case lir::UnsignedShiftRight:
if (bSize == 8) *aTypeMask = *bTypeMask = (1 << lir::RegisterOperand);
if (bSize == 8) aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
break;
case lir::Add:
@ -2410,7 +2410,7 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Or:
case lir::Xor:
case lir::Multiply:
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand);
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
break;
case lir::Divide:
@ -2424,8 +2424,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatMultiply:
case lir::FloatDivide:
if (vfpSupported()) {
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = *bRegisterMask = FPR_MASK64;
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = bMask.registerMask = FPR_MASK64;
} else {
*thunk = true;
}
@ -2442,8 +2442,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered:
if (vfpSupported()) {
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = *bRegisterMask = FPR_MASK64;
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = bMask.registerMask = FPR_MASK64;
} else {
*thunk = true;
}
@ -2456,16 +2456,16 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planDestination
(lir::TernaryOperation op,
unsigned, uint8_t, uint64_t,
unsigned, uint8_t, const uint64_t bRegisterMask,
unsigned, uint8_t* cTypeMask, uint64_t* cRegisterMask)
unsigned, const OperandMask& aMask UNUSED,
unsigned, const OperandMask& bMask,
unsigned, OperandMask& cMask)
{
if (isBranch(op)) {
*cTypeMask = (1 << lir::ConstantOperand);
*cRegisterMask = 0;
cMask.typeMask = (1 << lir::ConstantOperand);
cMask.registerMask = 0;
} else {
*cTypeMask = (1 << lir::RegisterOperand);
*cRegisterMask = bRegisterMask;
cMask.typeMask = (1 << lir::RegisterOperand);
cMask.registerMask = bMask.registerMask;
}
}

View File

@ -35,6 +35,22 @@ public:
{ }
};
// Bundles the operand-type bitmask and the register bitmask that
// Architecture::plan* methods negotiate for a single operand.
class OperandMask {
public:
uint8_t typeMask;       // bit per lir operand kind (1 << lir::*Operand)
uint64_t registerMask;  // bit per register the operand may occupy

// Default: accept every operand type in every register.
OperandMask():
typeMask(static_cast<uint8_t>(~0)),
registerMask(~static_cast<uint64_t>(0))
{ }

// Restrict to the given type and register bitmasks.
OperandMask(uint8_t typeMask, uint64_t registerMask):
typeMask(typeMask),
registerMask(registerMask)
{ }
};
#ifdef AVIAN_TAILS
const bool TailCalls = true;
#else
@ -120,35 +136,35 @@ class Assembler {
virtual void plan
(lir::UnaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned aSize, OperandMask& aMask,
bool* thunk) = 0;
virtual void planSource
(lir::BinaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned aSize, OperandMask& aMask,
unsigned bSize, bool* thunk) = 0;
virtual void planDestination
(lir::BinaryOperation op,
unsigned aSize, uint8_t aTypeMask, uint64_t aRegisterMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask) = 0;
unsigned aSize, const OperandMask& aMask,
unsigned bSize, OperandMask& bMask) = 0;
virtual void planMove
(unsigned size, uint8_t* srcTypeMask, uint64_t* srcRegisterMask,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask,
uint8_t dstTypeMask, uint64_t dstRegisterMask) = 0;
(unsigned size, OperandMask& src,
OperandMask& tmp,
const OperandMask& dst) = 0;
virtual void planSource
(lir::TernaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask,
unsigned aSize, OperandMask& aMask,
unsigned bSize, OperandMask& bMask,
unsigned cSize, bool* thunk) = 0;
virtual void planDestination
(lir::TernaryOperation op,
unsigned aSize, uint8_t aTypeMask, uint64_t aRegisterMask,
unsigned bSize, uint8_t bTypeMask, uint64_t bRegisterMask,
unsigned cSize, uint8_t* cTypeMask, uint64_t* cRegisterMask) = 0;
unsigned aSize, const OperandMask& aMask,
unsigned bSize, const OperandMask& bMask,
unsigned cSize, OperandMask& cMask) = 0;
virtual Assembler* makeAssembler(vm::Allocator*, vm::Zone*) = 0;

View File

@ -334,16 +334,13 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
virtual unsigned cost(Context* c, SiteMask dstMask)
{
uint8_t srcTypeMask;
uint64_t srcRegisterMask;
uint8_t tmpTypeMask;
uint64_t tmpRegisterMask;
OperandMask src;
OperandMask tmp;
c->arch->planMove
(size, &srcTypeMask, &srcRegisterMask,
&tmpTypeMask, &tmpRegisterMask,
dstMask.typeMask, dstMask.registerMask);
(size, src, tmp,
OperandMask(dstMask.typeMask, dstMask.registerMask));
SiteMask srcMask(srcTypeMask, srcRegisterMask, AnyFrameIndex);
SiteMask srcMask(src.typeMask, src.registerMask, AnyFrameIndex);
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
Site* s = it.next();
if (s->match(c, srcMask) or s->match(c, dstMask)) {
@ -359,26 +356,23 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
bool includeNextWord;
} costCalculator(value, size, includeNextWord);
Site* dst = pickTargetSite
Site* dstSite = pickTargetSite
(c, read, intersectRead, registerReserveCount, &costCalculator);
uint8_t srcTypeMask;
uint64_t srcRegisterMask;
uint8_t tmpTypeMask;
uint64_t tmpRegisterMask;
OperandMask src;
OperandMask tmp;
c->arch->planMove
(size, &srcTypeMask, &srcRegisterMask,
&tmpTypeMask, &tmpRegisterMask,
1 << dst->type(c), dst->registerMask(c));
(size, src, tmp,
OperandMask(1 << dstSite->type(c), dstSite->registerMask(c)));
SiteMask srcMask(srcTypeMask, srcRegisterMask, AnyFrameIndex);
SiteMask srcMask(src.typeMask, src.registerMask, AnyFrameIndex);
unsigned cost = 0xFFFFFFFF;
Site* src = 0;
Site* srcSite = 0;
for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
Site* s = it.next();
unsigned v = s->copyCost(c, dst);
unsigned v = s->copyCost(c, dstSite);
if (v == 0) {
src = s;
srcSite = s;
cost = 0;
break;
}
@ -386,50 +380,50 @@ maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
v += CopyPenalty;
}
if (v < cost) {
src = s;
srcSite = s;
cost = v;
}
}
if (cost) {
if (DebugMoves) {
char srcb[256]; src->toString(c, srcb, 256);
char dstb[256]; dst->toString(c, dstb, 256);
char srcb[256]; srcSite->toString(c, srcb, 256);
char dstb[256]; dstSite->toString(c, dstb, 256);
fprintf(stderr, "maybe move %s to %s for %p to %p\n",
srcb, dstb, value, value);
}
src->freeze(c, value);
srcSite->freeze(c, value);
value->addSite(c, dst);
value->addSite(c, dstSite);
src->thaw(c, value);
srcSite->thaw(c, value);
if (not src->match(c, srcMask)) {
src->freeze(c, value);
dst->freeze(c, value);
if (not srcSite->match(c, srcMask)) {
srcSite->freeze(c, value);
dstSite->freeze(c, value);
SiteMask tmpMask(tmpTypeMask, tmpRegisterMask, AnyFrameIndex);
SiteMask tmpMask(tmp.typeMask, tmp.registerMask, AnyFrameIndex);
SingleRead tmpRead(tmpMask, 0);
tmpRead.value = value;
tmpRead.successor_ = value;
Site* tmp = pickTargetSite(c, &tmpRead, true);
Site* tmpSite = pickTargetSite(c, &tmpRead, true);
value->addSite(c, tmp);
value->addSite(c, tmpSite);
move(c, value, src, tmp);
move(c, value, srcSite, tmpSite);
dst->thaw(c, value);
src->thaw(c, value);
dstSite->thaw(c, value);
srcSite->thaw(c, value);
src = tmp;
srcSite = tmpSite;
}
move(c, value, src, dst);
move(c, value, srcSite, dstSite);
}
return dst;
return dstSite;
}
Site*
@ -757,145 +751,143 @@ saveLocals(Context* c, Event* e)
void
maybeMove(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst,
unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue,
const SiteMask& dstMask)
{
Read* read = live(c, dst);
Read* read = live(c, dstValue);
bool isStore = read == 0;
Site* target;
if (dst->target) {
target = dst->target;
if (dstValue->target) {
target = dstValue->target;
} else if (isStore) {
return;
} else {
target = pickTargetSite(c, read);
}
unsigned cost = src->source->copyCost(c, target);
unsigned cost = srcValue->source->copyCost(c, target);
if (srcSelectSize < dstSize) cost = 1;
if (cost) {
// todo: let c->arch->planMove decide this:
bool useTemporary = ((target->type(c) == lir::MemoryOperand
and src->source->type(c) == lir::MemoryOperand)
and srcValue->source->type(c) == lir::MemoryOperand)
or (srcSelectSize < dstSize
and target->type(c) != lir::RegisterOperand));
src->source->freeze(c, src);
srcValue->source->freeze(c, srcValue);
dst->addSite(c, target);
dstValue->addSite(c, target);
src->source->thaw(c, src);
srcValue->source->thaw(c, srcValue);
bool addOffset = srcSize != srcSelectSize
and c->arch->bigEndian()
and src->source->type(c) == lir::MemoryOperand;
and srcValue->source->type(c) == lir::MemoryOperand;
if (addOffset) {
static_cast<MemorySite*>(src->source)->offset
static_cast<MemorySite*>(srcValue->source)->offset
+= (srcSize - srcSelectSize);
}
target->freeze(c, dst);
target->freeze(c, dstValue);
if (target->match(c, dstMask) and not useTemporary) {
if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256);
char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst);
srcb, dstb, srcValue, dstValue);
}
src->source->freeze(c, src);
srcValue->source->freeze(c, srcValue);
apply(c, type, min(srcSelectSize, dstSize), src->source, src->source,
apply(c, type, min(srcSelectSize, dstSize), srcValue->source, srcValue->source,
dstSize, target, target);
src->source->thaw(c, src);
srcValue->source->thaw(c, srcValue);
} else {
// pick a temporary register which is valid as both a
// destination and a source for the moves we need to perform:
dst->removeSite(c, target);
dstValue->removeSite(c, target);
bool thunk;
uint8_t srcTypeMask;
uint64_t srcRegisterMask;
OperandMask src;
c->arch->planSource(type, dstSize, &srcTypeMask, &srcRegisterMask,
dstSize, &thunk);
c->arch->planSource(type, dstSize, src, dstSize, &thunk);
if (src->type == lir::ValueGeneral) {
srcRegisterMask &= c->regFile->generalRegisters.mask;
if (srcValue->type == lir::ValueGeneral) {
src.registerMask &= c->regFile->generalRegisters.mask;
}
assert(c, thunk == 0);
assert(c, dstMask.typeMask & srcTypeMask & (1 << lir::RegisterOperand));
assert(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand));
Site* tmpTarget = freeRegisterSite
(c, dstMask.registerMask & srcRegisterMask);
(c, dstMask.registerMask & src.registerMask);
src->source->freeze(c, src);
srcValue->source->freeze(c, srcValue);
dst->addSite(c, tmpTarget);
dstValue->addSite(c, tmpTarget);
tmpTarget->freeze(c, dst);
tmpTarget->freeze(c, dstValue);
if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256);
char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; tmpTarget->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst);
srcb, dstb, srcValue, dstValue);
}
apply(c, type, srcSelectSize, src->source, src->source,
apply(c, type, srcSelectSize, srcValue->source, srcValue->source,
dstSize, tmpTarget, tmpTarget);
tmpTarget->thaw(c, dst);
tmpTarget->thaw(c, dstValue);
src->source->thaw(c, src);
srcValue->source->thaw(c, srcValue);
if (useTemporary or isStore) {
if (DebugMoves) {
char srcb[256]; tmpTarget->toString(c, srcb, 256);
char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p to %p\n",
srcb, dstb, src, dst);
srcb, dstb, srcValue, dstValue);
}
dst->addSite(c, target);
dstValue->addSite(c, target);
tmpTarget->freeze(c, dst);
tmpTarget->freeze(c, dstValue);
apply(c, lir::Move, dstSize, tmpTarget, tmpTarget, dstSize, target, target);
tmpTarget->thaw(c, dst);
tmpTarget->thaw(c, dstValue);
if (isStore) {
dst->removeSite(c, tmpTarget);
dstValue->removeSite(c, tmpTarget);
}
}
}
target->thaw(c, dst);
target->thaw(c, dstValue);
if (addOffset) {
static_cast<MemorySite*>(src->source)->offset
static_cast<MemorySite*>(srcValue->source)->offset
-= (srcSize - srcSelectSize);
}
} else {
target = src->source;
target = srcValue->source;
if (DebugMoves) {
char dstb[256]; target->toString(c, dstb, 256);
fprintf(stderr, "null move in %s for %p to %p\n", dstb, src, dst);
fprintf(stderr, "null move in %s for %p to %p\n", dstb, srcValue, dstValue);
}
}
if (isStore) {
dst->removeSite(c, target);
dstValue->removeSite(c, target);
}
}

View File

@ -264,16 +264,15 @@ class CallEvent: public Event {
}
{ bool thunk;
uint8_t typeMask;
uint64_t planRegisterMask;
OperandMask op;
c->arch->plan
((flags & Compiler::Aligned) ? lir::AlignedCall : lir::Call, vm::TargetBytesPerWord,
&typeMask, &planRegisterMask, &thunk);
op, &thunk);
assert(c, not thunk);
this->addRead(c, address, SiteMask
(typeMask, registerMask & planRegisterMask, AnyFrameIndex));
(op.typeMask, registerMask & op.registerMask, AnyFrameIndex));
}
Stack* stack = stackBefore;
@ -543,26 +542,26 @@ void appendReturn(Context* c, unsigned size, Value* value) {
class MoveEvent: public Event {
public:
MoveEvent(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst,
unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue,
const SiteMask& srcLowMask, const SiteMask& srcHighMask):
Event(c), type(type), srcSize(srcSize), srcSelectSize(srcSelectSize),
src(src), dstSize(dstSize), dst(dst)
srcValue(srcValue), dstSize(dstSize), dstValue(dstValue)
{
assert(c, srcSelectSize <= srcSize);
bool noop = srcSelectSize >= dstSize;
if (dstSize > vm::TargetBytesPerWord) {
dst->grow(c);
dstValue->grow(c);
}
if (srcSelectSize > vm::TargetBytesPerWord) {
src->maybeSplit(c);
srcValue->maybeSplit(c);
}
this->addReads(c, src, srcSelectSize, srcLowMask, noop ? dst : 0,
this->addReads(c, srcValue, srcSelectSize, srcLowMask, noop ? dstValue : 0,
srcHighMask,
noop and dstSize > vm::TargetBytesPerWord ? dst->nextWord : 0);
noop and dstSize > vm::TargetBytesPerWord ? dstValue->nextWord : 0);
}
virtual const char* name() {
@ -570,118 +569,116 @@ class MoveEvent: public Event {
}
virtual void compile(Context* c) {
uint8_t dstTypeMask;
uint64_t dstRegisterMask;
OperandMask dst;
c->arch->planDestination
(type,
srcSelectSize,
1 << src->source->type(c),
(static_cast<uint64_t>(src->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(src->source->registerMask(c)),
dstSize,
&dstTypeMask,
&dstRegisterMask);
OperandMask(
1 << srcValue->source->type(c),
(static_cast<uint64_t>(srcValue->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(srcValue->source->registerMask(c))),
dstSize, dst);
SiteMask dstLowMask(dstTypeMask, dstRegisterMask, AnyFrameIndex);
SiteMask dstHighMask(dstTypeMask, dstRegisterMask >> 32, AnyFrameIndex);
SiteMask dstLowMask(dst.typeMask, dst.registerMask, AnyFrameIndex);
SiteMask dstHighMask(dst.typeMask, dst.registerMask >> 32, AnyFrameIndex);
if (srcSelectSize >= vm::TargetBytesPerWord
and dstSize >= vm::TargetBytesPerWord
and srcSelectSize >= dstSize)
{
if (dst->target) {
if (dstValue->target) {
if (dstSize > vm::TargetBytesPerWord) {
if (src->source->registerSize(c) > vm::TargetBytesPerWord) {
apply(c, lir::Move, srcSelectSize, src->source, src->source,
dstSize, dst->target, dst->target);
if (srcValue->source->registerSize(c) > vm::TargetBytesPerWord) {
apply(c, lir::Move, srcSelectSize, srcValue->source, srcValue->source,
dstSize, dstValue->target, dstValue->target);
if (live(c, dst) == 0) {
dst->removeSite(c, dst->target);
if (live(c, dstValue) == 0) {
dstValue->removeSite(c, dstValue->target);
if (dstSize > vm::TargetBytesPerWord) {
dst->nextWord->removeSite(c, dst->nextWord->target);
dstValue->nextWord->removeSite(c, dstValue->nextWord->target);
}
}
} else {
src->nextWord->source->freeze(c, src->nextWord);
srcValue->nextWord->source->freeze(c, srcValue->nextWord);
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src,
vm::TargetBytesPerWord, dst, dstLowMask);
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue,
vm::TargetBytesPerWord, dstValue, dstLowMask);
src->nextWord->source->thaw(c, src->nextWord);
srcValue->nextWord->source->thaw(c, srcValue->nextWord);
maybeMove
(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src->nextWord,
vm::TargetBytesPerWord, dst->nextWord, dstHighMask);
(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue->nextWord,
vm::TargetBytesPerWord, dstValue->nextWord, dstHighMask);
}
} else {
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, src,
vm::TargetBytesPerWord, dst, dstLowMask);
maybeMove(c, lir::Move, vm::TargetBytesPerWord, vm::TargetBytesPerWord, srcValue,
vm::TargetBytesPerWord, dstValue, dstLowMask);
}
} else {
Site* low = pickSiteOrMove(c, src, dst, 0, 0);
Site* low = pickSiteOrMove(c, srcValue, dstValue, 0, 0);
if (dstSize > vm::TargetBytesPerWord) {
pickSiteOrMove(c, src->nextWord, dst->nextWord, low, 1);
pickSiteOrMove(c, srcValue->nextWord, dstValue->nextWord, low, 1);
}
}
} else if (srcSelectSize <= vm::TargetBytesPerWord
and dstSize <= vm::TargetBytesPerWord)
{
maybeMove(c, type, srcSize, srcSelectSize, src, dstSize, dst,
maybeMove(c, type, srcSize, srcSelectSize, srcValue, dstSize, dstValue,
dstLowMask);
} else {
assert(c, srcSize == vm::TargetBytesPerWord);
assert(c, srcSelectSize == vm::TargetBytesPerWord);
if (dst->nextWord->target or live(c, dst->nextWord)) {
if (dstValue->nextWord->target or live(c, dstValue->nextWord)) {
assert(c, dstLowMask.typeMask & (1 << lir::RegisterOperand));
Site* low = freeRegisterSite(c, dstLowMask.registerMask);
src->source->freeze(c, src);
srcValue->source->freeze(c, srcValue);
dst->addSite(c, low);
dstValue->addSite(c, low);
low->freeze(c, dst);
low->freeze(c, dstValue);
if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256);
char srcb[256]; srcValue->source->toString(c, srcb, 256);
char dstb[256]; low->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p\n",
srcb, dstb, src);
srcb, dstb, srcValue);
}
apply(c, lir::Move, vm::TargetBytesPerWord, src->source, src->source,
apply(c, lir::Move, vm::TargetBytesPerWord, srcValue->source, srcValue->source,
vm::TargetBytesPerWord, low, low);
low->thaw(c, dst);
low->thaw(c, dstValue);
src->source->thaw(c, src);
srcValue->source->thaw(c, srcValue);
assert(c, dstHighMask.typeMask & (1 << lir::RegisterOperand));
Site* high = freeRegisterSite(c, dstHighMask.registerMask);
low->freeze(c, dst);
low->freeze(c, dstValue);
dst->nextWord->addSite(c, high);
dstValue->nextWord->addSite(c, high);
high->freeze(c, dst->nextWord);
high->freeze(c, dstValue->nextWord);
if (DebugMoves) {
char srcb[256]; low->toString(c, srcb, 256);
char dstb[256]; high->toString(c, dstb, 256);
fprintf(stderr, "extend %s to %s for %p %p\n",
srcb, dstb, dst, dst->nextWord);
srcb, dstb, dstValue, dstValue->nextWord);
}
apply(c, lir::Move, vm::TargetBytesPerWord, low, low, dstSize, low, high);
high->thaw(c, dst->nextWord);
high->thaw(c, dstValue->nextWord);
low->thaw(c, dst);
low->thaw(c, dstValue);
} else {
pickSiteOrMove(c, src, dst, 0, 0);
pickSiteOrMove(c, srcValue, dstValue, 0, 0);
}
}
@ -693,29 +690,28 @@ class MoveEvent: public Event {
lir::BinaryOperation type;
unsigned srcSize;
unsigned srcSelectSize;
Value* src;
Value* srcValue;
unsigned dstSize;
Value* dst;
Value* dstValue;
};
void
appendMove(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst)
unsigned srcSelectSize, Value* srcValue, unsigned dstSize, Value* dstValue)
{
bool thunk;
uint8_t srcTypeMask;
uint64_t srcRegisterMask;
OperandMask src;
c->arch->planSource
(type, srcSelectSize, &srcTypeMask, &srcRegisterMask, dstSize, &thunk);
(type, srcSelectSize, src, dstSize, &thunk);
assert(c, not thunk);
append(c, new(c->zone)
MoveEvent
(c, type, srcSize, srcSelectSize, src, dstSize, dst,
SiteMask(srcTypeMask, srcRegisterMask, AnyFrameIndex),
SiteMask(srcTypeMask, srcRegisterMask >> 32, AnyFrameIndex)));
(c, type, srcSize, srcSelectSize, srcValue, dstSize, dstValue,
SiteMask(src.typeMask, src.registerMask, AnyFrameIndex),
SiteMask(src.typeMask, src.registerMask >> 32, AnyFrameIndex)));
}
@ -791,28 +787,28 @@ Site* getTarget(Context* c, Value* value, Value* result, const SiteMask& resultM
class CombineEvent: public Event {
public:
CombineEvent(Context* c, lir::TernaryOperation type,
unsigned firstSize, Value* first,
unsigned secondSize, Value* second,
unsigned resultSize, Value* result,
unsigned firstSize, Value* firstValue,
unsigned secondSize, Value* secondValue,
unsigned resultSize, Value* resultValue,
const SiteMask& firstLowMask,
const SiteMask& firstHighMask,
const SiteMask& secondLowMask,
const SiteMask& secondHighMask):
Event(c), type(type), firstSize(firstSize), first(first),
secondSize(secondSize), second(second), resultSize(resultSize),
result(result)
Event(c), type(type), firstSize(firstSize), firstValue(firstValue),
secondSize(secondSize), secondValue(secondValue), resultSize(resultSize),
resultValue(resultValue)
{
this->addReads(c, first, firstSize, firstLowMask, firstHighMask);
this->addReads(c, firstValue, firstSize, firstLowMask, firstHighMask);
if (resultSize > vm::TargetBytesPerWord) {
result->grow(c);
resultValue->grow(c);
}
bool condensed = c->arch->alwaysCondensed(type);
this->addReads(c, second, secondSize,
secondLowMask, condensed ? result : 0,
secondHighMask, condensed ? result->nextWord : 0);
this->addReads(c, secondValue, secondSize,
secondLowMask, condensed ? resultValue : 0,
secondHighMask, condensed ? resultValue->nextWord : 0);
}
virtual const char* name() {
@ -820,99 +816,99 @@ class CombineEvent: public Event {
}
virtual void compile(Context* c) {
assert(c, first->source->type(c) == first->nextWord->source->type(c));
assert(c, firstValue->source->type(c) == firstValue->nextWord->source->type(c));
// if (second->source->type(c) != second->nextWord->source->type(c)) {
// if (secondValue->source->type(c) != secondValue->nextWord->source->type(c)) {
// fprintf(stderr, "%p %p %d : %p %p %d\n",
// second, second->source, second->source->type(c),
// second->nextWord, second->nextWord->source,
// second->nextWord->source->type(c));
// secondValue, secondValue->source, secondValue->source->type(c),
// secondValue->nextWord, secondValue->nextWord->source,
// secondValue->nextWord->source->type(c));
// }
assert(c, second->source->type(c) == second->nextWord->source->type(c));
assert(c, secondValue->source->type(c) == secondValue->nextWord->source->type(c));
freezeSource(c, firstSize, first);
freezeSource(c, firstSize, firstValue);
uint8_t cTypeMask;
uint64_t cRegisterMask;
OperandMask cMask;
c->arch->planDestination
(type,
firstSize,
1 << first->source->type(c),
(static_cast<uint64_t>(first->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(first->source->registerMask(c)),
OperandMask(
1 << firstValue->source->type(c),
(static_cast<uint64_t>(firstValue->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(firstValue->source->registerMask(c))),
secondSize,
1 << second->source->type(c),
(static_cast<uint64_t>(second->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(second->source->registerMask(c)),
OperandMask(
1 << secondValue->source->type(c),
(static_cast<uint64_t>(secondValue->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(secondValue->source->registerMask(c))),
resultSize,
&cTypeMask,
&cRegisterMask);
cMask);
SiteMask resultLowMask(cTypeMask, cRegisterMask, AnyFrameIndex);
SiteMask resultHighMask(cTypeMask, cRegisterMask >> 32, AnyFrameIndex);
SiteMask resultLowMask(cMask.typeMask, cMask.registerMask, AnyFrameIndex);
SiteMask resultHighMask(cMask.typeMask, cMask.registerMask >> 32, AnyFrameIndex);
Site* low = getTarget(c, second, result, resultLowMask);
Site* low = getTarget(c, secondValue, resultValue, resultLowMask);
unsigned lowSize = low->registerSize(c);
Site* high
= (resultSize > lowSize
? getTarget(c, second->nextWord, result->nextWord, resultHighMask)
? getTarget(c, secondValue->nextWord, resultValue->nextWord, resultHighMask)
: low);
// fprintf(stderr, "combine %p:%p and %p:%p into %p:%p\n",
// first, first->nextWord,
// second, second->nextWord,
// result, result->nextWord);
// firstValue, firstValue->nextWord,
// secondValue, secondValue->nextWord,
// resultValue, resultValue->nextWord);
apply(c, type,
firstSize, first->source, first->nextWord->source,
secondSize, second->source, second->nextWord->source,
firstSize, firstValue->source, firstValue->nextWord->source,
secondSize, secondValue->source, secondValue->nextWord->source,
resultSize, low, high);
thawSource(c, firstSize, first);
thawSource(c, firstSize, firstValue);
for (Read* r = reads; r; r = r->eventNext) {
popRead(c, this, r->value);
}
low->thaw(c, second);
low->thaw(c, secondValue);
if (resultSize > lowSize) {
high->thaw(c, second->nextWord);
high->thaw(c, secondValue->nextWord);
}
if (live(c, result)) {
result->addSite(c, low);
if (resultSize > lowSize and live(c, result->nextWord)) {
result->nextWord->addSite(c, high);
if (live(c, resultValue)) {
resultValue->addSite(c, low);
if (resultSize > lowSize and live(c, resultValue->nextWord)) {
resultValue->nextWord->addSite(c, high);
}
}
}
lir::TernaryOperation type;
unsigned firstSize;
Value* first;
Value* firstValue;
unsigned secondSize;
Value* second;
Value* secondValue;
unsigned resultSize;
Value* result;
Value* resultValue;
};
void
appendCombine(Context* c, lir::TernaryOperation type,
unsigned firstSize, Value* first,
unsigned secondSize, Value* second,
unsigned resultSize, Value* result)
unsigned firstSize, Value* firstValue,
unsigned secondSize, Value* secondValue,
unsigned resultSize, Value* resultValue)
{
bool thunk;
uint8_t firstTypeMask;
uint64_t firstRegisterMask;
uint8_t secondTypeMask;
uint64_t secondRegisterMask;
OperandMask firstMask;
OperandMask secondMask;
c->arch->planSource(type, firstSize, &firstTypeMask, &firstRegisterMask,
secondSize, &secondTypeMask, &secondRegisterMask,
resultSize, &thunk);
c->arch->planSource(type,
firstSize, firstMask,
secondSize, secondMask,
resultSize,
&thunk);
if (thunk) {
Stack* oldStack = c->stack;
@ -924,8 +920,8 @@ appendCombine(Context* c, lir::TernaryOperation type,
unsigned stackSize = vm::ceilingDivide(secondSize, vm::TargetBytesPerWord)
+ vm::ceilingDivide(firstSize, vm::TargetBytesPerWord);
compiler::push(c, vm::ceilingDivide(secondSize, vm::TargetBytesPerWord), second);
compiler::push(c, vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), first);
compiler::push(c, vm::ceilingDivide(secondSize, vm::TargetBytesPerWord), secondValue);
compiler::push(c, vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), firstValue);
if (threadParameter) {
++ stackSize;
@ -937,40 +933,40 @@ appendCombine(Context* c, lir::TernaryOperation type,
c->stack = oldStack;
appendCall
(c, value(c, lir::ValueGeneral, constantSite(c, handler)), 0, 0, result,
(c, value(c, lir::ValueGeneral, constantSite(c, handler)), 0, 0, resultValue,
resultSize, argumentStack, stackSize, 0);
} else {
append
(c, new(c->zone)
CombineEvent
(c, type,
firstSize, first,
secondSize, second,
resultSize, result,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask >> 32, AnyFrameIndex)));
firstSize, firstValue,
secondSize, secondValue,
resultSize, resultValue,
SiteMask(firstMask.typeMask, firstMask.registerMask, AnyFrameIndex),
SiteMask(firstMask.typeMask, firstMask.registerMask >> 32, AnyFrameIndex),
SiteMask(secondMask.typeMask, secondMask.registerMask, AnyFrameIndex),
SiteMask(secondMask.typeMask, secondMask.registerMask >> 32, AnyFrameIndex)));
}
}
class TranslateEvent: public Event {
public:
TranslateEvent(Context* c, lir::BinaryOperation type, unsigned valueSize,
Value* value, unsigned resultSize, Value* result,
Value* value, unsigned resultSize, Value* resultValue,
const SiteMask& valueLowMask,
const SiteMask& valueHighMask):
Event(c), type(type), valueSize(valueSize), resultSize(resultSize),
value(value), result(result)
value(value), resultValue(resultValue)
{
bool condensed = c->arch->alwaysCondensed(type);
if (resultSize > vm::TargetBytesPerWord) {
result->grow(c);
resultValue->grow(c);
}
this->addReads(c, value, valueSize, valueLowMask, condensed ? result : 0,
valueHighMask, condensed ? result->nextWord : 0);
this->addReads(c, value, valueSize, valueLowMask, condensed ? resultValue : 0,
valueHighMask, condensed ? resultValue->nextWord : 0);
}
virtual const char* name() {
@ -980,27 +976,26 @@ class TranslateEvent: public Event {
virtual void compile(Context* c) {
assert(c, value->source->type(c) == value->nextWord->source->type(c));
uint8_t bTypeMask;
uint64_t bRegisterMask;
OperandMask bMask;
c->arch->planDestination
(type,
valueSize,
1 << value->source->type(c),
(static_cast<uint64_t>(value->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(value->source->registerMask(c)),
OperandMask(
1 << value->source->type(c),
(static_cast<uint64_t>(value->nextWord->source->registerMask(c)) << 32)
| static_cast<uint64_t>(value->source->registerMask(c))),
resultSize,
&bTypeMask,
&bRegisterMask);
bMask);
SiteMask resultLowMask(bTypeMask, bRegisterMask, AnyFrameIndex);
SiteMask resultHighMask(bTypeMask, bRegisterMask >> 32, AnyFrameIndex);
SiteMask resultLowMask(bMask.typeMask, bMask.registerMask, AnyFrameIndex);
SiteMask resultHighMask(bMask.typeMask, bMask.registerMask >> 32, AnyFrameIndex);
Site* low = getTarget(c, value, result, resultLowMask);
Site* low = getTarget(c, value, resultValue, resultLowMask);
unsigned lowSize = low->registerSize(c);
Site* high
= (resultSize > lowSize
? getTarget(c, value->nextWord, result->nextWord, resultHighMask)
? getTarget(c, value->nextWord, resultValue->nextWord, resultHighMask)
: low);
apply(c, type, valueSize, value->source, value->nextWord->source,
@ -1015,10 +1010,10 @@ class TranslateEvent: public Event {
high->thaw(c, value->nextWord);
}
if (live(c, result)) {
result->addSite(c, low);
if (resultSize > lowSize and live(c, result->nextWord)) {
result->nextWord->addSite(c, high);
if (live(c, resultValue)) {
resultValue->addSite(c, low);
if (resultSize > lowSize and live(c, resultValue->nextWord)) {
resultValue->nextWord->addSite(c, high);
}
}
}
@ -1027,7 +1022,7 @@ class TranslateEvent: public Event {
unsigned valueSize;
unsigned resultSize;
Value* value;
Value* result;
Value* resultValue;
Read* resultRead;
SiteMask resultLowMask;
SiteMask resultHighMask;
@ -1035,19 +1030,18 @@ class TranslateEvent: public Event {
void
appendTranslate(Context* c, lir::BinaryOperation type, unsigned firstSize,
Value* first, unsigned resultSize, Value* result)
Value* firstValue, unsigned resultSize, Value* resultValue)
{
bool thunk;
uint8_t firstTypeMask;
uint64_t firstRegisterMask;
OperandMask first;
c->arch->planSource(type, firstSize, &firstTypeMask, &firstRegisterMask,
c->arch->planSource(type, firstSize, first,
resultSize, &thunk);
if (thunk) {
Stack* oldStack = c->stack;
compiler::push(c, vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), first);
compiler::push(c, vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), firstValue);
Stack* argumentStack = c->stack;
c->stack = oldStack;
@ -1056,14 +1050,14 @@ appendTranslate(Context* c, lir::BinaryOperation type, unsigned firstSize,
(c, value
(c, lir::ValueGeneral, constantSite
(c, c->client->getThunk(type, firstSize, resultSize))),
0, 0, result, resultSize, argumentStack,
0, 0, resultValue, resultSize, argumentStack,
vm::ceilingDivide(firstSize, vm::TargetBytesPerWord), 0);
} else {
append(c, new(c->zone)
TranslateEvent
(c, type, firstSize, first, resultSize, result,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex)));
(c, type, firstSize, firstValue, resultSize, resultValue,
SiteMask(first.typeMask, first.registerMask, AnyFrameIndex),
SiteMask(first.typeMask, first.registerMask >> 32, AnyFrameIndex)));
}
}
@ -1306,23 +1300,24 @@ thunkBranch(Context* c, lir::TernaryOperation type)
class BranchEvent: public Event {
public:
BranchEvent(Context* c, lir::TernaryOperation type, unsigned size,
Value* first, Value* second, Value* address,
Value* firstValue, Value* secondValue, Value* addressValue,
const SiteMask& firstLowMask,
const SiteMask& firstHighMask,
const SiteMask& secondLowMask,
const SiteMask& secondHighMask):
Event(c), type(type), size(size), first(first), second(second),
address(address)
Event(c), type(type), size(size), firstValue(firstValue), secondValue(secondValue),
addressValue(addressValue)
{
this->addReads(c, first, size, firstLowMask, firstHighMask);
this->addReads(c, second, size, secondLowMask, secondHighMask);
this->addReads(c, firstValue, size, firstLowMask, firstHighMask);
this->addReads(c, secondValue, size, secondLowMask, secondHighMask);
uint8_t typeMask;
uint64_t registerMask;
c->arch->planDestination(type, size, 0, 0, size, 0, 0, vm::TargetBytesPerWord,
&typeMask, &registerMask);
OperandMask dstMask;
c->arch->planDestination(type,
size, OperandMask(0, 0),
size, OperandMask(0, 0),
vm::TargetBytesPerWord, dstMask);
this->addRead(c, address, SiteMask(typeMask, registerMask, AnyFrameIndex));
this->addRead(c, addressValue, SiteMask(dstMask.typeMask, dstMask.registerMask, AnyFrameIndex));
}
virtual const char* name() {
@ -1330,8 +1325,8 @@ class BranchEvent: public Event {
}
virtual void compile(Context* c) {
ConstantSite* firstConstant = findConstantSite(c, first);
ConstantSite* secondConstant = findConstantSite(c, second);
ConstantSite* firstConstant = findConstantSite(c, firstValue);
ConstantSite* secondConstant = findConstantSite(c, secondValue);
if (not this->isUnreachable()) {
if (firstConstant
@ -1339,31 +1334,31 @@ class BranchEvent: public Event {
and firstConstant->value->resolved()
and secondConstant->value->resolved())
{
int64_t firstValue = firstConstant->value->value();
int64_t secondValue = secondConstant->value->value();
int64_t firstConstVal = firstConstant->value->value();
int64_t secondConstVal = secondConstant->value->value();
if (size > vm::TargetBytesPerWord) {
firstValue |= findConstantSite
(c, first->nextWord)->value->value() << 32;
secondValue |= findConstantSite
(c, second->nextWord)->value->value() << 32;
firstConstVal |= findConstantSite
(c, firstValue->nextWord)->value->value() << 32;
secondConstVal |= findConstantSite
(c, secondValue->nextWord)->value->value() << 32;
}
if (shouldJump(c, type, size, firstValue, secondValue)) {
apply(c, lir::Jump, vm::TargetBytesPerWord, address->source, address->source);
if (shouldJump(c, type, size, firstConstVal, secondConstVal)) {
apply(c, lir::Jump, vm::TargetBytesPerWord, addressValue->source, addressValue->source);
}
} else {
freezeSource(c, size, first);
freezeSource(c, size, second);
freezeSource(c, vm::TargetBytesPerWord, address);
freezeSource(c, size, firstValue);
freezeSource(c, size, secondValue);
freezeSource(c, vm::TargetBytesPerWord, addressValue);
apply(c, type, size, first->source, first->nextWord->source,
size, second->source, second->nextWord->source,
vm::TargetBytesPerWord, address->source, address->source);
apply(c, type, size, firstValue->source, firstValue->nextWord->source,
size, secondValue->source, secondValue->nextWord->source,
vm::TargetBytesPerWord, addressValue->source, addressValue->source);
thawSource(c, vm::TargetBytesPerWord, address);
thawSource(c, size, second);
thawSource(c, size, first);
thawSource(c, vm::TargetBytesPerWord, addressValue);
thawSource(c, size, secondValue);
thawSource(c, size, firstValue);
}
}
@ -1376,24 +1371,23 @@ class BranchEvent: public Event {
lir::TernaryOperation type;
unsigned size;
Value* first;
Value* second;
Value* address;
Value* firstValue;
Value* secondValue;
Value* addressValue;
};
void
appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first,
Value* second, Value* address)
appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* firstValue,
Value* secondValue, Value* addressValue)
{
bool thunk;
uint8_t firstTypeMask;
uint64_t firstRegisterMask;
uint8_t secondTypeMask;
uint64_t secondRegisterMask;
OperandMask firstMask;
OperandMask secondMask;
c->arch->planSource(type, size, &firstTypeMask, &firstRegisterMask,
size, &secondTypeMask, &secondRegisterMask,
vm::TargetBytesPerWord, &thunk);
c->arch->planSource(type,
size, firstMask,
size, secondMask,
vm::TargetBytesPerWord, &thunk);
if (thunk) {
Stack* oldStack = c->stack;
@ -1404,8 +1398,8 @@ appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first
assert(c, not threadParameter);
compiler::push(c, vm::ceilingDivide(size, vm::TargetBytesPerWord), second);
compiler::push(c, vm::ceilingDivide(size, vm::TargetBytesPerWord), first);
compiler::push(c, vm::ceilingDivide(size, vm::TargetBytesPerWord), secondValue);
compiler::push(c, vm::ceilingDivide(size, vm::TargetBytesPerWord), firstValue);
Stack* argumentStack = c->stack;
c->stack = oldStack;
@ -1418,16 +1412,16 @@ appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first
appendBranch(c, thunkBranch(c, type), 4, value
(c, lir::ValueGeneral, constantSite(c, static_cast<int64_t>(0))),
result, address);
result, addressValue);
} else {
append
(c, new(c->zone)
BranchEvent
(c, type, size, first, second, address,
SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask, AnyFrameIndex),
SiteMask(secondTypeMask, secondRegisterMask >> 32, AnyFrameIndex)));
(c, type, size, firstValue, secondValue, addressValue,
SiteMask(firstMask.typeMask, firstMask.registerMask, AnyFrameIndex),
SiteMask(firstMask.typeMask, firstMask.registerMask >> 32, AnyFrameIndex),
SiteMask(secondMask.typeMask, secondMask.registerMask, AnyFrameIndex),
SiteMask(secondMask.typeMask, secondMask.registerMask >> 32, AnyFrameIndex)));
}
}
@ -1475,13 +1469,12 @@ class JumpEvent: public Event {
cleanLocals(cleanLocals)
{
bool thunk;
uint8_t typeMask;
uint64_t registerMask;
c->arch->plan(type, vm::TargetBytesPerWord, &typeMask, &registerMask, &thunk);
OperandMask mask;
c->arch->plan(type, vm::TargetBytesPerWord, mask, &thunk);
assert(c, not thunk);
this->addRead(c, address, SiteMask(typeMask, registerMask, AnyFrameIndex));
this->addRead(c, address, SiteMask(mask.typeMask, mask.registerMask, AnyFrameIndex));
}
virtual const char* name() {

View File

@ -40,6 +40,14 @@ class SiteMask {
return SiteMask(1 << lir::RegisterOperand, 1 << number, NoFrameIndex);
}
static SiteMask lowPart(const OperandMask& mask) {
return SiteMask(mask.typeMask, mask.registerMask, AnyFrameIndex);
}
static SiteMask highPart(const OperandMask& mask) {
return SiteMask(mask.typeMask, mask.registerMask >> 32, AnyFrameIndex);
}
uint8_t typeMask;
uint32_t registerMask;
int frameIndex;

View File

@ -2258,27 +2258,27 @@ class MyArchitecture: public Assembler::Architecture {
virtual void plan
(lir::UnaryOperation,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned, OperandMask& aMask,
bool* thunk)
{
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = ~static_cast<uint64_t>(0);
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = ~static_cast<uint64_t>(0);
*thunk = false;
}
virtual void planSource
(lir::BinaryOperation op,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned, OperandMask& aMask,
unsigned, bool* thunk)
{
*aTypeMask = ~0;
*aRegisterMask = ~static_cast<uint64_t>(0);
aMask.typeMask = ~0;
aMask.registerMask = ~static_cast<uint64_t>(0);
*thunk = false;
switch (op) {
case lir::Negate:
*aTypeMask = (1 << lir::RegisterOperand);
aMask.typeMask = (1 << lir::RegisterOperand);
break;
case lir::Absolute:
@ -2298,15 +2298,15 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planDestination
(lir::BinaryOperation op,
unsigned, uint8_t, uint64_t,
unsigned, uint8_t* bTypeMask, uint64_t* bRegisterMask)
unsigned, const OperandMask& aMask UNUSED,
unsigned, OperandMask& bMask)
{
*bTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*bRegisterMask = ~static_cast<uint64_t>(0);
bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
bMask.registerMask = ~static_cast<uint64_t>(0);
switch (op) {
case lir::Negate:
*bTypeMask = (1 << lir::RegisterOperand);
bMask.typeMask = (1 << lir::RegisterOperand);
break;
default:
@ -2315,35 +2315,35 @@ class MyArchitecture: public Assembler::Architecture {
}
virtual void planMove
(unsigned, uint8_t* srcTypeMask, uint64_t* srcRegisterMask,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask,
uint8_t dstTypeMask, uint64_t)
(unsigned, OperandMask& srcMask,
OperandMask& tmpMask,
const OperandMask& dstMask)
{
*srcTypeMask = ~0;
*srcRegisterMask = ~static_cast<uint64_t>(0);
srcMask.typeMask = ~0;
srcMask.registerMask = ~static_cast<uint64_t>(0);
*tmpTypeMask = 0;
*tmpRegisterMask = 0;
tmpMask.typeMask = 0;
tmpMask.registerMask = 0;
if (dstTypeMask & (1 << lir::MemoryOperand)) {
if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
// can't move directly from memory or constant to memory
*srcTypeMask = 1 << lir::RegisterOperand;
*tmpTypeMask = 1 << lir::RegisterOperand;
*tmpRegisterMask = ~static_cast<uint64_t>(0);
srcMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.registerMask = ~static_cast<uint64_t>(0);
}
}
virtual void planSource
(lir::TernaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned, uint8_t* bTypeMask, uint64_t* bRegisterMask,
unsigned aSize, OperandMask& aMask,
unsigned, OperandMask& bMask,
unsigned, bool* thunk)
{
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = ~static_cast<uint64_t>(0);
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = ~static_cast<uint64_t>(0);
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = ~static_cast<uint64_t>(0);
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = ~static_cast<uint64_t>(0);
*thunk = false;
@ -2351,12 +2351,12 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Add:
case lir::Subtract:
if (aSize == 8) {
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand);
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
}
break;
case lir::Multiply:
*aTypeMask = *bTypeMask = (1 << lir::RegisterOperand);
aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand);
break;
case lir::Divide:
@ -2370,7 +2370,7 @@ class MyArchitecture: public Assembler::Architecture {
if (true) {//if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true;
} else {
*aTypeMask = (1 << lir::RegisterOperand);
aMask.typeMask = (1 << lir::RegisterOperand);
}
break;
@ -2399,16 +2399,16 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planDestination
(lir::TernaryOperation op,
unsigned, uint8_t, uint64_t,
unsigned, uint8_t, const uint64_t,
unsigned, uint8_t* cTypeMask, uint64_t* cRegisterMask)
unsigned, const OperandMask& aMask UNUSED,
unsigned, const OperandMask& bMask UNUSED,
unsigned, OperandMask& cMask)
{
if (isBranch(op)) {
*cTypeMask = (1 << lir::ConstantOperand);
*cRegisterMask = 0;
cMask.typeMask = (1 << lir::ConstantOperand);
cMask.registerMask = 0;
} else {
*cTypeMask = (1 << lir::RegisterOperand);
*cRegisterMask = ~static_cast<uint64_t>(0);
cMask.typeMask = (1 << lir::RegisterOperand);
cMask.registerMask = ~static_cast<uint64_t>(0);
}
}

View File

@ -2992,37 +2992,34 @@ class MyArchitecture: public Assembler::Architecture {
virtual void plan
(lir::UnaryOperation,
unsigned, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned, OperandMask& aMask,
bool* thunk)
{
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand)
| (1 << lir::ConstantOperand);
*aRegisterMask = ~static_cast<uint64_t>(0);
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand) | (1 << lir::ConstantOperand);
*thunk = false;
}
virtual void planSource
(lir::BinaryOperation op,
unsigned aSize, uint8_t* aTypeMask, uint64_t* aRegisterMask,
unsigned aSize, OperandMask& aMask,
unsigned bSize, bool* thunk)
{
*aTypeMask = ~0;
*aRegisterMask = GeneralRegisterMask |
aMask.registerMask = GeneralRegisterMask |
(static_cast<uint64_t>(GeneralRegisterMask) << 32);
*thunk = false;
switch (op) {
case lir::Negate:
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = (static_cast<uint64_t>(1) << (rdx + 32))
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32))
| (static_cast<uint64_t>(1) << rax);
break;
case lir::Absolute:
if (aSize <= TargetBytesPerWord) {
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = (static_cast<uint64_t>(1) << rax);
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(1) << rax);
} else {
*thunk = true;
}
@ -3030,8 +3027,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatAbsolute:
if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
} else {
*thunk = true;
@ -3041,8 +3038,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatNegate:
// floatNegateRR does not support doubles
if (useSSE(&c) and aSize == 4 and bSize == 4) {
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = FloatRegisterMask;
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = FloatRegisterMask;
} else {
*thunk = true;
}
@ -3050,8 +3047,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatSquareRoot:
if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
} else {
*thunk = true;
@ -3060,8 +3057,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Float2Float:
if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
} else {
*thunk = true;
@ -3074,8 +3071,8 @@ class MyArchitecture: public Assembler::Architecture {
// thunks or produce inline machine code which handles edge
// cases properly.
if (false and useSSE(&c) and bSize <= TargetBytesPerWord) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*aRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
} else {
*thunk = true;
@ -3084,8 +3081,8 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Int2Float:
if (useSSE(&c) and aSize <= TargetBytesPerWord) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*aRegisterMask = GeneralRegisterMask
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
aMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} else {
*thunk = true;
@ -3093,20 +3090,20 @@ class MyArchitecture: public Assembler::Architecture {
break;
case lir::Move:
*aTypeMask = ~0;
*aRegisterMask = ~static_cast<uint64_t>(0);
aMask.typeMask = ~0;
aMask.registerMask = ~static_cast<uint64_t>(0);
if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
const uint32_t mask
= GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
*aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask;
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
} else if (aSize == 1 or bSize == 1) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
const uint32_t mask
= (1 << rax) | (1 << rcx) | (1 << rdx) | (1 << rbx);
*aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask;
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
}
}
break;
@ -3117,69 +3114,69 @@ class MyArchitecture: public Assembler::Architecture {
}
virtual void planDestination
(lir::BinaryOperation op, unsigned aSize, uint8_t aTypeMask,
uint64_t aRegisterMask, unsigned bSize, uint8_t* bTypeMask,
uint64_t* bRegisterMask)
(lir::BinaryOperation op,
unsigned aSize, const OperandMask& aMask,
unsigned bSize, OperandMask& bMask)
{
*bTypeMask = ~0;
*bRegisterMask = GeneralRegisterMask
bMask.typeMask = ~0;
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
switch (op) {
case lir::Absolute:
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = (static_cast<uint64_t>(1) << rax);
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = (static_cast<uint64_t>(1) << rax);
break;
case lir::FloatAbsolute:
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = aRegisterMask;
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = aMask.registerMask;
break;
case lir::Negate:
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = aRegisterMask;
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = aMask.registerMask;
break;
case lir::FloatNegate:
case lir::FloatSquareRoot:
case lir::Float2Float:
case lir::Int2Float:
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
break;
case lir::Float2Int:
*bTypeMask = (1 << lir::RegisterOperand);
bMask.typeMask = (1 << lir::RegisterOperand);
break;
case lir::Move:
if (aTypeMask & ((1 << lir::MemoryOperand) | 1 << lir::AddressOperand)) {
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GeneralRegisterMask
if (aMask.typeMask & ((1 << lir::MemoryOperand) | 1 << lir::AddressOperand)) {
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32)
| FloatRegisterMask;
} else if (aTypeMask & (1 << lir::RegisterOperand)) {
*bTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
if (aRegisterMask & FloatRegisterMask) {
*bRegisterMask = FloatRegisterMask;
} else if (aMask.typeMask & (1 << lir::RegisterOperand)) {
bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
if (aMask.registerMask & FloatRegisterMask) {
bMask.registerMask = FloatRegisterMask;
} else {
*bRegisterMask = GeneralRegisterMask
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
}
} else {
*bTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
}
if (TargetBytesPerWord == 4) {
if (aSize == 4 and bSize == 8) {
*bRegisterMask = (static_cast<uint64_t>(1) << (rdx + 32))
bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32))
| (static_cast<uint64_t>(1) << rax);
} else if (aSize == 1 or bSize == 1) {
const uint32_t mask
= (1 << rax) | (1 << rcx) | (1 << rdx) | (1 << rbx);
*bRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask;
bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
}
}
break;
@ -3190,44 +3187,44 @@ class MyArchitecture: public Assembler::Architecture {
}
virtual void planMove
(unsigned size, uint8_t* srcTypeMask, uint64_t* srcRegisterMask,
uint8_t* tmpTypeMask, uint64_t* tmpRegisterMask,
uint8_t dstTypeMask, uint64_t dstRegisterMask)
(unsigned size, OperandMask& srcMask,
OperandMask& tmpMask,
const OperandMask& dstMask)
{
*srcTypeMask = ~0;
*srcRegisterMask = ~static_cast<uint64_t>(0);
srcMask.typeMask = ~0;
srcMask.registerMask = ~static_cast<uint64_t>(0);
*tmpTypeMask = 0;
*tmpRegisterMask = 0;
tmpMask.typeMask = 0;
tmpMask.registerMask = 0;
if (dstTypeMask & (1 << lir::MemoryOperand)) {
if (dstMask.typeMask & (1 << lir::MemoryOperand)) {
// can't move directly from memory to memory
*srcTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*tmpTypeMask = 1 << lir::RegisterOperand;
*tmpRegisterMask = GeneralRegisterMask
srcMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
tmpMask.typeMask = 1 << lir::RegisterOperand;
tmpMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
} else if (dstTypeMask & (1 << lir::RegisterOperand)) {
} else if (dstMask.typeMask & (1 << lir::RegisterOperand)) {
if (size > TargetBytesPerWord) {
// can't move directly from FPR to GPR or vice-versa for
// values larger than the GPR size
if (dstRegisterMask & FloatRegisterMask) {
*srcRegisterMask = FloatRegisterMask
if (dstMask.registerMask & FloatRegisterMask) {
srcMask.registerMask = FloatRegisterMask
| (static_cast<uint64_t>(FloatRegisterMask) << 32);
*tmpTypeMask = 1 << lir::MemoryOperand;
} else if (dstRegisterMask & GeneralRegisterMask) {
*srcRegisterMask = GeneralRegisterMask
tmpMask.typeMask = 1 << lir::MemoryOperand;
} else if (dstMask.registerMask & GeneralRegisterMask) {
srcMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*tmpTypeMask = 1 << lir::MemoryOperand;
tmpMask.typeMask = 1 << lir::MemoryOperand;
}
}
if (dstRegisterMask & FloatRegisterMask) {
if (dstMask.registerMask & FloatRegisterMask) {
// can't move directly from constant to FPR
*srcTypeMask &= ~(1 << lir::ConstantOperand);
srcMask.typeMask &= ~(1 << lir::ConstantOperand);
if (size > TargetBytesPerWord) {
*tmpTypeMask = 1 << lir::MemoryOperand;
tmpMask.typeMask = 1 << lir::MemoryOperand;
} else {
*tmpTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*tmpRegisterMask = GeneralRegisterMask
tmpMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
tmpMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
}
}
@ -3236,16 +3233,16 @@ class MyArchitecture: public Assembler::Architecture {
virtual void planSource
(lir::TernaryOperation op,
unsigned aSize, uint8_t *aTypeMask, uint64_t *aRegisterMask,
unsigned bSize, uint8_t* bTypeMask, uint64_t* bRegisterMask,
unsigned aSize, OperandMask& aMask,
unsigned bSize, OperandMask& bMask,
unsigned, bool* thunk)
{
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
*aRegisterMask = GeneralRegisterMask
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand);
aMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*bTypeMask = (1 << lir::RegisterOperand);
*bRegisterMask = GeneralRegisterMask
bMask.typeMask = (1 << lir::RegisterOperand);
bMask.registerMask = GeneralRegisterMask
| (static_cast<uint64_t>(GeneralRegisterMask) << 32);
*thunk = false;
@ -3256,14 +3253,14 @@ class MyArchitecture: public Assembler::Architecture {
case lir::FloatMultiply:
case lir::FloatDivide:
if (useSSE(&c)) {
*aTypeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
*bTypeMask = (1 << lir::RegisterOperand);
aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand);
bMask.typeMask = (1 << lir::RegisterOperand);
const uint64_t mask
= (static_cast<uint64_t>(FloatRegisterMask) << 32)
| FloatRegisterMask;
*aRegisterMask = mask;
*bRegisterMask = mask;
aMask.registerMask = mask;
bMask.registerMask = mask;
} else {
*thunk = true;
}
@ -3276,11 +3273,11 @@ class MyArchitecture: public Assembler::Architecture {
case lir::Multiply:
if (TargetBytesPerWord == 4 and aSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
*aRegisterMask = (static_cast<uint64_t>(mask) << 32) | mask;
*bRegisterMask = (static_cast<uint64_t>(1) << (rdx + 32)) | mask;
aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
bMask.registerMask = (static_cast<uint64_t>(1) << (rdx + 32)) | mask;
} else {
*aRegisterMask = GeneralRegisterMask;
*bRegisterMask = GeneralRegisterMask;
aMask.registerMask = GeneralRegisterMask;
bMask.registerMask = GeneralRegisterMask;
}
break;
@ -3288,9 +3285,9 @@ class MyArchitecture: public Assembler::Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true;
} else {
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
*bRegisterMask = 1 << rax;
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
bMask.registerMask = 1 << rax;
}
break;
@ -3298,9 +3295,9 @@ class MyArchitecture: public Assembler::Architecture {
if (TargetBytesPerWord == 4 and aSize == 8) {
*thunk = true;
} else {
*aTypeMask = (1 << lir::RegisterOperand);
*aRegisterMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
*bRegisterMask = 1 << rax;
aMask.typeMask = (1 << lir::RegisterOperand);
aMask.registerMask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
bMask.registerMask = 1 << rax;
}
break;
@ -3309,13 +3306,13 @@ class MyArchitecture: public Assembler::Architecture {
case lir::UnsignedShiftRight: {
  // x86 variable shifts take the count in cl, so rcx is reserved for the
  // shift amount and excluded from the value operand.
  if (TargetBytesPerWord == 4 and bSize == 8) {
    // 64-bit shift on a 32-bit target: both word halves of both operands
    // must avoid rcx.
    const uint32_t mask = GeneralRegisterMask & ~(1 << rcx);
    aMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
    bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
  } else {
    // Count (first source): any GPR for the high word, rcx for the low.
    aMask.registerMask = (static_cast<uint64_t>(GeneralRegisterMask) << 32)
      | (static_cast<uint64_t>(1) << rcx);
    // Value (second source): anything but rcx.
    const uint32_t mask = GeneralRegisterMask & ~(1 << rcx);
    bMask.registerMask = (static_cast<uint64_t>(mask) << 32) | mask;
  }
} break;
@ -3330,11 +3327,11 @@ class MyArchitecture: public Assembler::Architecture {
case lir::JumpIfFloatLessOrEqualOrUnordered:
case lir::JumpIfFloatGreaterOrEqualOrUnordered:
  // Float comparisons are emitted inline only when SSE is usable; both
  // sources must then be registers from FloatRegisterMask (presumably
  // the xmm set -- confirm). Otherwise fall back to a thunk.
  if (useSSE(&c)) {
    aMask.typeMask = (1 << lir::RegisterOperand);
    aMask.registerMask = (static_cast<uint64_t>(FloatRegisterMask) << 32)
      | FloatRegisterMask;
    // Second source gets the same constraints as the first.
    bMask.typeMask = aMask.typeMask;
    bMask.registerMask = aMask.registerMask;
  } else {
    *thunk = true;
  }
@ -3346,16 +3343,17 @@ class MyArchitecture: public Assembler::Architecture {
}
virtual void planDestination
(lir::TernaryOperation op, unsigned, uint8_t, uint64_t, unsigned, uint8_t,
uint64_t bRegisterMask, unsigned, uint8_t* cTypeMask,
uint64_t* cRegisterMask)
(lir::TernaryOperation op,
unsigned, const OperandMask&,
unsigned, const OperandMask& bMask,
unsigned, OperandMask& cMask)
{
if (isBranch(op)) {
*cTypeMask = (1 << lir::ConstantOperand);
*cRegisterMask = 0;
cMask.typeMask = (1 << lir::ConstantOperand);
cMask.registerMask = 0;
} else {
*cTypeMask = (1 << lir::RegisterOperand);
*cRegisterMask = bRegisterMask;
cMask.typeMask = (1 << lir::RegisterOperand);
cMask.registerMask = bMask.registerMask;
}
}

View File

@ -84,12 +84,11 @@ public:
// For every unary operation in [Call, AlignedJump), the architecture must
// plan it without a thunk and must permit at least one operand type and
// one register.
for(int op = (int)lir::Call; op < (int)lir::AlignedJump; op++) {
  bool thunk;
  OperandMask mask;
  env.arch->plan((lir::UnaryOperation)op, vm::TargetBytesPerWord, mask, &thunk);
  assertFalse(thunk);
  assertNotEqual(static_cast<uint8_t>(0), mask.typeMask);
  assertNotEqual(static_cast<uint64_t>(0), mask.registerMask);
}
}