diff --git a/include/avian/codegen/assembler.h b/include/avian/codegen/assembler.h index 4331c11465..25707f4853 100644 --- a/include/avian/codegen/assembler.h +++ b/include/avian/codegen/assembler.h @@ -26,11 +26,11 @@ class Architecture; class OperandInfo { public: const unsigned size; - const lir::OperandType type; + const lir::Operand::Type type; lir::Operand* const operand; inline OperandInfo(unsigned size, - lir::OperandType type, + lir::Operand::Type type, lir::Operand* operand) : size(size), type(type), operand(operand) { diff --git a/include/avian/codegen/lir.h b/include/avian/codegen/lir.h index 251e31aadd..91b48fb96d 100644 --- a/include/avian/codegen/lir.h +++ b/include/avian/codegen/lir.h @@ -79,17 +79,8 @@ const unsigned NonBranchTernaryOperationCount = FloatMin + 1; const unsigned BranchOperationCount = JumpIfFloatGreaterOrEqualOrUnordered - FloatMin; -enum OperandType { - ConstantOperand, - AddressOperand, - RegisterOperand, - MemoryOperand -}; - enum ValueType { ValueGeneral, ValueFloat }; -const unsigned OperandTypeCount = MemoryOperand + 1; - const int NoRegister = -1; inline bool isBranch(lir::TernaryOperation op) @@ -128,6 +119,16 @@ inline bool isFloatUnaryOp(lir::BinaryOperation op) } class Operand { +public: + + enum class Type { + Constant, + Address, + RegisterPair, + Memory + }; + + const static unsigned TypeCount = (unsigned)Type::Memory + 1; }; class Constant : public Operand { diff --git a/src/codegen/compiler.cpp b/src/codegen/compiler.cpp index 32047d52fc..1e367497d5 100644 --- a/src/codegen/compiler.cpp +++ b/src/codegen/compiler.cpp @@ -256,7 +256,7 @@ Site* pickTargetSite(Context* c, expect(c, target.cost < Target::Impossible); - if (target.type == lir::MemoryOperand) { + if (target.type == lir::Operand::Type::Memory) { return frameSite(c, target.index); } else { return registerSite(c, target.index); @@ -369,7 +369,7 @@ Site* maybeMove(Context* c, size, src, tmp, - OperandMask(1 << dstSite->type(c), dstSite->registerMask(c), 0)); + OperandMask(1 << (unsigned)dstSite->type(c), dstSite->registerMask(c), 0)); SiteMask srcMask = SiteMask::lowPart(src); unsigned cost = 0xFFFFFFFF; @@ -514,14 +514,14 @@ void steal(Context* c, Resource* r, Value* thief) SiteMask generalRegisterMask(Context* c) { - return SiteMask(1 << lir::RegisterOperand, + return SiteMask(1 << (unsigned)lir::Operand::Type::RegisterPair, c->regFile->generalRegisters.mask, NoFrameIndex); } SiteMask generalRegisterOrConstantMask(Context* c) { - return SiteMask((1 << lir::RegisterOperand) | (1 << lir::ConstantOperand), + return SiteMask((1 << (unsigned)lir::Operand::Type::RegisterPair) | (1 << (unsigned)lir::Operand::Type::Constant), c->regFile->generalRegisters.mask, NoFrameIndex); } @@ -616,11 +616,11 @@ bool isHome(Value* v, int frameIndex) bool acceptForResolve(Context* c, Site* s, Read* read, const SiteMask& mask) { if (acceptMatch(c, s, read, mask) and (not s->frozen(c))) { - if (s->type(c) == lir::RegisterOperand) { + if (s->type(c) == lir::Operand::Type::RegisterPair) { return c->availableGeneralRegisterCount > ResolveRegisterReserveCount; } else { assertT(c, - s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))); + s->match(c, SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, AnyFrameIndex))); return isHome(read->value, offsetToFrameIndex(c, static_cast(s)->offset)); @@ -698,7 +698,7 @@ void apply(Context* c, { assertT(c, s1Low->type(c) == s1High->type(c)); - lir::OperandType s1Type = s1Low->type(c); + lir::Operand::Type s1Type = s1Low->type(c); OperandUnion s1Union; 
asAssemblerOperand(c, s1Low, s1High, &s1Union); @@ -717,11 +717,11 @@ void apply(Context* c, assertT(c, s1Low->type(c) == s1High->type(c)); assertT(c, s2Low->type(c) == s2High->type(c)); - lir::OperandType s1Type = s1Low->type(c); + lir::Operand::Type s1Type = s1Low->type(c); OperandUnion s1Union; asAssemblerOperand(c, s1Low, s1High, &s1Union); - lir::OperandType s2Type = s2Low->type(c); + lir::Operand::Type s2Type = s2Low->type(c); OperandUnion s2Union; asAssemblerOperand(c, s2Low, s2High, &s2Union); @@ -746,15 +746,15 @@ void apply(Context* c, assertT(c, s2Low->type(c) == s2High->type(c)); assertT(c, s3Low->type(c) == s3High->type(c)); - lir::OperandType s1Type = s1Low->type(c); + lir::Operand::Type s1Type = s1Low->type(c); OperandUnion s1Union; asAssemblerOperand(c, s1Low, s1High, &s1Union); - lir::OperandType s2Type = s2Low->type(c); + lir::Operand::Type s2Type = s2Low->type(c); OperandUnion s2Union; asAssemblerOperand(c, s2Low, s2High, &s2Union); - lir::OperandType s3Type = s3Low->type(c); + lir::Operand::Type s3Type = s3Low->type(c); OperandUnion s3Union; asAssemblerOperand(c, s3Low, s3High, &s3Union); @@ -782,7 +782,7 @@ void saveLocals(Context* c, Event* e) e->addRead( c, local->value, - SiteMask(1 << lir::MemoryOperand, 0, compiler::frameIndex(c, li))); + SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, compiler::frameIndex(c, li))); } } } @@ -815,10 +815,10 @@ void maybeMove(Context* c, if (cost) { // todo: let c->arch->planMove decide this: - bool useTemporary = ((target->type(c) == lir::MemoryOperand - and srcValue->source->type(c) == lir::MemoryOperand) + bool useTemporary = ((target->type(c) == lir::Operand::Type::Memory + and srcValue->source->type(c) == lir::Operand::Type::Memory) or (srcSelectSize < dstSize - and target->type(c) != lir::RegisterOperand)); + and target->type(c) != lir::Operand::Type::RegisterPair)); srcValue->source->freeze(c, srcValue); @@ -827,7 +827,7 @@ void maybeMove(Context* c, srcValue->source->thaw(c, srcValue); bool addOffset = srcSize != srcSelectSize and c->arch->bigEndian() - and srcValue->source->type(c) == lir::MemoryOperand; + and srcValue->source->type(c) == lir::Operand::Type::Memory; if (addOffset) { static_cast(srcValue->source)->offset @@ -878,7 +878,7 @@ void maybeMove(Context* c, } assertT(c, thunk == 0); - assertT(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand)); + assertT(c, dstMask.typeMask & src.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair)); Site* tmpTarget = freeRegisterSite(c, dstMask.registerMask & src.lowRegisterMask); @@ -1635,7 +1635,7 @@ bool resolveSourceSites(Context* c, Read* r = live(c, v); if (r and sites[el.localIndex] == 0) { - SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand), + SiteMask mask((1 << (unsigned)lir::Operand::Type::RegisterPair) | (1 << (unsigned)lir::Operand::Type::Memory), c->regFile->generalRegisters.mask, AnyFrameIndex); @@ -1677,7 +1677,7 @@ void resolveTargetSites(Context* c, Read* r = live(c, v); if (r and sites[el.localIndex] == 0) { - SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand), + SiteMask mask((1 << (unsigned)lir::Operand::Type::RegisterPair) | (1 << (unsigned)lir::Operand::Type::Memory), c->regFile->generalRegisters.mask, AnyFrameIndex); diff --git a/src/codegen/compiler/event.cpp b/src/codegen/compiler/event.cpp index 789d470f44..d5b6a67dcf 100644 --- a/src/codegen/compiler/event.cpp +++ b/src/codegen/compiler/event.cpp @@ -415,7 +415,7 @@ class CallEvent : public Event { fprintf(stderr, "stack %d arg read 
%p\n", frameIndex, v); } - targetMask = SiteMask(1 << lir::MemoryOperand, 0, frameIndex); + targetMask = SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, frameIndex); } this->addRead(c, v, targetMask); @@ -512,7 +512,7 @@ class CallEvent : public Event { this->addRead(c, v, generalRegisterMask(c)); } else { this->addRead( - c, v, SiteMask(1 << lir::MemoryOperand, 0, frameIndex)); + c, v, SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, frameIndex)); } } } @@ -544,7 +544,7 @@ class CallEvent : public Event { this->addRead(c, stack->value, - SiteMask(1 << lir::MemoryOperand, 0, logicalIndex)); + SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, logicalIndex)); } stack = stack->next; @@ -581,11 +581,11 @@ class CallEvent : public Event { assertT( c, returnAddressSurrogate == 0 - or returnAddressSurrogate->source->type(c) == lir::RegisterOperand); + or returnAddressSurrogate->source->type(c) == lir::Operand::Type::RegisterPair); assertT( c, framePointerSurrogate == 0 - or framePointerSurrogate->source->type(c) == lir::RegisterOperand); + or framePointerSurrogate->source->type(c) == lir::Operand::Type::RegisterPair); int ras; if (returnAddressSurrogate) { @@ -783,7 +783,7 @@ class MoveEvent : public Event { op, srcSelectSize, OperandMask( - 1 << srcValue->source->type(c), + 1 << (unsigned)srcValue->source->type(c), srcValue->source->registerMask(c), srcValue->nextWord->source->registerMask(c)), dstSize, @@ -866,7 +866,7 @@ class MoveEvent : public Event { assertT(c, srcSelectSize == c->targetInfo.pointerSize); if (dstValue->nextWord->target or live(c, dstValue->nextWord)) { - assertT(c, dstLowMask.typeMask & (1 << lir::RegisterOperand)); + assertT(c, dstLowMask.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair)); Site* low = freeRegisterSite(c, dstLowMask.registerMask); @@ -897,7 +897,7 @@ class MoveEvent : public Event { srcValue->source->thaw(c, srcValue); - assertT(c, dstHighMask.typeMask & (1 << lir::RegisterOperand)); + assertT(c, dstHighMask.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair)); Site* high = freeRegisterSite(c, dstHighMask.registerMask); @@ -1126,12 +1126,12 @@ class CombineEvent : public Event { op, firstValue->type.size(c->targetInfo), OperandMask( - 1 << firstValue->source->type(c), + 1 << (unsigned)firstValue->source->type(c), firstValue->source->registerMask(c), firstValue->nextWord->source->registerMask(c)), secondValue->type.size(c->targetInfo), OperandMask( - 1 << secondValue->source->type(c), + 1 << (unsigned)secondValue->source->type(c), secondValue->source->registerMask(c), secondValue->nextWord->source->registerMask(c)), resultValue->type.size(c->targetInfo), @@ -1314,7 +1314,7 @@ class TranslateEvent : public Event { op, firstValue->type.size(c->targetInfo), OperandMask( - 1 << firstValue->source->type(c), + 1 << (unsigned)firstValue->source->type(c), firstValue->source->registerMask(c), firstValue->nextWord->source->registerMask(c)), resultValue->type.size(c->targetInfo), @@ -1451,7 +1451,7 @@ ConstantSite* findConstantSite(Context* c, Value* v) { for (SiteIterator it(c, v); it.hasMore();) { Site* s = it.next(); - if (s->type(c) == lir::ConstantOperand) { + if (s->type(c) == lir::Operand::Type::Constant) { return static_cast(s); } } @@ -1461,7 +1461,7 @@ ConstantSite* findConstantSite(Context* c, Value* v) void moveIfConflict(Context* c, Value* v, MemorySite* s) { if (v->reads) { - SiteMask mask(1 << lir::RegisterOperand, ~0, AnyFrameIndex); + SiteMask mask(1 << (unsigned)lir::Operand::Type::RegisterPair, ~0, 
AnyFrameIndex); v->reads->intersect(&mask); if (s->conflicts(mask)) { maybeMove(c, v->reads, true, false); } } } @@ -1509,13 +1509,13 @@ class MemoryEvent : public Event { displacement += (constant->value->value() * scale); scale = 1; } else { - assertT(c, index->source->type(c) == lir::RegisterOperand); + assertT(c, index->source->type(c) == lir::Operand::Type::RegisterPair); indexRegister = static_cast<RegisterSite*>(index->source)->number; } } else { indexRegister = lir::NoRegister; } - assertT(c, base->source->type(c) == lir::RegisterOperand); + assertT(c, base->source->type(c) == lir::Operand::Type::RegisterPair); int baseRegister = static_cast<RegisterSite*>(base->source)->number; popRead(c, this, base); @@ -1873,12 +1873,12 @@ void clean(Context* c, Value* v, unsigned popIndex) { for (SiteIterator it(c, v); it.hasMore();) { Site* s = it.next(); - if (not(s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex)) + if (not(s->match(c, SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, AnyFrameIndex)) and offsetToFrameIndex(c, static_cast<MemorySite*>(s)->offset) >= popIndex)) { if (false and s->match(c, - SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))) { + SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, AnyFrameIndex))) { char buffer[256]; s->toString(c, buffer, 256); fprintf(stderr, @@ -2010,7 +2010,7 @@ class BoundsCheckEvent : public Event { lir::Constant handlerConstant(resolvedPromise(c, handler)); a->apply(lir::Call, OperandInfo(c->targetInfo.pointerSize, - lir::ConstantOperand, + lir::Operand::Type::Constant, &handlerConstant)); } } else { @@ -2032,7 +2032,7 @@ class BoundsCheckEvent : public Event { } if (constant == 0 or constant->value->value() >= 0) { - assertT(c, object->source->type(c) == lir::RegisterOperand); + assertT(c, object->source->type(c) == lir::Operand::Type::RegisterPair); MemorySite length(static_cast<RegisterSite*>(object->source)->number, lengthOffset, lir::NoRegister, @@ -2066,7 +2066,7 @@ class BoundsCheckEvent : public Event { lir::Constant handlerConstant(resolvedPromise(c, handler)); a->apply(lir::Call, OperandInfo(c->targetInfo.pointerSize, - lir::ConstantOperand, + lir::Operand::Type::Constant, &handlerConstant)); nextPromise->offset = a->offset(); diff --git a/src/codegen/compiler/read.cpp b/src/codegen/compiler/read.cpp index 0c7d09fd76..d9909ed343 100644 --- a/src/codegen/compiler/read.cpp +++ b/src/codegen/compiler/read.cpp @@ -205,7 +205,7 @@ Read* StubRead::next(Context*) SingleRead* read(Context* c, const SiteMask& mask, Value* successor) { assertT(c, - (mask.typeMask != 1 << lir::MemoryOperand) or mask.frameIndex >= 0); + (mask.typeMask != 1 << (unsigned)lir::Operand::Type::Memory) or mask.frameIndex >= 0); return new (c->zone) SingleRead(mask, successor); } diff --git a/src/codegen/compiler/regalloc.cpp b/src/codegen/compiler/regalloc.cpp index b75162405d..0c60ae0d0d 100644 --- a/src/codegen/compiler/regalloc.cpp +++ b/src/codegen/compiler/regalloc.cpp @@ -71,7 +71,7 @@ bool pickRegisterTarget(Context* c, c, v, r, - SiteMask(1 << lir::RegisterOperand, 1 << i, NoFrameIndex), + SiteMask(1 << (unsigned)lir::Operand::Type::RegisterPair, 1 << i, NoFrameIndex), costCalculator) + Target::MinimumRegisterCost; if (mask.containsExactly(i)) { @@ -124,7 +124,7 @@ Target pickRegisterTarget(Context* c, { unsigned cost; Register number = pickRegisterTarget(c, v, mask, &cost, costCalculator); - return Target(number, lir::RegisterOperand, cost); + return Target(number, lir::Operand::Type::RegisterPair, cost); } unsigned frameCost(Context* c,
return resourceCost(c, v, c->frameResources + frameIndex, - SiteMask(1 << lir::MemoryOperand, 0, frameIndex), + SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, frameIndex), costCalculator) + Target::MinimumFrameCost; } @@ -147,7 +147,7 @@ Target pickFrameTarget(Context* c, Value* v, CostCalculator* costCalculator) do { if (p->home >= 0) { Target mine(p->home, - lir::MemoryOperand, + lir::Operand::Type::Memory, frameCost(c, v, p->home, costCalculator)); if (mine.cost == Target::MinimumFrameCost) { @@ -168,7 +168,7 @@ Target pickAnyFrameTarget(Context* c, Value* v, CostCalculator* costCalculator) unsigned count = totalFrameSize(c); for (unsigned i = 0; i < count; ++i) { - Target mine(i, lir::MemoryOperand, frameCost(c, v, i, costCalculator)); + Target mine(i, lir::Operand::Type::Memory, frameCost(c, v, i, costCalculator)); if (mine.cost == Target::MinimumFrameCost) { return mine; } else if (mine.cost < best.cost) { @@ -186,7 +186,7 @@ Target pickTarget(Context* c, Target best, CostCalculator* costCalculator) { - if (mask.typeMask & (1 << lir::RegisterOperand)) { + if (mask.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair)) { Target mine = pickRegisterTarget(c, value, mask.registerMask, costCalculator); @@ -198,10 +198,10 @@ Target pickTarget(Context* c, } } - if (mask.typeMask & (1 << lir::MemoryOperand)) { + if (mask.typeMask & (1 << (unsigned)lir::Operand::Type::Memory)) { if (mask.frameIndex >= 0) { Target mine(mask.frameIndex, - lir::MemoryOperand, + lir::Operand::Type::Memory, frameCost(c, value, mask.frameIndex, costCalculator)); if (mine.cost == Target::MinimumFrameCost) { return mine; diff --git a/src/codegen/compiler/regalloc.h b/src/codegen/compiler/regalloc.h index e512e63183..0dab9d1e3d 100644 --- a/src/codegen/compiler/regalloc.h +++ b/src/codegen/compiler/regalloc.h @@ -55,13 +55,13 @@ class Target { { } - Target(Register index, lir::OperandType type, unsigned cost) + Target(Register index, lir::Operand::Type type, unsigned cost) : index(index), type(type), cost(cost) { } int16_t index; - lir::OperandType type; + lir::Operand::Type type; uint8_t cost; }; diff --git a/src/codegen/compiler/site.cpp b/src/codegen/compiler/site.cpp index 1f83991a4a..99ce5fb154 100644 --- a/src/codegen/compiler/site.cpp +++ b/src/codegen/compiler/site.cpp @@ -152,7 +152,7 @@ class AddressSite : public Site { virtual bool match(Context*, const SiteMask& mask) { - return mask.typeMask & (1 << lir::AddressOperand); + return mask.typeMask & (1 << (unsigned)lir::Operand::Type::Address); } virtual bool loneMatch(Context*, const SiteMask&) @@ -165,9 +165,9 @@ class AddressSite : public Site { abort(c); } - virtual lir::OperandType type(Context*) + virtual lir::Operand::Type type(Context*) { - return lir::AddressOperand; + return lir::Operand::Type::Address; } virtual void asAssemblerOperand(Context* c UNUSED, @@ -201,7 +201,7 @@ class AddressSite : public Site { virtual SiteMask mask(Context*) { - return SiteMask(1 << lir::AddressOperand, 0, NoFrameIndex); + return SiteMask(1 << (unsigned)lir::Operand::Type::Address, 0, NoFrameIndex); } virtual SiteMask nextWordMask(Context* c, unsigned) @@ -237,7 +237,7 @@ unsigned RegisterSite::copyCost(Context* c, Site* s) assertT(c, number != lir::NoRegister); if (s and (this == s - or (s->type(c) == lir::RegisterOperand + or (s->type(c) == lir::Operand::Type::RegisterPair and (static_cast<RegisterSite*>(s)->mask_.contains(number))))) { return 0; } else { @@ -249,7 +249,7 @@ bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask) { assertT(c, number != 
lir::NoRegister); - if ((mask.typeMask & (1 << lir::RegisterOperand))) { + if ((mask.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair))) { return mask.registerMask.contains(number); } else { return false; @@ -260,7 +260,7 @@ bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask) { assertT(c, number != lir::NoRegister); - if ((mask.typeMask & (1 << lir::RegisterOperand))) { + if ((mask.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair))) { return mask.registerMask.containsExactly(number); } else { return false; @@ -271,7 +271,7 @@ bool RegisterSite::matchNextWord(Context* c, Site* s, unsigned) { assertT(c, number != lir::NoRegister); - if (s->type(c) != lir::RegisterOperand) { + if (s->type(c) != lir::Operand::Type::RegisterPair) { return false; } @@ -290,7 +290,7 @@ void RegisterSite::acquire(Context* c, Value* v) { Target target; if (number != lir::NoRegister) { - target = Target(number, lir::RegisterOperand, 0); + target = Target(number, lir::Operand::Type::RegisterPair, 0); } else { target = pickRegisterTarget(c, v, mask_); expect(c, target.cost < Target::Impossible); @@ -330,9 +330,9 @@ bool RegisterSite::frozen(Context* c UNUSED) return c->registerResources[number].freezeCount != 0; } -lir::OperandType RegisterSite::type(Context*) +lir::Operand::Type RegisterSite::type(Context*) { - return lir::RegisterOperand; + return lir::Operand::Type::RegisterPair; } void RegisterSite::asAssemblerOperand(Context* c UNUSED, @@ -385,7 +385,7 @@ Site* RegisterSite::makeNextWord(Context* c, unsigned) SiteMask RegisterSite::mask(Context* c UNUSED) { - return SiteMask(1 << lir::RegisterOperand, mask_, NoFrameIndex); + return SiteMask(1 << (unsigned)lir::Operand::Type::RegisterPair, mask_, NoFrameIndex); } SiteMask RegisterSite::nextWordMask(Context* c, unsigned) @@ -393,9 +393,9 @@ SiteMask RegisterSite::nextWordMask(Context* c, unsigned) assertT(c, number != lir::NoRegister); if (registerSize(c) > c->targetInfo.pointerSize) { - return SiteMask(1 << lir::RegisterOperand, number, NoFrameIndex); + return SiteMask(1 << (unsigned)lir::Operand::Type::RegisterPair, number, NoFrameIndex); } else { - return SiteMask(1 << lir::RegisterOperand, + return SiteMask(1 << (unsigned)lir::Operand::Type::RegisterPair, c->regFile->generalRegisters.mask, NoFrameIndex); } @@ -453,7 +453,7 @@ unsigned MemorySite::copyCost(Context* c, Site* s) { assertT(c, acquired); - if (s and (this == s or (s->type(c) == lir::MemoryOperand + if (s and (this == s or (s->type(c) == lir::Operand::Type::Memory and static_cast<MemorySite*>(s)->base == base and static_cast<MemorySite*>(s)->offset == offset and static_cast<MemorySite*>(s)->index == index @@ -466,7 +466,7 @@ unsigned MemorySite::copyCost(Context* c, Site* s) bool MemorySite::conflicts(const SiteMask& mask) { - return (mask.typeMask & (1 << lir::RegisterOperand)) != 0 + return (mask.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair)) != 0 and (!mask.registerMask.contains(base) or (index != lir::NoRegister and !mask.registerMask.contains(index))); @@ -476,7 +476,7 @@ bool MemorySite::match(Context* c, const SiteMask& mask) { assertT(c, acquired); - if (mask.typeMask & (1 << lir::MemoryOperand)) { + if (mask.typeMask & (1 << (unsigned)lir::Operand::Type::Memory)) { if (mask.frameIndex >= 0) { if (base == c->arch->stack()) { assertT(c, index == lir::NoRegister); @@ -497,7 +497,7 @@ bool MemorySite::loneMatch(Context* c, const SiteMask& mask) { assertT(c, acquired); - if (mask.typeMask & (1 << lir::MemoryOperand)) { + if (mask.typeMask & (1 << 
(unsigned)lir::Operand::Type::Memory)) { if (base == c->arch->stack()) { assertT(c, index == lir::NoRegister); @@ -513,7 +513,7 @@ bool MemorySite::loneMatch(Context* c, const SiteMask& mask) bool MemorySite::matchNextWord(Context* c, Site* s, unsigned index) { - if (s->type(c) == lir::MemoryOperand) { + if (s->type(c) == lir::Operand::Type::Memory) { MemorySite* ms = static_cast<MemorySite*>(s); return ms->base == this->base and ((index == 1 @@ -596,9 +596,9 @@ bool MemorySite::frozen(Context* c) and c->frameResources[offsetToFrameIndex(c, offset)].freezeCount != 0; } -lir::OperandType MemorySite::type(Context*) +lir::Operand::Type MemorySite::type(Context*) { - return lir::MemoryOperand; + return lir::Operand::Type::Memory; } void MemorySite::asAssemblerOperand(Context* c UNUSED, @@ -657,7 +657,7 @@ Site* MemorySite::makeNextWord(Context* c, unsigned index) SiteMask MemorySite::mask(Context* c) { - return SiteMask(1 << lir::MemoryOperand, + return SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, (base == c->arch->stack()) ? static_cast<int>(offsetToFrameIndex(c, offset)) @@ -674,7 +674,7 @@ SiteMask MemorySite::nextWordMask(Context* c, unsigned index) } else { frameIndex = NoFrameIndex; } - return SiteMask(1 << lir::MemoryOperand, 0, frameIndex); + return SiteMask(1 << (unsigned)lir::Operand::Type::Memory, 0, frameIndex); } bool MemorySite::isVolatile(Context* c) diff --git a/src/codegen/compiler/site.h b/src/codegen/compiler/site.h index 6d549c116e..6344936e0c 100644 --- a/src/codegen/compiler/site.h +++ b/src/codegen/compiler/site.h @@ -43,7 +43,7 @@ class SiteMask { static SiteMask fixedRegisterMask(int number) { - return SiteMask(1 << lir::RegisterOperand, 1 << number, NoFrameIndex); + return SiteMask(1 << (unsigned)lir::Operand::Type::RegisterPair, 1 << number, NoFrameIndex); } static SiteMask lowPart(const OperandMask& mask) @@ -103,7 +103,7 @@ class Site { return false; } - virtual lir::OperandType type(Context*) = 0; + virtual lir::Operand::Type type(Context*) = 0; virtual void asAssemblerOperand(Context*, Site*, lir::Operand*) = 0; @@ -187,7 +187,7 @@ class ConstantSite : public Site { virtual bool match(Context*, const SiteMask& mask) { - return mask.typeMask & (1 << lir::ConstantOperand); + return mask.typeMask & (1 << (unsigned)lir::Operand::Type::Constant); } virtual bool loneMatch(Context*, const SiteMask&) { @@ -197,12 +197,12 @@ class ConstantSite : public Site { virtual bool matchNextWord(Context* c, Site* s, unsigned) { - return s->type(c) == lir::ConstantOperand; + return s->type(c) == lir::Operand::Type::Constant; } - virtual lir::OperandType type(Context*) + virtual lir::Operand::Type type(Context*) { - return lir::ConstantOperand; + return lir::Operand::Type::Constant; } virtual void asAssemblerOperand(Context* c, Site* high, lir::Operand* result) @@ -236,12 +236,12 @@ class ConstantSite : public Site { virtual SiteMask mask(Context*) { - return SiteMask(1 << lir::ConstantOperand, 0, NoFrameIndex); + return SiteMask(1 << (unsigned)lir::Operand::Type::Constant, 0, NoFrameIndex); } virtual SiteMask nextWordMask(Context*, unsigned) { - return SiteMask(1 << lir::ConstantOperand, 0, NoFrameIndex); + return SiteMask(1 << (unsigned)lir::Operand::Type::Constant, 0, NoFrameIndex); } Promise* value; @@ -273,7 +273,7 @@ class RegisterSite : public Site { virtual bool frozen(Context* c UNUSED); - virtual lir::OperandType type(Context*); + virtual lir::Operand::Type type(Context*); virtual void asAssemblerOperand(Context* c UNUSED, Site* high, @@ -328,7 +328,7 @@ class MemorySite : public 
Site { virtual bool frozen(Context* c); - virtual lir::OperandType type(Context*); + virtual lir::Operand::Type type(Context*); virtual void asAssemblerOperand(Context* c UNUSED, Site* high UNUSED, diff --git a/src/codegen/target/arm/assembler.cpp b/src/codegen/target/arm/assembler.cpp index 84e7d8d3cc..c383aea819 100644 --- a/src/codegen/target/arm/assembler.cpp +++ b/src/codegen/target/arm/assembler.cpp @@ -396,7 +396,7 @@ class MyArchitecture : public Architecture { OperandMask& aMask, bool* thunk) { - aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) | (1 << (unsigned)lir::Operand::Type::Constant); aMask.setLowHighRegisterMasks(~static_cast(0), ~static_cast(0)); *thunk = false; } @@ -413,7 +413,7 @@ class MyArchitecture : public Architecture { switch (op) { case lir::Negate: - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK); break; @@ -426,7 +426,7 @@ class MyArchitecture : public Architecture { case lir::FloatNegate: case lir::Float2Float: if (vfpSupported()) { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK); } else { *thunk = true; @@ -439,7 +439,7 @@ class MyArchitecture : public Architecture { // thunks or produce inline machine code which handles edge // cases properly. if (false && vfpSupported() && bSize == 4) { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK); } else { *thunk = true; @@ -448,7 +448,7 @@ class MyArchitecture : public Architecture { case lir::Int2Float: if (vfpSupported() && aSize == 4) { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK); } else { *thunk = true; @@ -466,12 +466,12 @@ class MyArchitecture : public Architecture { unsigned, OperandMask& bMask) { - bMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) | (1 << (unsigned)lir::Operand::Type::Memory); bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK); switch (op) { case lir::Negate: - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK); break; @@ -480,18 +480,18 @@ class MyArchitecture : public Architecture { case lir::FloatNegate: case lir::Float2Float: case lir::Int2Float: - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK); break; case lir::Float2Int: - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK); break; case lir::Move: - if (!(aMask.typeMask & 1 << lir::RegisterOperand)) { - bMask.typeMask = 1 << lir::RegisterOperand; + if (!(aMask.typeMask & 1 << (unsigned)lir::Operand::Type::RegisterPair)) { + bMask.typeMask = 1 << (unsigned)lir::Operand::Type::RegisterPair; } break; @@ -511,15 +511,15 @@ class MyArchitecture : public Architecture { tmpMask.typeMask = 0; tmpMask.setLowHighRegisterMasks(0, 0); - if 
(dstMask.typeMask & (1 << lir::MemoryOperand)) { + if (dstMask.typeMask & (1 << (unsigned)lir::Operand::Type::Memory)) { // can't move directly from memory or constant to memory - srcMask.typeMask = 1 << lir::RegisterOperand; - tmpMask.typeMask = 1 << lir::RegisterOperand; + srcMask.typeMask = 1 << (unsigned)lir::Operand::Type::RegisterPair; + tmpMask.typeMask = 1 << (unsigned)lir::Operand::Type::RegisterPair; tmpMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK); - } else if (vfpSupported() && dstMask.typeMask & 1 << lir::RegisterOperand + } else if (vfpSupported() && dstMask.typeMask & 1 << (unsigned)lir::Operand::Type::RegisterPair && dstMask.lowRegisterMask & FPR_MASK) { - srcMask.typeMask = tmpMask.typeMask = 1 << lir::RegisterOperand - | 1 << lir::MemoryOperand; + srcMask.typeMask = tmpMask.typeMask = 1 << (unsigned)lir::Operand::Type::RegisterPair + | 1 << (unsigned)lir::Operand::Type::Memory; tmpMask.setLowHighRegisterMasks(~static_cast(0), ~static_cast(0)); } } @@ -532,10 +532,10 @@ class MyArchitecture : public Architecture { unsigned, bool* thunk) { - aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) | (1 << (unsigned)lir::Operand::Type::Constant); aMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK); - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.setLowHighRegisterMasks(GPR_MASK, GPR_MASK); *thunk = false; @@ -545,7 +545,7 @@ class MyArchitecture : public Architecture { case lir::ShiftRight: case lir::UnsignedShiftRight: if (bSize == 8) - aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); break; case lir::Add: @@ -553,7 +553,7 @@ class MyArchitecture : public Architecture { case lir::Or: case lir::Xor: case lir::Multiply: - aMask.typeMask = bMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); break; case lir::Divide: @@ -567,7 +567,7 @@ class MyArchitecture : public Architecture { case lir::FloatMultiply: case lir::FloatDivide: if (vfpSupported()) { - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK); bMask = aMask; } else { @@ -586,7 +586,7 @@ class MyArchitecture : public Architecture { case lir::JumpIfFloatLessOrEqualOrUnordered: case lir::JumpIfFloatGreaterOrEqualOrUnordered: if (vfpSupported()) { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(FPR_MASK, FPR_MASK); bMask = aMask; } else { @@ -608,10 +608,10 @@ class MyArchitecture : public Architecture { OperandMask& cMask) { if (isBranch(op)) { - cMask.typeMask = (1 << lir::ConstantOperand); + cMask.typeMask = (1 << (unsigned)lir::Operand::Type::Constant); cMask.setLowHighRegisterMasks(0, 0); } else { - cMask.typeMask = (1 << lir::RegisterOperand); + cMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); cMask.lowRegisterMask = bMask.lowRegisterMask; cMask.highRegisterMask = bMask.highRegisterMask; } @@ -682,7 +682,7 @@ class MyAssembler : public Assembler { { struct Argument { unsigned size; - lir::OperandType type; + lir::Operand::Type type; lir::Operand* operand; }; RUNTIME_ARRAY(Argument, arguments, argumentCount); @@ -693,7 +693,7 @@ class MyAssembler : 
public Assembler { for (unsigned i = 0; i < argumentCount; ++i) { RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned); RUNTIME_ARRAY_BODY(arguments)[i].type - = static_cast<lir::OperandType>(va_arg(a, int)); + = static_cast<lir::Operand::Type>(va_arg(a, int)); RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*); footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord); @@ -713,7 +713,7 @@ class MyAssembler : public Assembler { RUNTIME_ARRAY_BODY(arguments)[i].operand), OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord), - lir::RegisterOperand, + lir::Operand::Type::RegisterPair, &dst)); offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, @@ -727,7 +727,7 @@ class MyAssembler : public Assembler { RUNTIME_ARRAY_BODY(arguments)[i].operand), OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord), - lir::MemoryOperand, + lir::Operand::Type::Memory, &dst)); offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, @@ -888,14 +888,14 @@ class MyAssembler : public Assembler { if (isBranch(op)) { assertT(&con, a.size == b.size); assertT(&con, c.size == TargetBytesPerWord); - assertT(&con, c.type == lir::ConstantOperand); + assertT(&con, c.type == lir::Operand::Type::Constant); arch_->con.branchOperations[branchIndex(&(arch_->con), a.type, b.type)]( &con, op, a.size, a.operand, b.operand, c.operand); } else { assertT(&con, b.size == c.size); - assertT(&con, b.type == lir::RegisterOperand); - assertT(&con, c.type == lir::RegisterOperand); + assertT(&con, b.type == lir::Operand::Type::RegisterPair); + assertT(&con, c.type == lir::Operand::Type::RegisterPair); arch_->con.ternaryOperations[index(&(arch_->con), op, a.type)]( &con, b.size, a.operand, b.operand, c.operand); diff --git a/src/codegen/target/arm/context.h b/src/codegen/target/arm/context.h index 45d937da9b..fde5a54a29 100644 --- a/src/codegen/target/arm/context.h +++ b/src/codegen/target/arm/context.h @@ -85,15 +85,15 @@ class ArchitectureContext { vm::System* s; OperationType operations[lir::OperationCount]; UnaryOperationType - unaryOperations[lir::UnaryOperationCount * lir::OperandTypeCount]; + unaryOperations[lir::UnaryOperationCount * lir::Operand::TypeCount]; BinaryOperationType binaryOperations[lir::BinaryOperationCount - * lir::OperandTypeCount - * lir::OperandTypeCount]; + * lir::Operand::TypeCount + * lir::Operand::TypeCount]; TernaryOperationType ternaryOperations[lir::NonBranchTernaryOperationCount - * lir::OperandTypeCount]; + * lir::Operand::TypeCount]; BranchOperationType branchOperations[lir::BranchOperationCount - * lir::OperandTypeCount - * lir::OperandTypeCount]; + * lir::Operand::TypeCount + * lir::Operand::TypeCount]; }; inline avian::util::Aborter* getAborter(Context* c) diff --git a/src/codegen/target/arm/multimethod.cpp b/src/codegen/target/arm/multimethod.cpp index 0ee2a642bc..c01c934e5b 100644 --- a/src/codegen/target/arm/multimethod.cpp +++ b/src/codegen/target/arm/multimethod.cpp @@ -22,16 +22,16 @@ using namespace util; unsigned index(ArchitectureContext*, lir::BinaryOperation operation, - lir::OperandType operand1, - lir::OperandType operand2) + lir::Operand::Type operand1, + lir::Operand::Type operand2) { return operation + (lir::BinaryOperationCount * operand1) - + (lir::BinaryOperationCount * lir::OperandTypeCount * operand2); + + (lir::BinaryOperationCount * lir::Operand::TypeCount * operand2); } unsigned index(ArchitectureContext* con UNUSED, lir::TernaryOperation operation, - lir::OperandType operand1) + lir::Operand::Type operand1)
{ assertT(con, not isBranch(operation)); @@ -39,18 +39,18 @@ unsigned index(ArchitectureContext* con UNUSED, } unsigned branchIndex(ArchitectureContext* con UNUSED, - lir::OperandType operand1, - lir::OperandType operand2) + lir::Operand::Type operand1, + lir::Operand::Type operand2) { - return operand1 + (lir::OperandTypeCount * operand2); + return operand1 + (lir::Operand::TypeCount * operand2); } void populateTables(ArchitectureContext* con) { - const lir::OperandType C = lir::ConstantOperand; - const lir::OperandType A = lir::AddressOperand; - const lir::OperandType R = lir::RegisterOperand; - const lir::OperandType M = lir::MemoryOperand; + const lir::Operand::Type C = lir::Operand::Type::Constant; + const lir::Operand::Type A = lir::Operand::Type::Address; + const lir::Operand::Type R = lir::Operand::Type::RegisterPair; + const lir::Operand::Type M = lir::Operand::Type::Memory; OperationType* zo = con->operations; UnaryOperationType* uo = con->unaryOperations; diff --git a/src/codegen/target/arm/multimethod.h b/src/codegen/target/arm/multimethod.h index 1949bf5486..f234385ee6 100644 --- a/src/codegen/target/arm/multimethod.h +++ b/src/codegen/target/arm/multimethod.h @@ -25,16 +25,16 @@ namespace arm { unsigned index(ArchitectureContext*, lir::BinaryOperation operation, - lir::OperandType operand1, - lir::OperandType operand2); + lir::Operand::Type operand1, + lir::Operand::Type operand2); unsigned index(ArchitectureContext* con UNUSED, lir::TernaryOperation operation, - lir::OperandType operand1); + lir::Operand::Type operand1); unsigned branchIndex(ArchitectureContext* con UNUSED, - lir::OperandType operand1, - lir::OperandType operand2); + lir::Operand::Type operand1, + lir::Operand::Type operand2); void populateTables(ArchitectureContext* con); diff --git a/src/codegen/target/multimethod.h b/src/codegen/target/multimethod.h index e5de2ca23c..965d16a402 100644 --- a/src/codegen/target/multimethod.h +++ b/src/codegen/target/multimethod.h @@ -17,9 +17,9 @@ namespace codegen { class Multimethod { public: inline static unsigned index(lir::UnaryOperation operation, - lir::OperandType operand) + lir::Operand::Type operand) { - return operation + (lir::UnaryOperationCount * operand); + return operation + (lir::UnaryOperationCount * (unsigned)operand); } }; diff --git a/src/codegen/target/x86/assembler.cpp b/src/codegen/target/x86/assembler.cpp index 2bf5eaeb0a..4049683651 100644 --- a/src/codegen/target/x86/assembler.cpp +++ b/src/codegen/target/x86/assembler.cpp @@ -501,8 +501,8 @@ class MyArchitecture : public Architecture { OperandMask& aMask, bool* thunk) { - aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::MemoryOperand) - | (1 << lir::ConstantOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) | (1 << (unsigned)lir::Operand::Type::Memory) + | (1 << (unsigned)lir::Operand::Type::Constant); *thunk = false; } @@ -518,13 +518,13 @@ class MyArchitecture : public Architecture { switch (op) { case lir::Negate: - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(1 << rax, 1 << rdx); break; case lir::Absolute: if (aSize <= TargetBytesPerWord) { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(1 << rax, 0); } else { *thunk = true; @@ -533,7 +533,7 @@ class MyArchitecture : public Architecture { case lir::FloatAbsolute: if (useSSE(&c)) { - aMask.typeMask = (1 << 
lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); } else { *thunk = true; @@ -543,7 +543,7 @@ class MyArchitecture : public Architecture { case lir::FloatNegate: // floatNegateRR does not support doubles if (useSSE(&c) and aSize == 4 and bSize == 4) { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(FloatRegisterMask, 0); } else { *thunk = true; @@ -552,8 +552,8 @@ class MyArchitecture : public Architecture { case lir::FloatSquareRoot: if (useSSE(&c)) { - aMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); } else { *thunk = true; @@ -562,8 +562,8 @@ class MyArchitecture : public Architecture { case lir::Float2Float: if (useSSE(&c)) { - aMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); } else { *thunk = true; @@ -576,8 +576,8 @@ class MyArchitecture : public Architecture { // thunks or produce inline machine code which handles edge // cases properly. if (false and useSSE(&c) and bSize <= TargetBytesPerWord) { - aMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); } else { *thunk = true; @@ -586,8 +586,8 @@ class MyArchitecture : public Architecture { case lir::Int2Float: if (useSSE(&c) and aSize <= TargetBytesPerWord) { - aMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask); } else { *thunk = true; @@ -600,14 +600,14 @@ class MyArchitecture : public Architecture { if (TargetBytesPerWord == 4) { if (aSize == 4 and bSize == 8) { - aMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); const uint32_t mask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx)); aMask.setLowHighRegisterMasks(mask, mask); } else if (aSize == 1 or bSize == 1) { - aMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); const uint32_t mask = (1 << rax) | (1 << rcx) | (1 << rdx) | (1 << rbx); aMask.setLowHighRegisterMasks(mask, mask); @@ -631,18 +631,18 @@ class MyArchitecture : public Architecture { switch (op) { case lir::Absolute: - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.setLowHighRegisterMasks(1 << rax, 0); break; case lir::FloatAbsolute: - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.lowRegisterMask = aMask.lowRegisterMask; bMask.highRegisterMask = 
aMask.highRegisterMask; break; case lir::Negate: - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.lowRegisterMask = aMask.lowRegisterMask; bMask.highRegisterMask = aMask.highRegisterMask; break; @@ -651,30 +651,30 @@ class MyArchitecture : public Architecture { case lir::FloatSquareRoot: case lir::Float2Float: case lir::Int2Float: - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); break; case lir::Float2Int: - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); break; case lir::Move: if (aMask.typeMask - & ((1 << lir::MemoryOperand) | 1 << lir::AddressOperand)) { - bMask.typeMask = (1 << lir::RegisterOperand); + & ((1 << (unsigned)lir::Operand::Type::Memory) | 1 << (unsigned)lir::Operand::Type::Address)) { + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.setLowHighRegisterMasks(GeneralRegisterMask | FloatRegisterMask, GeneralRegisterMask); - } else if (aMask.typeMask & (1 << lir::RegisterOperand)) { - bMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + } else if (aMask.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair)) { + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); if (aMask.lowRegisterMask & FloatRegisterMask) { bMask.setLowHighRegisterMasks(FloatRegisterMask, 0); } else { bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask); } } else { - bMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); } if (TargetBytesPerWord == 4) { @@ -704,32 +704,32 @@ class MyArchitecture : public Architecture { tmpMask.typeMask = 0; tmpMask.setLowHighRegisterMasks(0, 0); - if (dstMask.typeMask & (1 << lir::MemoryOperand)) { + if (dstMask.typeMask & (1 << (unsigned)lir::Operand::Type::Memory)) { // can't move directly from memory to memory - srcMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::ConstantOperand); - tmpMask.typeMask = 1 << lir::RegisterOperand; + srcMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Constant); + tmpMask.typeMask = 1 << (unsigned)lir::Operand::Type::RegisterPair; tmpMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask); - } else if (dstMask.typeMask & (1 << lir::RegisterOperand)) { + } else if (dstMask.typeMask & (1 << (unsigned)lir::Operand::Type::RegisterPair)) { if (size > TargetBytesPerWord) { // can't move directly from FPR to GPR or vice-versa for // values larger than the GPR size if (dstMask.lowRegisterMask & FloatRegisterMask) { srcMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); - tmpMask.typeMask = 1 << lir::MemoryOperand; + tmpMask.typeMask = 1 << (unsigned)lir::Operand::Type::Memory; } else if (dstMask.lowRegisterMask & GeneralRegisterMask) { srcMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask); - tmpMask.typeMask = 1 << lir::MemoryOperand; + tmpMask.typeMask = 1 << (unsigned)lir::Operand::Type::Memory; } } if (dstMask.lowRegisterMask & FloatRegisterMask) { // can't move directly from constant to FPR - srcMask.typeMask &= ~(1 << lir::ConstantOperand); + srcMask.typeMask &= ~(1 << 
(unsigned)lir::Operand::Type::Constant); if (size > TargetBytesPerWord) { - tmpMask.typeMask = 1 << lir::MemoryOperand; + tmpMask.typeMask = 1 << (unsigned)lir::Operand::Type::Memory; } else { - tmpMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); + tmpMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); tmpMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask); } } @@ -744,10 +744,10 @@ class MyArchitecture : public Architecture { unsigned, bool* thunk) { - aMask.typeMask = (1 << lir::RegisterOperand) | (1 << lir::ConstantOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) | (1 << (unsigned)lir::Operand::Type::Constant); aMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask); - bMask.typeMask = (1 << lir::RegisterOperand); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); bMask.setLowHighRegisterMasks(GeneralRegisterMask, GeneralRegisterMask); *thunk = false; @@ -758,9 +758,9 @@ class MyArchitecture : public Architecture { case lir::FloatMultiply: case lir::FloatDivide: if (useSSE(&c)) { - aMask.typeMask = (1 << lir::RegisterOperand) - | (1 << lir::MemoryOperand); - bMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair) + | (1 << (unsigned)lir::Operand::Type::Memory); + bMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); bMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); @@ -788,7 +788,7 @@ class MyArchitecture : public Architecture { if (TargetBytesPerWord == 4 and aSize == 8) { *thunk = true; } else { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(GeneralRegisterMask & ~((1 << rax) | (1 << rdx)), 0); bMask.setLowHighRegisterMasks(1 << rax, 0); } @@ -798,7 +798,7 @@ class MyArchitecture : public Architecture { if (TargetBytesPerWord == 4 and aSize == 8) { *thunk = true; } else { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(GeneralRegisterMask & ~((1 << rax) | (1 << rdx)), 0); bMask.setLowHighRegisterMasks(1 << rax, 0); } @@ -829,7 +829,7 @@ class MyArchitecture : public Architecture { case lir::JumpIfFloatLessOrEqualOrUnordered: case lir::JumpIfFloatGreaterOrEqualOrUnordered: if (useSSE(&c)) { - aMask.typeMask = (1 << lir::RegisterOperand); + aMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); aMask.setLowHighRegisterMasks(FloatRegisterMask, FloatRegisterMask); bMask.typeMask = aMask.typeMask; bMask.lowRegisterMask = aMask.lowRegisterMask; @@ -853,10 +853,10 @@ class MyArchitecture : public Architecture { OperandMask& cMask) { if (isBranch(op)) { - cMask.typeMask = (1 << lir::ConstantOperand); + cMask.typeMask = (1 << (unsigned)lir::Operand::Type::Constant); cMask.setLowHighRegisterMasks(0, 0); } else { - cMask.typeMask = (1 << lir::RegisterOperand); + cMask.typeMask = (1 << (unsigned)lir::Operand::Type::RegisterPair); cMask.lowRegisterMask = bMask.lowRegisterMask; cMask.highRegisterMask = bMask.highRegisterMask; } @@ -918,8 +918,8 @@ class MyAssembler : public Assembler { lir::Register stack(rsp); lir::Memory stackDst(rbx, stackOffset); apply(lir::Move, - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), - 
OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &stackDst)); + OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack), + OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &stackDst)); } virtual void pushFrame(unsigned argumentCount, ...) { // TODO: Argument should be replaced by OperandInfo... struct Argument { unsigned size; - lir::OperandType type; + lir::Operand::Type type; lir::Operand* operand; }; RUNTIME_ARRAY(Argument, arguments, argumentCount); @@ -937,7 +937,7 @@ class MyAssembler : public Assembler { for (unsigned i = 0; i < argumentCount; ++i) { RUNTIME_ARRAY_BODY(arguments)[i].size = va_arg(a, unsigned); RUNTIME_ARRAY_BODY(arguments)[i].type - = static_cast<lir::OperandType>(va_arg(a, int)); + = static_cast<lir::Operand::Type>(va_arg(a, int)); RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, lir::Operand*); footprint += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord); @@ -956,7 +956,7 @@ class MyAssembler : public Assembler { RUNTIME_ARRAY_BODY(arguments)[i].operand), OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord), - lir::RegisterOperand, + lir::Operand::Type::RegisterPair, &dst)); } else { lir::Memory dst(rsp, offset * TargetBytesPerWord); @@ -966,7 +966,7 @@ class MyAssembler : public Assembler { RUNTIME_ARRAY_BODY(arguments)[i].operand), OperandInfo(pad(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord), - lir::MemoryOperand, + lir::Operand::Type::Memory, &dst)); offset += ceilingDivide(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord); @@ -983,17 +983,17 @@ class MyAssembler : public Assembler { pushR(&c, TargetBytesPerWord, &base); apply(lir::Move, - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base)); + OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack), + OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &base)); } lir::Constant footprintConstant( resolvedPromise(&c, footprint * TargetBytesPerWord)); apply(lir::Subtract, OperandInfo( - TargetBytesPerWord, lir::ConstantOperand, &footprintConstant), - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack)); + TargetBytesPerWord, lir::Operand::Type::Constant, &footprintConstant), + OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack), + OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack)); } virtual void adjustFrame(unsigned difference) @@ -1003,9 +1003,9 @@ class MyAssembler : public Assembler { resolvedPromise(&c, difference * TargetBytesPerWord)); apply(lir::Subtract, OperandInfo( - TargetBytesPerWord, lir::ConstantOperand, &differenceConstant), - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack), - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack)); + TargetBytesPerWord, lir::Operand::Type::Constant, &differenceConstant), + OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack), + OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack)); } virtual void popFrame(unsigned frameFootprint) @@ -1014,8 +1014,8 @@ class MyAssembler : public Assembler { lir::Register base(rbp); lir::Register stack(rsp); apply(lir::Move, - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &base), - OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack)); + OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, 
&base),
+            OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
 
       popR(&c, TargetBytesPerWord, &base);
     } else {
@@ -1023,9 +1023,9 @@ class MyAssembler : public Assembler {
       lir::Constant footprint(
           resolvedPromise(&c, frameFootprint * TargetBytesPerWord));
       apply(lir::Add,
-            OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &footprint),
-            OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack),
-            OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &stack));
+            OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &footprint),
+            OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack),
+            OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &stack));
     }
   }
 
@@ -1157,7 +1157,7 @@ class MyAssembler : public Assembler {
     if (isBranch(op)) {
      assertT(&this->c, a.size == b.size);
      assertT(&this->c, c.size == TargetBytesPerWord);
-      assertT(&this->c, c.type == lir::ConstantOperand);
+      assertT(&this->c, c.type == lir::Operand::Type::Constant);
 
      arch_->c.branchOperations[branchIndex(&(arch_->c), a.type, b.type)](
          &this->c, op, a.size, a.operand, b.operand, c.operand);
diff --git a/src/codegen/target/x86/context.h b/src/codegen/target/x86/context.h
index 93a6f1cc5b..89f877ce1a 100644
--- a/src/codegen/target/x86/context.h
+++ b/src/codegen/target/x86/context.h
@@ -68,13 +68,13 @@ class ArchitectureContext {
   bool useNativeFeatures;
   OperationType operations[lir::OperationCount];
   UnaryOperationType
-      unaryOperations[lir::UnaryOperationCount * lir::OperandTypeCount];
+      unaryOperations[lir::UnaryOperationCount * lir::Operand::TypeCount];
   BinaryOperationType binaryOperations
       [(lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
-       * lir::OperandTypeCount * lir::OperandTypeCount];
+       * lir::Operand::TypeCount * lir::Operand::TypeCount];
   BranchOperationType branchOperations[lir::BranchOperationCount
-                                       * lir::OperandTypeCount
-                                       * lir::OperandTypeCount];
+                                       * lir::Operand::TypeCount
+                                       * lir::Operand::TypeCount];
 };
 
 class Context {
diff --git a/src/codegen/target/x86/multimethod.cpp b/src/codegen/target/x86/multimethod.cpp
index 19e6135e1a..17bc3d91cc 100644
--- a/src/codegen/target/x86/multimethod.cpp
+++ b/src/codegen/target/x86/multimethod.cpp
@@ -28,42 +28,42 @@ using namespace util;
 
 unsigned index(ArchitectureContext*,
                lir::BinaryOperation operation,
-               lir::OperandType operand1,
-               lir::OperandType operand2)
+               lir::Operand::Type operand1,
+               lir::Operand::Type operand2)
 {
   return operation + ((lir::BinaryOperationCount
-                       + lir::NonBranchTernaryOperationCount) * operand1)
+                       + lir::NonBranchTernaryOperationCount) * (unsigned)operand1)
          + ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
-            * lir::OperandTypeCount * operand2);
+            * lir::Operand::TypeCount * (unsigned)operand2);
 }
 
 unsigned index(ArchitectureContext* c UNUSED,
                lir::TernaryOperation operation,
-               lir::OperandType operand1,
-               lir::OperandType operand2)
+               lir::Operand::Type operand1,
+               lir::Operand::Type operand2)
 {
   assertT(c, not isBranch(operation));
 
   return lir::BinaryOperationCount + operation
          + ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
-            * operand1)
+            * (unsigned)operand1)
          + ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount)
-            * lir::OperandTypeCount * operand2);
+            * lir::Operand::TypeCount * (unsigned)operand2);
 }
 
 unsigned branchIndex(ArchitectureContext* c UNUSED,
-                     lir::OperandType operand1,
-                     lir::OperandType operand2)
+                     lir::Operand::Type operand1,
+                     lir::Operand::Type operand2)
 {
-  return operand1 + (lir::OperandTypeCount * operand2);
+  return (unsigned)operand1 + (lir::Operand::TypeCount * (unsigned)operand2);
 }
 
 void populateTables(ArchitectureContext* c)
 {
-  const lir::OperandType C = lir::ConstantOperand;
-  const lir::OperandType A = lir::AddressOperand;
-  const lir::OperandType R = lir::RegisterOperand;
-  const lir::OperandType M = lir::MemoryOperand;
+  const lir::Operand::Type C = lir::Operand::Type::Constant;
+  const lir::Operand::Type A = lir::Operand::Type::Address;
+  const lir::Operand::Type R = lir::Operand::Type::RegisterPair;
+  const lir::Operand::Type M = lir::Operand::Type::Memory;
 
   OperationType* zo = c->operations;
   UnaryOperationType* uo = c->unaryOperations;
diff --git a/src/codegen/target/x86/multimethod.h b/src/codegen/target/x86/multimethod.h
index e9a4b36de6..5816ccb543 100644
--- a/src/codegen/target/x86/multimethod.h
+++ b/src/codegen/target/x86/multimethod.h
@@ -23,17 +23,17 @@ class ArchitectureContext;
 
 unsigned index(ArchitectureContext*,
                lir::BinaryOperation operation,
-               lir::OperandType operand1,
-               lir::OperandType operand2);
+               lir::Operand::Type operand1,
+               lir::Operand::Type operand2);
 
 unsigned index(ArchitectureContext* c UNUSED,
                lir::TernaryOperation operation,
-               lir::OperandType operand1,
-               lir::OperandType operand2);
+               lir::Operand::Type operand1,
+               lir::Operand::Type operand2);
 
 unsigned branchIndex(ArchitectureContext* c UNUSED,
-                     lir::OperandType operand1,
-                     lir::OperandType operand2);
+                     lir::Operand::Type operand1,
+                     lir::Operand::Type operand2);
 
 void populateTables(ArchitectureContext* c);
diff --git a/src/compile.cpp b/src/compile.cpp
index b64d06f1c5..a3f6e47cae 100644
--- a/src/compile.cpp
+++ b/src/compile.cpp
@@ -9785,20 +9785,20 @@ void compileCall(MyThread* t, Context* c, ThunkIndex index, bool call = true)
     lir::Memory table(t->arch->thread(), TARGET_THREAD_THUNKTABLE);
     lir::Register scratch(t->arch->scratch());
     a->apply(lir::Move,
-             OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &table),
-             OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &table),
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
     lir::Memory proc(scratch.low, index * TargetBytesPerWord);
     a->apply(lir::Move,
-             OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &proc),
-             OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &proc),
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
     a->apply(call ? lir::Call : lir::Jump,
-             OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &scratch));
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &scratch));
   } else {
     lir::Constant proc(new (&c->zone) avian::codegen::ResolvedPromise(
         reinterpret_cast(t->thunkTable[index])));
 
     a->apply(call ? lir::LongCall : lir::LongJump,
-             OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &proc));
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &proc));
   }
 }
 
@@ -9815,7 +9815,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
     p->thunks.default_.frameSavedOffset = a->length();
 
     lir::Register thread(t->arch->thread());
-    a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
+    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
 
     compileCall(t, &context, compileMethodIndex);
 
@@ -9823,7 +9823,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
 
     lir::Register result(t->arch->returnLow());
     a->apply(lir::Jump,
-             OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &result));
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &result));
 
     p->thunks.default_.length = a->endBlock(false)->resolve(0, 0);
 
@@ -9843,17 +9843,17 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
 
     a->apply(lir::Move,
              OperandInfo(
-                 TargetBytesPerWord, lir::MemoryOperand, &virtualCallTargetSrc),
-             OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &class_));
+                 TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallTargetSrc),
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &class_));
 
     lir::Memory virtualCallTargetDst(t->arch->thread(),
                                      TARGET_THREAD_VIRTUALCALLTARGET);
 
     a->apply(
         lir::Move,
-        OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &class_),
+        OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &class_),
         OperandInfo(
-            TargetBytesPerWord, lir::MemoryOperand, &virtualCallTargetDst));
+            TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallTargetDst));
 
     lir::Register index(t->arch->virtualCallIndex());
     lir::Memory virtualCallIndex(t->arch->thread(),
@@ -9861,15 +9861,15 @@
     a->apply(
         lir::Move,
-        OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &index),
-        OperandInfo(TargetBytesPerWord, lir::MemoryOperand, &virtualCallIndex));
+        OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &index),
+        OperandInfo(TargetBytesPerWord, lir::Operand::Type::Memory, &virtualCallIndex));
 
     a->saveFrame(TARGET_THREAD_STACK, TARGET_THREAD_IP);
 
     p->thunks.defaultVirtual.frameSavedOffset = a->length();
 
     lir::Register thread(t->arch->thread());
-    a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
+    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
 
     compileCall(t, &context, compileVirtualMethodIndex);
 
@@ -9877,7 +9877,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
 
     lir::Register result(t->arch->returnLow());
     a->apply(lir::Jump,
-             OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &result));
+             OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &result));
 
     p->thunks.defaultVirtual.length = a->endBlock(false)->resolve(0, 0);
 
@@ -9894,7 +9894,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
     p->thunks.native.frameSavedOffset = a->length();
 
     lir::Register thread(t->arch->thread());
-    a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
+    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
 
     compileCall(t, &context, invokeNativeIndex);
 
@@ -9916,7 +9916,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
     p->thunks.aioob.frameSavedOffset = a->length();
 
     lir::Register thread(t->arch->thread());
-    a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
+    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
 
     compileCall(t, &context, throwArrayIndexOutOfBoundsIndex);
 
@@ -9935,7 +9935,7 @@ void compileThunks(MyThread* t, FixedAllocator* allocator)
     p->thunks.stackOverflow.frameSavedOffset = a->length();
 
     lir::Register thread(t->arch->thread());
-    a->pushFrame(1, TargetBytesPerWord, lir::RegisterOperand, &thread);
+    a->pushFrame(1, TargetBytesPerWord, lir::Operand::Type::RegisterPair, &thread);
 
     compileCall(t, &context, throwStackOverflowIndex);
 
@@ -10061,14 +10061,14 @@ uintptr_t compileVirtualThunk(MyThread* t, unsigned index, unsigned* size)
   lir::Register indexRegister(t->arch->virtualCallIndex());
   a->apply(
       lir::Move,
-      OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &indexConstant),
-      OperandInfo(TargetBytesPerWord, lir::RegisterOperand, &indexRegister));
+      OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &indexConstant),
+      OperandInfo(TargetBytesPerWord, lir::Operand::Type::RegisterPair, &indexRegister));
 
   avian::codegen::ResolvedPromise defaultVirtualThunkPromise(
       defaultVirtualThunk(t));
   lir::Constant thunk(&defaultVirtualThunkPromise);
   a->apply(lir::Jump,
-           OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &thunk));
+           OperandInfo(TargetBytesPerWord, lir::Operand::Type::Constant, &thunk));
 
   *size = a->endBlock(false)->resolve(0, 0);
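
Note (not part of the patch itself): because lir::Operand::Type is a scoped enum class, operand types no longer convert implicitly to integers, which is why the hunks above add explicit (unsigned) casts wherever a type feeds the multimethod table-index arithmetic. Below is a minimal, compilable sketch of that cast-before-index pattern; the Operand type is a local stand-in rather than the real lir header, so the names and the helper here are illustrative only.

// sketch.cpp - illustrative only; mirrors the pattern used by branchIndex()
// in src/codegen/target/x86/multimethod.cpp after this change.
#include <cassert>

struct Operand {
  enum class Type { Constant, Address, RegisterPair, Memory };
  static const unsigned TypeCount = (unsigned)Type::Memory + 1;
};

// Scoped enumerators must be cast explicitly before any arithmetic.
unsigned branchIndex(Operand::Type operand1, Operand::Type operand2)
{
  return (unsigned)operand1 + (Operand::TypeCount * (unsigned)operand2);
}

int main()
{
  // A small dispatch-style table keyed by the pair of operand types,
  // TypeCount * TypeCount entries, in the spirit of the tables above.
  int table[Operand::TypeCount * Operand::TypeCount] = {0};
  table[branchIndex(Operand::Type::Address, Operand::Type::Memory)] = 1;

  // Address = 1, Memory = 3, TypeCount = 4, so the index is 1 + 4 * 3 = 13.
  assert(branchIndex(Operand::Type::Address, Operand::Type::Memory) == 13);
  assert(table[13] == 1);
  return 0;
}

The two index() overloads in multimethod.cpp follow the same rule, and ArchitectureContext sizes its operation arrays with the same lir::Operand::TypeCount constant.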