// corda/src/codegen/compiler.cpp — 4754 lines, 121 KiB, C++
// (page metadata from the code-hosting view this file was captured from)
/* Copyright (c) 2008-2012, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
#include "target.h"
2007-12-08 23:22:13 +00:00
#include "util/runtime-array.h"
#include "codegen/compiler.h"
#include "codegen/assembler.h"
2013-02-13 23:49:46 +00:00
#include "codegen/compiler/regalloc.h"
2013-02-13 19:11:47 +00:00
#include "codegen/compiler/context.h"
#include "codegen/compiler/resource.h"
#include "codegen/compiler/value.h"
2013-02-13 19:56:56 +00:00
#include "codegen/compiler/site.h"
2013-02-13 23:49:46 +00:00
#include "codegen/compiler/read.h"
2013-02-14 02:33:40 +00:00
#include "codegen/compiler/event.h"
#include "codegen/compiler/stack.h"
#include "codegen/compiler/promise.h"
2007-12-08 23:22:13 +00:00
2013-02-13 19:11:47 +00:00
using namespace vm;
2007-12-08 23:22:13 +00:00
2013-02-13 19:11:47 +00:00
namespace avian {
namespace codegen {
namespace compiler {
// Per-subsystem debug trace switches; all off in normal builds.
const bool DebugAppend = false;
const bool DebugCompile = false;
const bool DebugResources = false;
const bool DebugFrame = false;
const bool DebugControl = false;
const bool DebugMoves = false;
const bool DebugBuddies = false;

// How many general registers to keep free when stealing a register from
// another value.
const unsigned StealRegisterReserveCount = 2;

// this should be equal to the largest number of registers used by a
// compare instruction:
const unsigned ResolveRegisterReserveCount = (TargetBytesPerWord == 8 ? 2 : 4);
2008-04-17 22:07:32 +00:00
class Stack;
2008-04-18 03:47:42 +00:00
class PushEvent;
2008-04-18 00:39:41 +00:00
class Read;
class MultiRead;
2008-09-22 14:28:18 +00:00
class StubRead;
2008-08-30 20:12:27 +00:00
class Block;
2008-11-01 22:16:18 +00:00
class Snapshot;
2007-12-09 22:45:43 +00:00
2008-04-17 22:07:32 +00:00
void
apply(Context* c, lir::UnaryOperation op,
unsigned s1Size, Site* s1Low, Site* s1High);
2008-04-17 22:07:32 +00:00
void
apply(Context* c, lir::BinaryOperation op,
unsigned s1Size, Site* s1Low, Site* s1High,
unsigned s2Size, Site* s2Low, Site* s2High);
2008-08-16 17:45:36 +00:00
void
apply(Context* c, lir::TernaryOperation op,
unsigned s1Size, Site* s1Low, Site* s1High,
unsigned s2Size, Site* s2Low, Site* s2High,
unsigned s3Size, Site* s3Low, Site* s3High);
2008-04-17 22:07:32 +00:00
2008-09-24 00:01:42 +00:00
// A single local-variable slot in the frame state; value is 0 when the
// slot is empty.
class Local {
 public:
  Value* value;
};

// 2008-11-02 20:35:35 +00:00
// One value recorded at a control-flow fork: the value, the MultiRead
// that collects reads from the fork's successors, and a flag which
// presumably marks whether the value lives in a local slot — confirm
// against restoreState/saveState usage later in the file.
class ForkElement {
  // 2008-09-22 14:28:18 +00:00
 public:
  Value* value;
  MultiRead* read;
  // 2008-11-02 20:35:35 +00:00
  bool local;
  // 2008-09-22 14:28:18 +00:00
};
// Saved per-path compiler state captured at a control-flow fork, used to
// resume compilation at another branch target.  Variable-length:
// 'elements' is a trailing array with 'readCount' entries.
class ForkState: public Compiler::State {
  // 2008-04-17 22:07:32 +00:00
 public:
  // 2013-02-14 01:18:51 +00:00
  ForkState(Stack* stack, Local* locals, Cell<Value>* saved, Event* predecessor,
            unsigned logicalIp):
    stack(stack),
    // 2008-07-05 20:21:13 +00:00
    locals(locals),
    saved(saved),
    // 2008-09-20 23:42:46 +00:00
    predecessor(predecessor),
    logicalIp(logicalIp),
    readCount(0)
  // 2008-04-17 22:07:32 +00:00
  { }

  Stack* stack;               // operand stack at the fork
  // 2008-09-24 00:01:42 +00:00
  Local* locals;              // local slots at the fork
  // 2013-02-14 01:18:51 +00:00
  Cell<Value>* saved;         // list of values carried across the fork
  // 2008-09-20 23:42:46 +00:00
  Event* predecessor;         // last event before the fork
  unsigned logicalIp;         // logical instruction pointer of the fork
  unsigned readCount;         // number of entries in elements[]
  // 2008-11-02 20:35:35 +00:00
  ForkElement elements[0];    // trailing variable-length array
  // 2008-04-17 22:07:32 +00:00
};

// Subroutine bookkeeping; records the fork state of the subroutine call
// site, if one has been captured yet.
class MySubroutine: public Compiler::Subroutine {
 public:
  MySubroutine(): forkState(0) { }

  ForkState* forkState;
};
2008-04-17 22:07:32 +00:00
// Per-logical-instruction record: the events generated for it, the frame
// state on entry, and the machine-code offset promise assigned during
// assembly.
class LogicalInstruction {
 public:
  // 2008-09-24 00:01:42 +00:00
  LogicalInstruction(int index, Stack* stack, Local* locals):
    firstEvent(0), lastEvent(0), immediatePredecessor(0), stack(stack),
    locals(locals), machineOffset(0), subroutine(0), index(index)
  // 2008-09-07 20:12:11 +00:00
  { }

  // 2008-04-19 07:03:59 +00:00
  Event* firstEvent;
  // 2008-04-17 22:07:32 +00:00
  Event* lastEvent;
  // 2008-04-20 19:35:36 +00:00
  LogicalInstruction* immediatePredecessor;
  Stack* stack;
  // 2008-09-24 00:01:42 +00:00
  Local* locals;
  Promise* machineOffset;   // resolves once code for this ip is emitted
  MySubroutine* subroutine;
  // 2008-08-30 20:12:27 +00:00
  int index;                // logical instruction index
  // 2008-04-17 22:07:32 +00:00
};
// Singly-linked list node for the constant pool built up during
// compilation.
class ConstantPoolNode {
 public:
  ConstantPoolNode(Promise* promise): promise(promise), next(0) { }

  Promise* promise;
  ConstantPoolNode* next;
};

// Promise for the address of constant-pool entry 'key'; resolvable once
// machine code exists (the pool follows the word-padded code).
class PoolPromise: public Promise {
 public:
  PoolPromise(Context* c, int key): c(c), key(key) { }

  virtual int64_t value() {
    if (resolved()) {
      // pool entries start at the first word boundary past the code
      return reinterpret_cast<int64_t>
        (c->machineCode + pad(c->machineCodeSize, TargetBytesPerWord)
         + (key * TargetBytesPerWord));
    }

    abort(c);
  }

  virtual bool resolved() {
    return c->machineCode != 0;
  }

  Context* c;
  int key;
};
// 2008-09-07 20:12:11 +00:00
// Machine-code offset of the given logical instruction; valid only once
// that instruction's offset promise has resolved.
unsigned
machineOffset(Context* c, int logicalIp)
{
  // 2008-09-22 14:28:18 +00:00
  return c->logicalCode[logicalIp]->machineOffset->value();
  // 2008-09-07 20:12:11 +00:00
}

// Promise for the machine address of a logical instruction; resolvable
// once code has been generated and the instruction's offset is known.
class IpPromise: public Promise {
 public:
  IpPromise(Context* c, int logicalIp):
    c(c),
    logicalIp(logicalIp)
  { }

  virtual int64_t value() {
    if (resolved()) {
      return reinterpret_cast<intptr_t>
        // 2008-09-07 20:12:11 +00:00
        (c->machineCode + machineOffset(c, logicalIp));
    }

    abort(c);
  }

  virtual bool resolved() {
    return c->machineCode != 0
      and c->logicalCode[logicalIp]->machineOffset->resolved();
  }

  Context* c;
  int logicalIp;
};
2013-02-14 01:18:51 +00:00
// Reverse a singly-linked cell list in place (destroying the original
// links); returns the new head, i.e. the old tail.
template<class T>
Cell<T>* reverseDestroy(Cell<T>* cell) {
  Cell<T>* reversed = 0;

  for (Cell<T>* p = cell; p;) {
    Cell<T>* rest = p->next;
    p->next = reversed;
    reversed = p;
    p = rest;
  }

  return reversed;
}
// 2008-09-22 14:28:18 +00:00
// Pairs a value with the StubRead used to reconcile its site at a
// control-flow junction.
class StubReadPair {
 public:
  Value* value;
  StubRead* read;
};

// State attached to a junction (a point with multiple predecessors):
// one StubReadPair per frame slot, in a trailing variable-length array.
class JunctionState {
 public:
  JunctionState(unsigned frameFootprint): frameFootprint(frameFootprint) { }

  unsigned frameFootprint;
  StubReadPair reads[0];
};

// Edge in the event graph connecting a predecessor event to a successor;
// threaded onto two intrusive lists, one per direction.
class Link {
 public:
  Link(Event* predecessor, Link* nextPredecessor, Event* successor,
       Link* nextSuccessor, ForkState* forkState):
    predecessor(predecessor), nextPredecessor(nextPredecessor),
    successor(successor), nextSuccessor(nextSuccessor), forkState(forkState),
    junctionState(0)
  { }

  Event* predecessor;
  Link* nextPredecessor;
  Event* successor;
  Link* nextSuccessor;
  ForkState* forkState;
  JunctionState* junctionState;
};
// Allocate a new Link in the compilation zone, connecting a predecessor
// event to a successor event.
Link*
link(Context* c, Event* predecessor, Link* nextPredecessor, Event* successor,
     Link* nextSuccessor, ForkState* forkState)
{
  return new(c->zone) Link
    (predecessor, nextPredecessor, successor, nextSuccessor, forkState);
}

// Number of links on a predecessor chain.
unsigned
countPredecessors(Link* link)
{
  unsigned n = 0;
  while (link) {
    ++ n;
    link = link->nextPredecessor;
  }
  return n;
}

// Final link on a non-empty predecessor chain.
Link*
lastPredecessor(Link* link)
{
  while (link->nextPredecessor) {
    link = link->nextPredecessor;
  }
  return link;
}

// Number of links on a successor chain.
unsigned
countSuccessors(Link* link)
{
  unsigned n = 0;
  while (link) {
    ++ n;
    link = link->nextSuccessor;
  }
  return n;
}
// Total frame size in words: aligned local area plus frame header plus
// the argument footprint.
unsigned
totalFrameSize(Context* c)
{
  return c->alignedFrameSize
    + c->arch->frameHeaderSize()
    + c->arch->argumentFootprint(c->parameterFootprint);
}

// Map a local-variable index to a frame index.  Parameters sit above the
// frame header; other locals sit below the footer.
int
frameIndex(Context* c, int localIndex)
{
  assert(c, localIndex >= 0);
  // 2009-04-22 01:39:25 +00:00
  int index = c->alignedFrameSize + c->parameterFootprint - localIndex - 1;

  if (localIndex < static_cast<int>(c->parameterFootprint)) {
    index += c->arch->frameHeaderSize();
  } else {
    index -= c->arch->frameFooterSize();
  }

  assert(c, index >= 0);
  // 2009-04-22 01:39:25 +00:00
  assert(c, static_cast<unsigned>(index) < totalFrameSize(c));

  return index;
}

// Byte offset (from the stack pointer, past the footer) of a frame
// index.
unsigned
frameIndexToOffset(Context* c, unsigned frameIndex)
{
  // 2009-04-22 01:39:25 +00:00
  assert(c, frameIndex < totalFrameSize(c));

  return (frameIndex + c->arch->frameFooterSize()) * TargetBytesPerWord;
}

// Inverse of frameIndexToOffset.
unsigned
offsetToFrameIndex(Context* c, unsigned offset)
{
  // 2009-04-22 01:39:25 +00:00
  assert(c, static_cast<int>
         ((offset / TargetBytesPerWord) - c->arch->frameFooterSize()) >= 0);
  assert(c, ((offset / TargetBytesPerWord) - c->arch->frameFooterSize())
         // 2009-04-22 01:39:25 +00:00
         < totalFrameSize(c));

  return (offset / TargetBytesPerWord) - c->arch->frameFooterSize();
}

// 2009-04-22 01:39:25 +00:00
// Frame index of the base of the current frame.
unsigned
frameBase(Context* c)
{
  return c->alignedFrameSize
    - c->arch->frameReturnAddressSize()
    // 2009-04-22 01:39:25 +00:00
    - c->arch->frameFooterSize()
    + c->arch->frameHeaderSize();
}
// 2008-11-01 19:14:13 +00:00
// Iterates over every live value in the frame: first the operand stack,
// then the local slots from highest index down.  With includeEmpty set,
// empty slots are visited as well (value == 0).
class FrameIterator {
 public:
  class Element {
   public:
    Element(Value* value, unsigned localIndex):
      value(value), localIndex(localIndex)
    // 2008-11-01 19:14:13 +00:00
    { }

    Value* const value;
    const unsigned localIndex;
  };

  FrameIterator(Context* c, Stack* stack, Local* locals,
                bool includeEmpty = false):
    stack(stack), locals(locals), localIndex(c->localFootprint - 1),
    includeEmpty(includeEmpty)
  // 2008-11-01 19:14:13 +00:00
  { }

  bool hasMore() {
    if (not includeEmpty) {
      // skip empty stack entries and empty local slots
      while (stack and stack->value == 0) stack = stack->next;
      // 2009-01-30 01:36:19 +00:00
      while (localIndex >= 0 and locals[localIndex].value == 0) -- localIndex;
    }
    // 2008-11-01 19:14:13 +00:00
    return stack != 0 or localIndex >= 0;
  }

  Element next(Context* c) {
    Value* v;
    unsigned li;
    if (stack) {
      Stack* s = stack;
      v = s->value;
      // stack slots are numbered above the locals in frame-index space
      li = s->index + c->localFootprint;
      stack = stack->next;
    } else {
      Local* l = locals + localIndex;
      v = l->value;
      li = localIndex;
      -- localIndex;
    }
    return Element(v, li);
    // 2008-11-01 19:14:13 +00:00
  }

  Stack* stack;
  Local* locals;
  int localIndex;
  bool includeEmpty;
  // 2008-11-01 19:14:13 +00:00
};

// Frame index of the slot an iterator element refers to.
int
frameIndex(Context* c, FrameIterator::Element* element)
{
  return frameIndex(c, element->localIndex);
  // 2008-11-01 19:14:13 +00:00
}
// 2008-11-01 22:16:18 +00:00
// True if the value currently occupies at least one site.
bool
// 2009-09-26 19:43:44 +00:00
hasSite(Context* c, Value* v)
// 2008-11-01 22:16:18 +00:00
{
  // 2009-09-26 19:43:44 +00:00
  SiteIterator it(c, v);
  // 2008-11-01 22:16:18 +00:00
  return it.hasMore();
}

// True if s is the only site backing v — accounting for the case where
// a register-pair-sized site shared with the next word is that word's
// only site.
bool
uniqueSite(Context* c, Value* v, Site* s)
{
  SiteIterator it(c, v);
  Site* p UNUSED = it.next();
  if (it.hasMore()) {
    // the site is not this word's only site, but if the site is
    // shared with the next word, it may be that word's only site
    if (v->nextWord != v and s->registerSize(c) > TargetBytesPerWord) {
      SiteIterator nit(c, v->nextWord);
      Site* p = nit.next();
      if (nit.hasMore()) {
        return false;
      } else {
        return p == s;
      }
    } else {
      return false;
    }
  } else {
    assert(c, p == s);
    return true;
  }
}
// 2008-04-18 00:39:41 +00:00
// Detach site s from value v, if it is attached.
void
// 2008-04-19 07:03:59 +00:00
removeSite(Context* c, Value* v, Site* s)
// 2008-04-18 00:39:41 +00:00
{
  // 2009-09-26 19:43:44 +00:00
  for (SiteIterator it(c, v); it.hasMore();) {
    // 2008-11-01 19:14:13 +00:00
    if (s == it.next()) {
      // 2008-12-12 01:09:36 +00:00
      if (DebugSites) {
        char buffer[256]; s->toString(c, buffer, 256);
        fprintf(stderr, "remove site %s from %p\n", buffer, v);
      }
      // 2008-11-01 19:14:13 +00:00
      it.remove(c);
      // 2008-04-30 15:44:17 +00:00
      break;
    }
  }
  // 2008-12-12 01:09:36 +00:00
  if (DebugSites) {
    // 2009-09-26 19:43:44 +00:00
    fprintf(stderr, "%p has more: %d\n", v, hasSite(c, v));
    // 2008-12-12 01:09:36 +00:00
  }
  // 2013-02-13 23:49:46 +00:00
  assert(c, not v->findSite(s));
  // 2008-04-30 15:44:17 +00:00
}

// 2008-04-19 07:03:59 +00:00
// Remove every site from v.
void
clearSites(Context* c, Value* v)
{
  // 2008-12-12 01:09:36 +00:00
  if (DebugSites) {
    fprintf(stderr, "clear sites for %p\n", v);
  }
  // 2009-09-26 19:43:44 +00:00
  for (SiteIterator it(c, v); it.hasMore();) {
    // 2008-11-01 22:16:18 +00:00
    it.next();
    it.remove(c);
    // 2008-04-19 07:03:59 +00:00
  }
}
// 2008-07-05 20:21:13 +00:00
// A read is "valid" when it exists and still requires satisfaction.
bool
valid(Read* r)
{
  return r and r->valid();
}

#ifndef NDEBUG

// Debug-only check: is b on a's circular buddy list?  Aborts if the list
// appears corrupt (more than 1000 entries without closing the cycle).
bool
hasBuddy(Context* c, Value* a, Value* b)
{
  if (a == b) {
    return true;
  }

  int i = 0;
  for (Value* p = a->buddy; p != a; p = p->buddy) {
    if (p == b) {
      return true;
    }
    if (++i > 1000) {
      abort(c);
    }
  }
  return false;
}

#endif // not NDEBUG
// 2008-11-01 22:16:18 +00:00
// Return the first valid read of v or any of its buddies, or 0 if the
// value is dead.
Read*
live(Context* c UNUSED, Value* v)
// 2008-07-05 20:21:13 +00:00
{
  assert(c, hasBuddy(c, v->buddy, v));

  Value* p = v;
  do {
    if (valid(p->reads)) {
      return p->reads;
    }
    p = p->buddy;
  } while (p != v);
  // 2008-11-01 19:14:13 +00:00
  // 2008-11-01 22:16:18 +00:00
  return 0;
  // 2008-11-01 19:14:13 +00:00
}

// 2008-11-01 22:16:18 +00:00
// Like live(), but skips v's current read: returns the next valid read
// of v, else the first valid read of a buddy, else 0.
Read*
// 2008-11-01 19:14:13 +00:00
liveNext(Context* c, Value* v)
{
  assert(c, hasBuddy(c, v->buddy, v));
  // 2008-11-01 22:16:18 +00:00
  Read* r = v->reads->next(c);
  if (valid(r)) return r;
  // 2008-11-01 19:14:13 +00:00
  for (Value* p = v->buddy; p != v; p = p->buddy) {
    // 2008-11-01 22:16:18 +00:00
    if (valid(p->reads)) return p->reads;
    // 2008-11-01 19:14:13 +00:00
  }
  // 2008-11-01 22:16:18 +00:00
  return 0;
  // 2008-07-05 20:21:13 +00:00
}
unsigned
sitesToString(Context* c, Value* v, char* buffer, unsigned size);

// Called when one word of a multi-word value dies: migrate any sites
// wider than a machine word to the surviving word.
void
deadWord(Context* c, Value* v)
{
  Value* nextWord = v->nextWord;
  assert(c, nextWord != v);
  // 2009-11-03 21:14:27 +00:00
  for (SiteIterator it(c, v, true, false); it.hasMore();) {
    Site* s = it.next();

    if (s->registerSize(c) > TargetBytesPerWord) {
      it.remove(c);
      // 2013-02-14 02:33:40 +00:00
      nextWord->addSite(c, s);
    }
  }
}
// Called when a value with live buddies dies: unlink v from its circular
// buddy list and transfer all of v's sites to the next buddy.
void
deadBuddy(Context* c, Value* v, Read* r UNUSED)
{
  assert(c, v->buddy != v);
  assert(c, r);

  if (DebugBuddies) {
    fprintf(stderr, "remove dead buddy %p from", v);
    for (Value* p = v->buddy; p != v; p = p->buddy) {
      fprintf(stderr, " %p", p);
    }
    fprintf(stderr, "\n");
  }

  assert(c, v->buddy);

  // splice v out of the circular buddy list, leaving it self-linked
  Value* next = v->buddy;
  v->buddy = v;
  Value* p = next;
  while (p->buddy != v) p = p->buddy;
  p->buddy = next;

  assert(c, p->buddy);

  // hand v's remaining sites to the surviving buddy
  for (SiteIterator it(c, v, false, false); it.hasMore();) {
    Site* s = it.next();
    it.remove(c);
    // 2013-02-14 02:33:40 +00:00
    next->addSite(c, s);
  }
}
// 2008-04-19 00:19:45 +00:00
// Retire the current read of v for event e.  If no valid reads remain,
// migrate wide sites to the partner word, hand sites to a live buddy, or
// clear them entirely when the value is fully dead.
void
popRead(Context* c, Event* e UNUSED, Value* v)
// 2008-04-19 00:19:45 +00:00
{
  // 2008-09-25 00:48:32 +00:00
  assert(c, e == v->reads->event);
  // 2008-12-12 01:09:36 +00:00
  if (DebugReads) {
    fprintf(stderr, "pop read %p from %p next %p event %p (%s)\n",
            v->reads, v, v->reads->next(c), e, (e ? e->name() : 0));
  }
  // 2008-04-19 07:03:59 +00:00
  v->reads = v->reads->next(c);

  if (not valid(v->reads)) {
    Value* nextWord = v->nextWord;
    if (nextWord != v) {
      // let whichever word is still read keep the wide sites
      if (valid(nextWord->reads)) {
        deadWord(c, v);
      } else {
        deadWord(c, nextWord);
      }
    }

    Read* r = live(c, v);
    if (r) {
      deadBuddy(c, v, r);
    } else {
      clearSites(c, v);
    }
    // 2008-04-19 00:19:45 +00:00
  }
}

// Insert 'buddy' into 'original's circular buddy list.
void
addBuddy(Value* original, Value* buddy)
{
  buddy->buddy = original;
  Value* p = original;
  while (p->buddy != original) p = p->buddy;
  p->buddy = buddy;

  if (DebugBuddies) {
    fprintf(stderr, "add buddy %p to", buddy);
    for (Value* p = buddy->buddy; p != buddy; p = p->buddy) {
      fprintf(stderr, " %p", p);
    }
    fprintf(stderr, "\n");
  }
}
// Map a front-end operand type to its LIR value class: floats use the
// float register class; object, address, integer and void values are all
// general-purpose.
lir::ValueType
valueType(Context* c, Compiler::OperandType type)
{
  switch (type) {
  case Compiler::FloatType:
    return lir::ValueFloat;

  case Compiler::ObjectType:
  case Compiler::AddressType:
  case Compiler::IntegerType:
  case Compiler::VoidType:
    return lir::ValueGeneral;

  default:
    abort(c);
  }
}
// 2013-02-13 19:56:56 +00:00
// Zone-allocating factories for the promise combinators.
Promise* shiftMaskPromise(Context* c, Promise* base, unsigned shift, int64_t mask) {
  return new(c->zone) ShiftMaskPromise(base, shift, mask);
}

// 2013-02-13 19:56:56 +00:00
Promise* combinedPromise(Context* c, Promise* low, Promise* high) {
  return new(c->zone) CombinedPromise(low, high);
}

// 2013-02-13 23:49:46 +00:00
Promise* resolved(Context* c, int64_t value) {
  return new(c->zone) ResolvedPromise(value);
}

// Forward declaration; defined below.
void
// 2009-10-04 19:56:48 +00:00
move(Context* c, Value* value, Site* src, Site* dst);
// 2009-01-04 01:17:51 +00:00
// Render a site list as a comma-separated, NUL-terminated string into
// buffer; returns the number of characters written (excluding the NUL).
unsigned
sitesToString(Context* c, Site* sites, char* buffer, unsigned size)
{
  unsigned total = 0;
  for (Site* s = sites; s; s = s->next) {
    total += s->toString(c, buffer + total, size - total);

    if (s->next) {
      assert(c, size > total + 2);
      memcpy(buffer + total, ", ", 2);
      total += 2;
    }
  }

  assert(c, size > total);
  buffer[total] = 0;

  return total;
}

// Render the sites of a value and all of its buddies, separated by "; ".
unsigned
sitesToString(Context* c, Value* v, char* buffer, unsigned size)
{
  unsigned total = 0;
  Value* p = v;
  do {
    if (total) {
      assert(c, size > total + 2);
      memcpy(buffer + total, "; ", 2);
      total += 2;
    }

    if (p->sites) {
      total += vm::snprintf(buffer + total, size - total, "%p has ", p);
      // 2009-01-04 01:17:51 +00:00
      total += sitesToString(c, p->sites, buffer + total, size - total);
    } else {
      total += vm::snprintf(buffer + total, size - total, "%p has nothing", p);
      // 2009-01-04 01:17:51 +00:00
    }

    p = p->buddy;
    // 2009-09-26 19:43:44 +00:00
  } while (p != v);
  // 2009-01-04 01:17:51 +00:00
  return total;
}
// Choose a destination site for the given read via the register
// allocator's pickTarget; aborts compilation if no placement is
// possible.
Site*
pickTargetSite(Context* c, Read* read, bool intersectRead = false,
               unsigned registerReserveCount = 0,
               CostCalculator* costCalculator = 0)
{
  Target target
    (pickTarget
     (c, read, intersectRead, registerReserveCount, costCalculator));

  expect(c, target.cost < Target::Impossible);

  if (target.type == lir::MemoryOperand) {
    return frameSite(c, target.index);
  } else {
    return registerSite(c, target.index);
  }
}

// Default acceptance predicate for pickSourceSite: any site matching the
// mask qualifies.
bool
acceptMatch(Context* c, Site* s, Read*, const SiteMask& mask)
{
  return s->match(c, mask);
}
// Find the cheapest existing site of read's value accepted by the given
// predicate (optionally intersecting the read's constraints and an extra
// mask).  Returns 0 if none qualifies; on success *cost receives the
// cost of copying the chosen site to 'target'.
Site*
pickSourceSite(Context* c, Read* read, Site* target = 0,
               // 2009-12-01 02:06:01 +00:00
               unsigned* cost = 0, SiteMask* extraMask = 0,
               bool intersectRead = true, bool includeBuddies = true,
               bool includeNextWord = true,
               bool (*accept)(Context*, Site*, Read*, const SiteMask&)
               = acceptMatch)
{
  // 2009-12-01 02:06:01 +00:00
  SiteMask mask;

  if (extraMask) {
    // 2013-02-14 01:18:51 +00:00
    mask = mask.intersectionWith(*extraMask);
    // 2009-12-01 02:06:01 +00:00
  }

  if (intersectRead) {
    read->intersect(&mask);
  }

  // pick the acceptable site with the lowest copy cost to target
  Site* site = 0;
  unsigned copyCost = 0xFFFFFFFF;
  for (SiteIterator it(c, read->value, includeBuddies, includeNextWord);
       it.hasMore();)
  {
    Site* s = it.next();
    if (accept(c, s, read, mask)) {
      unsigned v = s->copyCost(c, target);
      if (v < copyCost) {
        site = s;
        copyCost = v;
      }
    }
  }

  if (DebugMoves and site and target) {
    char srcb[256]; site->toString(c, srcb, 256);
    char dstb[256]; target->toString(c, dstb, 256);
    fprintf(stderr, "pick source %s to %s for %p cost %d\n",
            srcb, dstb, read->value, copyCost);
  }

  if (cost) *cost = copyCost;
  return site;
}
// Ensure read's value resides in a site satisfying the read, moving it
// if necessary — possibly routing through a temporary site when the
// architecture has no direct move from the chosen source to the chosen
// destination.  Returns the destination site.
Site*
maybeMove(Context* c, Read* read, bool intersectRead, bool includeNextWord,
          unsigned registerReserveCount = 0)
{
  Value* value = read->value;
  // a value split into two words is treated as 8 bytes wide
  unsigned size = value == value->nextWord ? TargetBytesPerWord : 8;

  // prefers destinations that an existing site can reach directly,
  // penalizing those that would need an intermediate move
  class MyCostCalculator: public CostCalculator {
   public:
    MyCostCalculator(Value* value, unsigned size, bool includeNextWord):
      value(value),
      size(size),
      includeNextWord(includeNextWord)
    { }

    virtual unsigned cost(Context* c, SiteMask dstMask)
    {
      uint8_t srcTypeMask;
      uint64_t srcRegisterMask;
      uint8_t tmpTypeMask;
      uint64_t tmpRegisterMask;
      c->arch->planMove
        (size, &srcTypeMask, &srcRegisterMask,
         &tmpTypeMask, &tmpRegisterMask,
         dstMask.typeMask, dstMask.registerMask);

      SiteMask srcMask(srcTypeMask, srcRegisterMask, AnyFrameIndex);
      for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
        Site* s = it.next();
        if (s->match(c, srcMask) or s->match(c, dstMask)) {
          return 0;
        }
      }

      return Target::IndirectMovePenalty;
    }

    Value* value;
    unsigned size;
    bool includeNextWord;
  } costCalculator(value, size, includeNextWord);

  Site* dst = pickTargetSite
    (c, read, intersectRead, registerReserveCount, &costCalculator);

  uint8_t srcTypeMask;
  uint64_t srcRegisterMask;
  uint8_t tmpTypeMask;
  uint64_t tmpRegisterMask;
  c->arch->planMove
    (size, &srcTypeMask, &srcRegisterMask,
     &tmpTypeMask, &tmpRegisterMask,
     1 << dst->type(c), dst->registerMask(c));

  // choose the cheapest source, penalizing sites the architecture cannot
  // move directly to dst
  SiteMask srcMask(srcTypeMask, srcRegisterMask, AnyFrameIndex);
  unsigned cost = 0xFFFFFFFF;
  Site* src = 0;
  for (SiteIterator it(c, value, true, includeNextWord); it.hasMore();) {
    Site* s = it.next();
    unsigned v = s->copyCost(c, dst);
    if (v == 0) {
      // already in place — no move needed
      src = s;
      cost = 0;
      break;
    }
    if (not s->match(c, srcMask)) {
      v += CopyPenalty;
    }
    if (v < cost) {
      src = s;
      cost = v;
    }
  }

  if (cost) {
    if (DebugMoves) {
      char srcb[256]; src->toString(c, srcb, 256);
      char dstb[256]; dst->toString(c, dstb, 256);
      fprintf(stderr, "maybe move %s to %s for %p to %p\n",
              srcb, dstb, value, value);
    }

    src->freeze(c, value);
    // 2013-02-14 02:33:40 +00:00
    value->addSite(c, dst);
    src->thaw(c, value);

    if (not src->match(c, srcMask)) {
      // no direct move exists: stage the value through a temporary site
      src->freeze(c, value);
      dst->freeze(c, value);

      SiteMask tmpMask(tmpTypeMask, tmpRegisterMask, AnyFrameIndex);
      SingleRead tmpRead(tmpMask, 0);
      tmpRead.value = value;
      tmpRead.successor_ = value;

      Site* tmp = pickTargetSite(c, &tmpRead, true);
      // 2013-02-14 02:33:40 +00:00
      value->addSite(c, tmp);

      move(c, value, src, tmp);

      dst->thaw(c, value);
      src->thaw(c, value);

      src = tmp;
    }

    move(c, value, src, dst);
  }

  return dst;
}

// Convenience overload: build a SingleRead from a site mask and move v
// accordingly.
Site*
maybeMove(Context* c, Value* v, const SiteMask& mask, bool intersectMask,
          bool includeNextWord, unsigned registerReserveCount = 0)
{
  SingleRead read(mask, 0);
  read.value = v;
  read.successor_ = v;

  return maybeMove
    (c, &read, intersectMask, includeNextWord, registerReserveCount);
}
// Return an existing site of the read's value that satisfies the read,
// or — if none exists — move the value into an acceptable site.
Site*
pickSiteOrMove(Context* c, Read* read, bool intersectRead,
               bool includeNextWord, unsigned registerReserveCount = 0)
{
  Site* s = pickSourceSite
    (c, read, 0, 0, 0, intersectRead, true, includeNextWord);

  if (s == 0) {
    s = maybeMove
      (c, read, intersectRead, includeNextWord, registerReserveCount);
  }
  return s;
}

// Convenience overload taking a site mask instead of a read.
Site*
pickSiteOrMove(Context* c, Value* v, const SiteMask& mask, bool intersectMask,
               bool includeNextWord, unsigned registerReserveCount = 0)
{
  SingleRead read(mask, 0);
  read.value = v;
  read.successor_ = v;

  return pickSiteOrMove
    (c, &read, intersectMask, includeNextWord, registerReserveCount);
}
// Evict the value occupying resource r so 'thief' may use it.  If r's
// site is the value's only site (and the thief isn't already a buddy of
// that value), move the value elsewhere first; then drop the site.
void
steal(Context* c, Resource* r, Value* thief)
{
  if (DebugResources) {
    // 2009-01-04 01:17:51 +00:00
    char resourceBuffer[256]; r->toString(c, resourceBuffer, 256);
    char siteBuffer[1024]; sitesToString(c, r->value, siteBuffer, 1024);
    // 2009-01-04 01:17:51 +00:00
    fprintf(stderr, "%p steal %s from %p (%s)\n",
            thief, resourceBuffer, r->value, siteBuffer);
  }
  // 2013-02-13 23:49:46 +00:00
  if ((not (thief and thief->isBuddyOf(r->value))
       and uniqueSite(c, r->value, r->site)))
  {
    // keep the site pinned while relocating the value off of it
    r->site->freeze(c, r->value);

    maybeMove(c, live(c, r->value), false, true, StealRegisterReserveCount);

    r->site->thaw(c, r->value);
  }

  removeSite(c, r->value, r->site);
}
// 2009-10-04 19:56:48 +00:00
// Site mask accepting any general-purpose register.
SiteMask
generalRegisterMask(Context* c)
// 2008-08-28 22:43:35 +00:00
{
  // 2009-10-04 19:56:48 +00:00
  return SiteMask
    (1 << lir::RegisterOperand, c->regFile->generalRegisters.mask, NoFrameIndex);
  // 2008-08-28 22:43:35 +00:00
}

// 2009-10-04 19:56:48 +00:00
// Site mask accepting any general-purpose register or a constant.
SiteMask
generalRegisterOrConstantMask(Context* c)
// 2008-08-28 22:43:35 +00:00
{
  // 2009-10-04 19:56:48 +00:00
  return SiteMask
    ((1 << lir::RegisterOperand) | (1 << lir::ConstantOperand),
     c->regFile->generalRegisters.mask, NoFrameIndex);
  // 2008-08-28 22:43:35 +00:00
}

// Zone-allocating factories for read objects.
MultiRead*
multiRead(Context* c)
// 2009-01-03 00:44:47 +00:00
{
  return new(c->zone) MultiRead;
  // 2009-01-03 00:44:47 +00:00
}

StubRead*
stubRead(Context* c)
{
  return new(c->zone) StubRead;
}
// 2009-10-04 19:56:48 +00:00
// Find an existing site of v that can serve as the word-'index' partner
// of site s, or 0 if none exists.
Site*
pickSite(Context* c, Value* v, Site* s, unsigned index, bool includeNextWord)
// 2009-10-04 19:56:48 +00:00
{
  for (SiteIterator it(c, v, true, includeNextWord); it.hasMore();) {
    // 2009-10-04 19:56:48 +00:00
    Site* candidate = it.next();
    if (s->matchNextWord(c, candidate, index)) {
      // 2009-10-04 19:56:48 +00:00
      return candidate;
    }
  }

  return 0;
}

// As pickSite, but moves v into a compatible partner site if none
// exists.
Site*
pickSiteOrMove(Context* c, Value* v, Site* s, unsigned index)
{
  Site* n = pickSite(c, v, s, index, false);
  if (n) {
    return n;
  }

  return maybeMove(c, v, s->nextWordMask(c, index), true, false);
}
// Resolve the low/high word sites of a two-word value given the site s
// of one word (moving the partner word if needed); returns the partner
// word's site.
Site*
pickSiteOrMove(Context* c, Value* v, Site* s, Site** low, Site** high)
// 2009-10-04 19:56:48 +00:00
{
  if (v->wordIndex == 0) {
    // 2009-10-04 19:56:48 +00:00
    *low = s;
    *high = pickSiteOrMove(c, v->nextWord, s, 1);
    // 2009-10-04 19:56:48 +00:00
    return *high;
  } else {
    *low = pickSiteOrMove(c, v->nextWord, s, 0);
    // 2009-10-04 19:56:48 +00:00
    *high = s;
    return *low;
  }
}

// As pickSite, but if no partner site exists, grow s into one (no move
// is emitted).
Site*
pickSiteOrGrow(Context* c, Value* v, Site* s, unsigned index)
// 2009-10-04 19:56:48 +00:00
{
  Site* n = pickSite(c, v, s, index, false);
  if (n) {
    return n;
  }

  n = s->makeNextWord(c, index);
  // 2013-02-14 02:33:40 +00:00
  v->addSite(c, n);
  // 2009-10-04 19:56:48 +00:00
  return n;
}

// Two-word variant of pickSiteOrGrow, analogous to the two-word
// pickSiteOrMove above.
Site*
pickSiteOrGrow(Context* c, Value* v, Site* s, Site** low, Site** high)
// 2009-10-04 19:56:48 +00:00
{
  if (v->wordIndex == 0) {
    // 2009-10-04 19:56:48 +00:00
    *low = s;
    *high = pickSiteOrGrow(c, v->nextWord, s, 1);
    // 2009-10-04 19:56:48 +00:00
    return *high;
  } else {
    *low = pickSiteOrGrow(c, v->nextWord, s, 0);
    // 2009-10-04 19:56:48 +00:00
    *high = s;
    return *low;
  }
}
// True if frameIndex is the home frame slot of v or of any of v's
// buddies.
bool
isHome(Value* v, int frameIndex)
{
  if (v->home == frameIndex) {
    return true;
  }

  for (Value* p = v->buddy; p != v; p = p->buddy) {
    if (p->home == frameIndex) {
      return true;
    }
  }

  return false;
}
// Acceptance predicate used when resolving values across control-flow
// joins: a matching, unfrozen site qualifies only if enough general
// registers remain in reserve (register sites) or the frame slot is the
// value's home (memory sites).
bool
acceptForResolve(Context* c, Site* s, Read* read, const SiteMask& mask)
{
  if (acceptMatch(c, s, read, mask) and (not s->frozen(c))) {
    if (s->type(c) == lir::RegisterOperand) {
      // 2009-10-04 19:56:48 +00:00
      return c->availableGeneralRegisterCount > ResolveRegisterReserveCount;
    } else {
      assert(c, s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex)));
      // 2009-10-04 19:56:48 +00:00
      return isHome(read->value, offsetToFrameIndex
                    (c, static_cast<MemorySite*>(s)->offset));
    }
  } else {
    return false;
  }
}
// Emit a move of 'value' from site src to site dst, handling mismatched
// register widths (a two-word value whose words occupy sites of
// different register sizes).
void
move(Context* c, Value* value, Site* src, Site* dst)
{
  if (DebugMoves) {
    char srcb[256]; src->toString(c, srcb, 256);
    char dstb[256]; dst->toString(c, dstb, 256);
    fprintf(stderr, "move %s to %s for %p to %p\n",
            srcb, dstb, value, value);
    // 2009-10-04 19:56:48 +00:00
  }
  // 2013-02-13 23:49:46 +00:00
  assert(c, value->findSite(dst));

  // pin both sites so register allocation can't disturb them mid-move
  src->freeze(c, value);
  // 2009-10-04 19:56:48 +00:00
  dst->freeze(c, value);

  unsigned srcSize;
  unsigned dstSize;
  if (value->nextWord == value) {
    srcSize = TargetBytesPerWord;
    dstSize = TargetBytesPerWord;
    // 2009-10-04 19:56:48 +00:00
  } else {
    srcSize = src->registerSize(c);
    dstSize = dst->registerSize(c);
  }

  if (srcSize == dstSize) {
    apply(c, lir::Move, srcSize, src, src, dstSize, dst, dst);
  } else if (srcSize > TargetBytesPerWord) {
    // wide source into a word-sized destination: materialize the partner
    // word's destination site
    Site* low, *high, *other = pickSiteOrGrow(c, value, dst, &low, &high);
    other->freeze(c, value->nextWord);
    // 2009-10-04 19:56:48 +00:00
    apply(c, lir::Move, srcSize, src, src, srcSize, low, high);
    // 2009-10-04 19:56:48 +00:00
    other->thaw(c, value->nextWord);
    // 2009-10-04 19:56:48 +00:00
  } else {
    // word-sized source into a wide destination: gather both source
    // words first
    Site* low, *high, *other = pickSiteOrMove(c, value, src, &low, &high);
    other->freeze(c, value->nextWord);
    // 2009-10-04 19:56:48 +00:00
    apply(c, lir::Move, dstSize, low, high, dstSize, dst, dst);
    // 2009-10-04 19:56:48 +00:00
    other->thaw(c, value->nextWord);
    // 2009-10-04 19:56:48 +00:00
  }

  dst->thaw(c, value);
  src->thaw(c, value);
}
// Convert a low/high site pair into a single assembler operand.
void
asAssemblerOperand(Context* c, Site* low, Site* high,
                   lir::Operand* result)
// 2009-01-03 00:44:47 +00:00
{
  low->asAssemblerOperand(c, high, result);
}

class OperandUnion: public lir::Operand {
  // must be large enough and aligned properly to hold any operand
  // type (we'd use an actual union type here, except that classes
  // with constructors cannot be used in a union):
  uintptr_t padding[4];
};
// 2008-04-17 02:55:38 +00:00
// Emit a one-operand instruction; the operand's low and high word sites
// must agree on operand type.
void
apply(Context* c, lir::UnaryOperation op,
      unsigned s1Size, Site* s1Low, Site* s1High)
// 2008-08-16 17:45:36 +00:00
{
  // 2009-09-26 19:43:44 +00:00
  assert(c, s1Low->type(c) == s1High->type(c));
  // 2008-08-16 17:45:36 +00:00
  lir::OperandType s1Type = s1Low->type(c);
  OperandUnion s1Union; asAssemblerOperand(c, s1Low, s1High, &s1Union);

  c->assembler->apply(op,
    OperandInfo(s1Size, s1Type, &s1Union));
  // 2008-08-16 17:45:36 +00:00
}

// Emit a two-operand instruction.
void
apply(Context* c, lir::BinaryOperation op,
      unsigned s1Size, Site* s1Low, Site* s1High,
      unsigned s2Size, Site* s2Low, Site* s2High)
// 2008-04-17 02:55:38 +00:00
{
  // 2009-09-26 19:43:44 +00:00
  assert(c, s1Low->type(c) == s1High->type(c));
  assert(c, s2Low->type(c) == s2High->type(c));

  lir::OperandType s1Type = s1Low->type(c);
  OperandUnion s1Union; asAssemblerOperand(c, s1Low, s1High, &s1Union);
  // 2008-08-16 17:45:36 +00:00
  lir::OperandType s2Type = s2Low->type(c);
  OperandUnion s2Union; asAssemblerOperand(c, s2Low, s2High, &s2Union);
  // 2008-03-15 20:24:04 +00:00
  c->assembler->apply(op,
    OperandInfo(s1Size, s1Type, &s1Union),
    OperandInfo(s2Size, s2Type, &s2Union));
  // 2008-04-17 02:55:38 +00:00
}

// 2008-04-17 02:55:38 +00:00
// Emit a three-operand instruction.
void
apply(Context* c, lir::TernaryOperation op,
      unsigned s1Size, Site* s1Low, Site* s1High,
      unsigned s2Size, Site* s2Low, Site* s2High,
      unsigned s3Size, Site* s3Low, Site* s3High)
// 2008-04-17 02:55:38 +00:00
{
  // 2009-09-26 19:43:44 +00:00
  assert(c, s1Low->type(c) == s1High->type(c));
  assert(c, s2Low->type(c) == s2High->type(c));
  assert(c, s3Low->type(c) == s3High->type(c));

  lir::OperandType s1Type = s1Low->type(c);
  OperandUnion s1Union; asAssemblerOperand(c, s1Low, s1High, &s1Union);
  lir::OperandType s2Type = s2Low->type(c);
  OperandUnion s2Union; asAssemblerOperand(c, s2Low, s2High, &s2Union);
  // 2008-02-17 22:29:04 +00:00
  lir::OperandType s3Type = s3Low->type(c);
  OperandUnion s3Union; asAssemblerOperand(c, s3Low, s3High, &s3Union);
  // 2008-08-16 17:45:36 +00:00
  c->assembler->apply(op,
    OperandInfo(s1Size, s1Type, &s1Union),
    OperandInfo(s2Size, s2Type, &s2Union),
    OperandInfo(s3Size, s3Type, &s3Union));
  // 2008-04-17 02:55:38 +00:00
}
// 2008-07-05 20:21:13 +00:00
// Drop any of v's sites that will not survive popping the stack down to
// popIndex — i.e. everything except memory sites at frame indexes at or
// above popIndex.
void
// 2008-10-06 00:50:59 +00:00
clean(Context* c, Value* v, unsigned popIndex)
// 2008-07-05 20:21:13 +00:00
{
  // 2009-09-26 19:43:44 +00:00
  for (SiteIterator it(c, v); it.hasMore();) {
    // 2008-11-01 22:16:18 +00:00
    Site* s = it.next();
    if (not (s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex))
             // 2008-11-01 22:16:18 +00:00
             and offsetToFrameIndex
             (c, static_cast<MemorySite*>(s)->offset)
             // 2008-11-01 22:16:18 +00:00
             >= popIndex))
    // 2008-10-06 00:50:59 +00:00
    {
      // disabled (if (false ...)) debug trace of removed memory sites
      // 2009-04-27 03:59:22 +00:00
      if (false and
          s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex)))
      // 2009-04-27 03:59:22 +00:00
      {
        // 2009-01-04 22:58:05 +00:00
        char buffer[256]; s->toString(c, buffer, 256);
        // 2009-04-27 03:59:22 +00:00
        fprintf(stderr, "remove %s from %p at %d pop offset 0x%x\n",
                // 2009-01-04 22:58:05 +00:00
                buffer, v, offsetToFrameIndex
                // 2009-04-27 03:59:22 +00:00
                (c, static_cast<MemorySite*>(s)->offset),
                frameIndexToOffset(c, popIndex));
        // 2009-01-04 22:58:05 +00:00
      }
      // 2008-11-01 22:16:18 +00:00
      it.remove(c);
      // 2008-07-05 20:21:13 +00:00
    }
  }
}

// 2008-04-17 22:07:32 +00:00
// Clean every frame value down to popIndex, then retire this event's
// reads.
void
// 2008-10-06 00:50:59 +00:00
clean(Context* c, Event* e, Stack* stack, Local* locals, Read* reads,
      unsigned popIndex)
{
  // 2008-11-01 19:14:13 +00:00
  for (FrameIterator it(c, stack, locals); it.hasMore();) {
    FrameIterator::Element e = it.next(c);
    clean(c, e.value, popIndex);
  }

  for (Read* r = reads; r; r = r->eventNext) {
    popRead(c, e, r->value);
  }
}
// Append an event to the context's event list; defined later in this
// file.
void
append(Context* c, Event* e);

// Add reads forcing each live local into its home frame slot before
// event e, so the locals are in memory across the event.
void
saveLocals(Context* c, Event* e)
{
  for (unsigned li = 0; li < c->localFootprint; ++li) {
    Local* local = e->localsBefore + li;
    if (local->value) {
      if (DebugReads) {
        fprintf(stderr, "local save read %p at %d of %d\n",
                // 2013-02-13 19:11:47 +00:00
                local->value, compiler::frameIndex(c, li), totalFrameSize(c));
      }
      // 2013-02-14 02:33:40 +00:00
      e->addRead(c, local->value, SiteMask
                 // 2013-02-13 19:11:47 +00:00
                 (1 << lir::MemoryOperand, 0, compiler::frameIndex(c, li)));
    }
  }
}
// An event is unreachable when it has at least one predecessor and every
// predecessor ends in an exit, so control can never reach it.
bool
unreachable(Event* event)
{
  if (event->predecessors == 0) {
    return false;
  }

  for (Link* p = event->predecessors; p; p = p->nextPredecessor) {
    if (not p->predecessor->allExits()) {
      return false;
    }
  }

  return true;
}
// 2008-04-17 02:55:38 +00:00
// Event representing a method return: reads the return value (if any)
// into the architecture's return register(s), then pops the frame and
// arguments — unless the event is unreachable.
class ReturnEvent: public Event {
 public:
  // 2008-04-17 02:55:38 +00:00
  ReturnEvent(Context* c, unsigned size, Value* value):
    Event(c), value(value)
  {
    if (value) {
      // 2013-02-14 02:33:40 +00:00
      this->addReads(c, value, size,
        SiteMask::fixedRegisterMask(c->arch->returnLow()),
        SiteMask::fixedRegisterMask(c->arch->returnHigh()));
      // 2008-04-17 02:55:38 +00:00
    }
    // 2008-03-15 20:24:04 +00:00
  }

  virtual const char* name() {
    return "ReturnEvent";
  }

  virtual void compile(Context* c) {
    for (Read* r = reads; r; r = r->eventNext) {
      popRead(c, this, r->value);
      // 2008-04-19 00:19:45 +00:00
    }

    if (not unreachable(this)) {
      c->assembler->popFrameAndPopArgumentsAndReturn
        // blame artifact: commit note "support stack unwinding without
        // using a frame pointer" (frame sizes are recorded per offset
        // so unwinding no longer follows frame pointers)
        // 2011-01-17 02:05:05 +00:00
        (c->alignedFrameSize,
         c->arch->argumentFootprint(c->parameterFootprint));
    }
  }
  // 2008-04-17 02:55:38 +00:00
  Value* value;
};

// Append a ReturnEvent for the given (possibly null) return value.
void
// 2008-04-17 22:07:32 +00:00
appendReturn(Context* c, unsigned size, Value* value)
{
  append(c, new(c->zone) ReturnEvent(c, size, value));
}
/// Move `src` to an acceptable destination site for `dst` if it is not
/// already in one.
///
/// srcSize       - full size in bytes of the source operand
/// srcSelectSize - number of low-order source bytes actually selected
///                 (srcSelectSize < dstSize implies a widening move)
/// dstMask       - constraint on the destination site
///
/// If nothing reads `dst` (a dead store) and `dst` has no pinned target,
/// the move is skipped entirely.
void
maybeMove(Context* c, lir::BinaryOperation type, unsigned srcSize,
          unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst,
          const SiteMask& dstMask)
{
  Read* read = live(c, dst);
  bool isStore = read == 0;

  Site* target;
  if (dst->target) {
    target = dst->target;
  } else if (isStore) {
    // dead store with no mandated target: nothing to do
    return;
  } else {
    target = pickTargetSite(c, read);
  }

  unsigned cost = src->source->copyCost(c, target);

  // a widening move always requires a real instruction, even if the
  // source already occupies the target site:
  if (srcSelectSize < dstSize) cost = 1;

  if (cost) {
    // todo: let c->arch->planMove decide this:
    bool useTemporary = ((target->type(c) == lir::MemoryOperand
                          and src->source->type(c) == lir::MemoryOperand)
                         or (srcSelectSize < dstSize
                             and target->type(c) != lir::RegisterOperand));

    src->source->freeze(c, src);

    dst->addSite(c, target);

    src->source->thaw(c, src);

    // on a big-endian target, selecting the low bytes of a wider value in
    // memory means reading from a higher address; temporarily adjust the
    // memory site's offset (restored below):
    bool addOffset = srcSize != srcSelectSize
      and c->arch->bigEndian()
      and src->source->type(c) == lir::MemoryOperand;

    if (addOffset) {
      static_cast<MemorySite*>(src->source)->offset
        += (srcSize - srcSelectSize);
    }

    target->freeze(c, dst);

    if (target->match(c, dstMask) and not useTemporary) {
      if (DebugMoves) {
        char srcb[256]; src->source->toString(c, srcb, 256);
        char dstb[256]; target->toString(c, dstb, 256);
        fprintf(stderr, "move %s to %s for %p to %p\n",
                srcb, dstb, src, dst);
      }

      src->source->freeze(c, src);

      apply(c, type, min(srcSelectSize, dstSize), src->source, src->source,
            dstSize, target, target);

      src->source->thaw(c, src);
    } else {
      // pick a temporary register which is valid as both a
      // destination and a source for the moves we need to perform:

      removeSite(c, dst, target);

      bool thunk;
      uint8_t srcTypeMask;
      uint64_t srcRegisterMask;

      c->arch->planSource(type, dstSize, &srcTypeMask, &srcRegisterMask,
                          dstSize, &thunk);

      if (src->type == lir::ValueGeneral) {
        srcRegisterMask &= c->regFile->generalRegisters.mask;
      }

      assert(c, thunk == 0);
      assert(c, dstMask.typeMask & srcTypeMask & (1 << lir::RegisterOperand));

      Site* tmpTarget = freeRegisterSite
        (c, dstMask.registerMask & srcRegisterMask);

      src->source->freeze(c, src);

      dst->addSite(c, tmpTarget);

      tmpTarget->freeze(c, dst);

      if (DebugMoves) {
        char srcb[256]; src->source->toString(c, srcb, 256);
        char dstb[256]; tmpTarget->toString(c, dstb, 256);
        fprintf(stderr, "move %s to %s for %p to %p\n",
                srcb, dstb, src, dst);
      }

      apply(c, type, srcSelectSize, src->source, src->source,
            dstSize, tmpTarget, tmpTarget);

      tmpTarget->thaw(c, dst);
      src->source->thaw(c, src);

      if (useTemporary or isStore) {
        // second hop: temporary register -> final target
        if (DebugMoves) {
          char srcb[256]; tmpTarget->toString(c, srcb, 256);
          char dstb[256]; target->toString(c, dstb, 256);
          fprintf(stderr, "move %s to %s for %p to %p\n",
                  srcb, dstb, src, dst);
        }

        dst->addSite(c, target);

        tmpTarget->freeze(c, dst);

        apply(c, lir::Move, dstSize, tmpTarget, tmpTarget, dstSize, target, target);

        tmpTarget->thaw(c, dst);

        if (isStore) {
          removeSite(c, dst, tmpTarget);
        }
      }
    }

    target->thaw(c, dst);

    if (addOffset) {
      static_cast<MemorySite*>(src->source)->offset
        -= (srcSize - srcSelectSize);
    }
  } else {
    // zero-cost: the source site already satisfies the destination
    target = src->source;

    if (DebugMoves) {
      char dstb[256]; target->toString(c, dstb, 256);
      fprintf(stderr, "null move in %s for %p to %p\n", dstb, src, dst);
    }
  }

  if (isStore) {
    removeSite(c, dst, target);
  }
}
/// Pick a site for `r`'s value compatible with the already-chosen site
/// for the adjacent word (`nextWord`, word `index`), or emit a move to
/// such a site.  When `intersectRead` is set, the read's own constraints
/// are folded into the mask first.
Site*
pickMatchOrMove(Context* c, Read* r, Site* nextWord, unsigned index,
                bool intersectRead)
{
  Site* s = pickSite(c, r->value, nextWord, index, true);
  SiteMask mask;
  if (intersectRead) {
    r->intersect(&mask);
  }
  if (s and s->match(c, mask)) {
    return s;
  }

  return pickSiteOrMove
    (c, r->value, mask.intersectionWith(nextWord->nextWordMask(c, index)),
     true, true);
}
/// Choose (or create via a move) a site for `src` that `dst` can adopt as
/// a buddy, making the two values aliases of each other.  Returns 0 when
/// `dst` is dead (nothing reads it), in which case no work is done.
Site*
pickSiteOrMove(Context* c, Value* src, Value* dst, Site* nextWord,
               unsigned index)
{
  if (live(c, dst)) {
    Read* read = live(c, src);
    Site* s;
    if (nextWord) {
      // must be compatible with the site chosen for the other word:
      s = pickMatchOrMove(c, read, nextWord, index, false);
    } else {
      s = pickSourceSite(c, read, 0, 0, 0, false, true, true);
      if (s == 0 or s->isVolatile(c)) {
        s = maybeMove(c, read, false, true);
      }
    }
    assert(c, s);

    addBuddy(src, dst);

    if (src->source->isVolatile(c)) {
      removeSite(c, src, src->source);
    }

    return s;
  } else {
    return 0;
  }
}
// Allocate a fresh Value of the given type in the compilation zone,
// optionally seeded with an initial site and a mandated target site.
Value*
value(Context* c, lir::ValueType type, Site* site = 0, Site* target = 0)
{
  Value* result = new (c->zone) Value(site, target, type);
  return result;
}
void
2009-09-26 19:43:44 +00:00
grow(Context* c, Value* v)
{
assert(c, v->nextWord == v);
2009-09-26 19:43:44 +00:00
Value* next = value(c, v->type);
v->nextWord = next;
next->nextWord = v;
next->wordIndex = 1;
2009-09-26 19:43:44 +00:00
}
/// Split a single value occupying wide sites into a low/high word pair:
/// each existing site is replaced by its low-word copy on `v` and its
/// high-word copy on the freshly-grown `v->nextWord`.
void
split(Context* c, Value* v)
{
  grow(c, v);
  for (SiteIterator it(c, v); it.hasMore();) {
    Site* s = it.next();
    removeSite(c, v, s);

    v->addSite(c, s->copyLow(c));
    v->nextWord->addSite(c, s->copyHigh(c));
  }
}
// Split `v` into a low/high word pair unless it has already been split
// (i.e. unless it already has a distinct high word).
void
maybeSplit(Context* c, Value* v)
{
  if (v->nextWord != v) {
    return;
  }
  split(c, v);
}
class MoveEvent: public Event {
public:
MoveEvent(Context* c, lir::BinaryOperation type, unsigned srcSize,
unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst,
2009-10-04 19:56:48 +00:00
const SiteMask& srcLowMask, const SiteMask& srcHighMask):
Event(c), type(type), srcSize(srcSize), srcSelectSize(srcSelectSize),
2009-10-04 19:56:48 +00:00
src(src), dstSize(dstSize), dst(dst)
{
assert(c, srcSelectSize <= srcSize);
bool noop = srcSelectSize >= dstSize;
if (dstSize > TargetBytesPerWord) {
grow(c, dst);
}
if (srcSelectSize > TargetBytesPerWord) {
maybeSplit(c, src);
}
2009-10-04 19:56:48 +00:00
2013-02-14 02:33:40 +00:00
this->addReads(c, src, srcSelectSize, srcLowMask, noop ? dst : 0,
srcHighMask,
noop and dstSize > TargetBytesPerWord ? dst->nextWord : 0);
}
2008-10-19 00:15:57 +00:00
virtual const char* name() {
return "MoveEvent";
}
virtual void compile(Context* c) {
2009-10-04 19:56:48 +00:00
uint8_t dstTypeMask;
uint64_t dstRegisterMask;
c->arch->planDestination
(type,
srcSelectSize,
1 << src->source->type(c),
(static_cast<uint64_t>(src->nextWord->source->registerMask(c)) << 32)
2009-10-04 19:56:48 +00:00
| static_cast<uint64_t>(src->source->registerMask(c)),
dstSize,
&dstTypeMask,
&dstRegisterMask);
SiteMask dstLowMask(dstTypeMask, dstRegisterMask, AnyFrameIndex);
SiteMask dstHighMask(dstTypeMask, dstRegisterMask >> 32, AnyFrameIndex);
if (srcSelectSize >= TargetBytesPerWord
and dstSize >= TargetBytesPerWord
and srcSelectSize >= dstSize)
{
if (dst->target) {
if (dstSize > TargetBytesPerWord) {
if (src->source->registerSize(c) > TargetBytesPerWord) {
apply(c, lir::Move, srcSelectSize, src->source, src->source,
dstSize, dst->target, dst->target);
if (live(c, dst) == 0) {
removeSite(c, dst, dst->target);
if (dstSize > TargetBytesPerWord) {
removeSite(c, dst->nextWord, dst->nextWord->target);
}
}
} else {
src->nextWord->source->freeze(c, src->nextWord);
maybeMove(c, lir::Move, TargetBytesPerWord, TargetBytesPerWord, src,
TargetBytesPerWord, dst, dstLowMask);
src->nextWord->source->thaw(c, src->nextWord);
maybeMove
(c, lir::Move, TargetBytesPerWord, TargetBytesPerWord, src->nextWord,
TargetBytesPerWord, dst->nextWord, dstHighMask);
}
} else {
maybeMove(c, lir::Move, TargetBytesPerWord, TargetBytesPerWord, src,
TargetBytesPerWord, dst, dstLowMask);
}
} else {
Site* low = pickSiteOrMove(c, src, dst, 0, 0);
if (dstSize > TargetBytesPerWord) {
pickSiteOrMove(c, src->nextWord, dst->nextWord, low, 1);
}
}
} else if (srcSelectSize <= TargetBytesPerWord
and dstSize <= TargetBytesPerWord)
{
maybeMove(c, type, srcSize, srcSelectSize, src, dstSize, dst,
dstLowMask);
} else {
assert(c, srcSize == TargetBytesPerWord);
assert(c, srcSelectSize == TargetBytesPerWord);
if (dst->nextWord->target or live(c, dst->nextWord)) {
assert(c, dstLowMask.typeMask & (1 << lir::RegisterOperand));
Site* low = freeRegisterSite(c, dstLowMask.registerMask);
src->source->freeze(c, src);
2013-02-14 02:33:40 +00:00
dst->addSite(c, low);
low->freeze(c, dst);
if (DebugMoves) {
char srcb[256]; src->source->toString(c, srcb, 256);
char dstb[256]; low->toString(c, dstb, 256);
fprintf(stderr, "move %s to %s for %p\n",
srcb, dstb, src);
}
apply(c, lir::Move, TargetBytesPerWord, src->source, src->source,
TargetBytesPerWord, low, low);
low->thaw(c, dst);
src->source->thaw(c, src);
assert(c, dstHighMask.typeMask & (1 << lir::RegisterOperand));
Site* high = freeRegisterSite(c, dstHighMask.registerMask);
low->freeze(c, dst);
2013-02-14 02:33:40 +00:00
dst->nextWord->addSite(c, high);
high->freeze(c, dst->nextWord);
if (DebugMoves) {
char srcb[256]; low->toString(c, srcb, 256);
char dstb[256]; high->toString(c, dstb, 256);
fprintf(stderr, "extend %s to %s for %p %p\n",
srcb, dstb, dst, dst->nextWord);
}
apply(c, lir::Move, TargetBytesPerWord, low, low, dstSize, low, high);
high->thaw(c, dst->nextWord);
low->thaw(c, dst);
} else {
pickSiteOrMove(c, src, dst, 0, 0);
}
}
for (Read* r = reads; r; r = r->eventNext) {
popRead(c, this, r->value);
2008-04-20 19:35:36 +00:00
}
2007-12-11 21:26:59 +00:00
}
lir::BinaryOperation type;
2008-08-16 17:45:36 +00:00
unsigned srcSize;
unsigned srcSelectSize;
2008-04-17 02:55:38 +00:00
Value* src;
2008-08-16 17:45:36 +00:00
unsigned dstSize;
2008-04-17 02:55:38 +00:00
Value* dst;
2008-02-11 17:21:41 +00:00
};
2007-12-11 21:26:59 +00:00
/// Plan and append a MoveEvent.  Moves are never implemented via thunk
/// calls, so planSource must not request one.
void
appendMove(Context* c, lir::BinaryOperation type, unsigned srcSize,
           unsigned srcSelectSize, Value* src, unsigned dstSize, Value* dst)
{
  bool thunk;
  uint8_t srcTypeMask;
  uint64_t srcRegisterMask;

  c->arch->planSource
    (type, srcSelectSize, &srcTypeMask, &srcRegisterMask, dstSize, &thunk);

  assert(c, not thunk);

  append(c, new(c->zone)
         MoveEvent
         (c, type, srcSize, srcSelectSize, src, dstSize, dst,
          SiteMask(srcTypeMask, srcRegisterMask, AnyFrameIndex),
          SiteMask(srcTypeMask, srcRegisterMask >> 32, AnyFrameIndex)));
}
2007-12-20 01:42:12 +00:00
/// Return the first constant site among `v`'s sites, or 0 if `v` is not
/// (also) represented as a constant.
ConstantSite*
findConstantSite(Context* c, Value* v)
{
  for (SiteIterator it(c, v); it.hasMore();) {
    Site* s = it.next();
    if (s->type(c) == lir::ConstantOperand) {
      return static_cast<ConstantSite*>(s);
    }
  }
  return 0;
}
/// Copy `v` to an additional site so it survives the upcoming clobbering
/// of `s`.  `s` is frozen across the move so the copy cannot land back on
/// the site being preserved.
void
preserve(Context* c, Value* v, Read* r, Site* s)
{
  s->freeze(c, v);

  maybeMove(c, r, false, true, 0);

  s->thaw(c, v);
}
/// Select the site that will hold `result` after a destructive two-operand
/// instruction consumes `value`.  If `value`'s current site already
/// satisfies `resultMask` (and is either dead afterwards or matched
/// exclusively), it is reused in place; otherwise a fresh target site is
/// picked for `result` and registered on it.  The chosen site is returned
/// frozen and detached from its value; the caller thaws it after emitting
/// the instruction.
Site*
getTarget(Context* c, Value* value, Value* result, const SiteMask& resultMask)
{
  Site* s;
  Value* v;
  Read* r = liveNext(c, value);
  if (value->source->match
      (c, static_cast<const SiteMask&>(resultMask))
      and (r == 0 or value->source->loneMatch
           (c, static_cast<const SiteMask&>(resultMask))))
  {
    s = value->source;
    v = value;
    if (r and uniqueSite(c, v, s)) {
      // still read later and this is its only site: save a copy first
      preserve(c, v, r, s);
    }
  } else {
    SingleRead r(resultMask, 0);
    r.value = result;
    r.successor_ = result;
    s = pickTargetSite(c, &r, true);
    v = result;
    result->addSite(c, s);
  }

  removeSite(c, v, s);

  s->freeze(c, v);

  return s;
}
/// Freeze the source site(s) of `v` (both words for multi-word values) so
/// the register allocator cannot steal them while an instruction is being
/// emitted.  Paired with thawSource.
void
freezeSource(Context* c, unsigned size, Value* v)
{
  v->source->freeze(c, v);
  if (size > TargetBytesPerWord) {
    v->nextWord->source->freeze(c, v->nextWord);
  }
}
/// Undo freezeSource: thaw the source site(s) of `v`, including the high
/// word for multi-word values.
void
thawSource(Context* c, unsigned size, Value* v)
{
  v->source->thaw(c, v);
  if (size > TargetBytesPerWord) {
    v->nextWord->source->thaw(c, v->nextWord);
  }
}
class CombineEvent: public Event {
public:
CombineEvent(Context* c, lir::TernaryOperation type,
2008-08-16 17:45:36 +00:00
unsigned firstSize, Value* first,
unsigned secondSize, Value* second,
unsigned resultSize, Value* result,
const SiteMask& firstLowMask,
const SiteMask& firstHighMask,
const SiteMask& secondLowMask,
const SiteMask& secondHighMask):
2008-08-16 17:45:36 +00:00
Event(c), type(type), firstSize(firstSize), first(first),
secondSize(secondSize), second(second), resultSize(resultSize),
result(result)
{
2013-02-14 02:33:40 +00:00
this->addReads(c, first, firstSize, firstLowMask, firstHighMask);
if (resultSize > TargetBytesPerWord) {
grow(c, result);
}
bool condensed = c->arch->alwaysCondensed(type);
2013-02-14 02:33:40 +00:00
this->addReads(c, second, secondSize,
2009-10-04 19:56:48 +00:00
secondLowMask, condensed ? result : 0,
secondHighMask, condensed ? result->nextWord : 0);
2008-02-11 17:21:41 +00:00
}
virtual const char* name() {
return "CombineEvent";
}
virtual void compile(Context* c) {
assert(c, first->source->type(c) == first->nextWord->source->type(c));
2009-10-04 22:10:36 +00:00
// if (second->source->type(c) != second->nextWord->source->type(c)) {
// fprintf(stderr, "%p %p %d : %p %p %d\n",
// second, second->source, second->source->type(c),
// second->nextWord, second->nextWord->source,
// second->nextWord->source->type(c));
// }
2009-10-04 22:10:36 +00:00
assert(c, second->source->type(c) == second->nextWord->source->type(c));
2009-10-04 19:56:48 +00:00
freezeSource(c, firstSize, first);
uint8_t cTypeMask;
uint64_t cRegisterMask;
2009-10-04 19:56:48 +00:00
c->arch->planDestination
2009-10-04 19:56:48 +00:00
(type,
firstSize,
1 << first->source->type(c),
(static_cast<uint64_t>(first->nextWord->source->registerMask(c)) << 32)
2009-10-04 19:56:48 +00:00
| static_cast<uint64_t>(first->source->registerMask(c)),
secondSize,
1 << second->source->type(c),
(static_cast<uint64_t>(second->nextWord->source->registerMask(c)) << 32)
2009-10-04 19:56:48 +00:00
| static_cast<uint64_t>(second->source->registerMask(c)),
resultSize,
&cTypeMask,
&cRegisterMask);
SiteMask resultLowMask(cTypeMask, cRegisterMask, AnyFrameIndex);
SiteMask resultHighMask(cTypeMask, cRegisterMask >> 32, AnyFrameIndex);
Site* low = getTarget(c, second, result, resultLowMask);
2009-10-04 19:56:48 +00:00
unsigned lowSize = low->registerSize(c);
Site* high
2009-10-04 19:56:48 +00:00
= (resultSize > lowSize
? getTarget(c, second->nextWord, result->nextWord, resultHighMask)
2009-09-26 19:43:44 +00:00
: low);
// fprintf(stderr, "combine %p:%p and %p:%p into %p:%p\n",
// first, first->nextWord,
// second, second->nextWord,
// result, result->nextWord);
2009-09-26 19:43:44 +00:00
apply(c, type,
firstSize, first->source, first->nextWord->source,
secondSize, second->source, second->nextWord->source,
resultSize, low, high);
2008-04-19 00:19:45 +00:00
thawSource(c, firstSize, first);
for (Read* r = reads; r; r = r->eventNext) {
popRead(c, this, r->value);
}
2008-09-23 21:18:41 +00:00
low->thaw(c, second);
2009-10-04 19:56:48 +00:00
if (resultSize > lowSize) {
high->thaw(c, second->nextWord);
}
if (live(c, result)) {
2013-02-14 02:33:40 +00:00
result->addSite(c, low);
if (resultSize > lowSize and live(c, result->nextWord)) {
2013-02-14 02:33:40 +00:00
result->nextWord->addSite(c, high);
2008-11-11 00:07:44 +00:00
}
2008-09-23 21:18:41 +00:00
}
2008-02-11 17:21:41 +00:00
}
lir::TernaryOperation type;
2008-08-16 17:45:36 +00:00
unsigned firstSize;
2008-04-17 02:55:38 +00:00
Value* first;
2008-08-16 17:45:36 +00:00
unsigned secondSize;
2008-04-17 02:55:38 +00:00
Value* second;
2008-08-16 17:45:36 +00:00
unsigned resultSize;
2008-04-17 22:07:32 +00:00
Value* result;
2008-02-11 17:21:41 +00:00
};
2008-11-01 19:14:13 +00:00
void
2008-11-01 22:16:18 +00:00
removeBuddy(Context* c, Value* v)
2008-11-01 19:14:13 +00:00
{
if (v->buddy != v) {
if (DebugBuddies) {
fprintf(stderr, "remove buddy %p from", v);
for (Value* p = v->buddy; p != v; p = p->buddy) {
fprintf(stderr, " %p", p);
}
fprintf(stderr, "\n");
}
2008-11-01 19:14:13 +00:00
assert(c, v->buddy);
2008-11-01 19:14:13 +00:00
Value* next = v->buddy;
v->buddy = v;
Value* p = next;
while (p->buddy != v) p = p->buddy;
p->buddy = next;
2008-11-01 22:16:18 +00:00
assert(c, p->buddy);
if (not live(c, next)) {
2008-11-01 22:16:18 +00:00
clearSites(c, next);
}
if (not live(c, v)) {
clearSites(c, v);
}
2008-11-01 19:14:13 +00:00
}
}
2008-11-01 22:16:18 +00:00
// Duplicate a linked list of sites, preserving order; returns the head of
// the new list (0 for an empty list).
Site*
copy(Context* c, Site* s)
{
  Site* head = 0;
  Site* tail = 0;
  while (s) {
    Site* duplicate = s->copy(c);
    if (tail == 0) {
      head = duplicate;
    } else {
      tail->next = duplicate;
    }
    tail = duplicate;
    s = s->next;
  }
  return head;
}
/// Record of a value's state (its buddy link and a private copy of its
/// site list) at a program point, kept in a singly-linked list so the
/// state can be restored later.
class Snapshot {
 public:
  Snapshot(Context* c, Value* value, Snapshot* next):
    value(value), buddy(value->buddy), sites(copy(c, value->sites)), next(next)
  { }

  Value* value;
  Value* buddy;
  Site* sites;
  Snapshot* next;
};
/// Allocate a Snapshot of `value` in the compilation zone, prepended to
/// the list `next`.
Snapshot*
snapshot(Context* c, Value* value, Snapshot* next)
{
  if (DebugControl) {
    char buffer[256]; sitesToString(c, value->sites, buffer, 256);
    fprintf(stderr, "snapshot %p buddy %p sites %s\n",
            value, value->buddy, buffer);
  }

  return new(c->zone) Snapshot(c, value, next);
}
// Snapshot `value` and every member of its buddy ring, prepending each
// snapshot to `next`; returns the new list head.
Snapshot*
makeSnapshots(Context* c, Value* value, Snapshot* next)
{
  next = snapshot(c, value, next);
  Value* p = value->buddy;
  while (p != value) {
    next = snapshot(c, p, next);
    p = p->buddy;
  }
  return next;
}
2008-08-28 22:43:35 +00:00
/// Allocate a new Stack entry holding `value` on top of `next`, numbering
/// it one past its predecessor (0 for the bottom entry).
Stack*
stack(Context* c, Value* value, Stack* next)
{
  return new(c->zone) Stack(next ? next->index + 1 : 0, value, next);
}
2008-11-02 20:35:35 +00:00
Value*
maybeBuddy(Context* c, Value* v);
2008-11-02 20:35:35 +00:00
// Push a single word onto the virtual operand stack.  A non-null value is
// first replaced by a buddy and given a home frame slot; a null value
// pushes an empty padding slot.  Returns the value actually pushed.
Value*
pushWord(Context* c, Value* v)
{
  Value* word = v ? maybeBuddy(c, v) : v;

  Stack* entry = stack(c, word, c->stack);

  if (DebugFrame) {
    fprintf(stderr, "push %p\n", word);
  }

  if (word) {
    word->home = frameIndex(c, entry->index + c->localFootprint);
  }
  c->stack = entry;

  return word;
}
/// Push a value of `footprint` words (1 or 2) onto the virtual stack,
/// honoring the target's endianness for word order.  On 32-bit targets a
/// two-word value is split and both words pushed; on 64-bit targets the
/// high slot is padding (null).
void
push(Context* c, unsigned footprint, Value* v)
{
  assert(c, footprint);

  bool bigEndian = c->arch->bigEndian();

  Value* low = v;

  if (bigEndian) {
    v = pushWord(c, v);
  }

  Value* high;
  if (footprint > 1) {
    assert(c, footprint == 2);

    if (TargetBytesPerWord == 4) {
      maybeSplit(c, low);
      high = pushWord(c, low->nextWord);
    } else {
      high = pushWord(c, 0);
    }
  } else {
    high = 0;
  }

  if (not bigEndian) {
    v = pushWord(c, v);
  }

  if (high) {
    // re-link the (possibly rebuddied) words as a pair
    v->nextWord = high;
    high->nextWord = v;
    high->wordIndex = 1;
  }
}
/// Pop a single word from the virtual operand stack.  The popped value
/// (if any) must already have a home frame slot.
void
popWord(Context* c)
{
  Stack* s = c->stack;
  assert(c, s->value == 0 or s->value->home >= 0);

  if (DebugFrame) {
    fprintf(stderr, "pop %p\n", s->value);
  }

  c->stack = s->next;
}
/// Pop a value of `footprint` words (1 or 2) from the virtual stack and
/// return its low word, accounting for endian-dependent word order.  In
/// debug builds, verifies the expected word-pair layout before popping.
Value*
pop(Context* c, unsigned footprint)
{
  assert(c, footprint);

  Stack* s = 0;

  bool bigEndian = c->arch->bigEndian();

  if (not bigEndian) {
    s = c->stack;
  }

  if (footprint > 1) {
    assert(c, footprint == 2);

#ifndef NDEBUG
    Stack* low;
    Stack* high;
    if (bigEndian) {
      high = c->stack;
      low = high->next;
    } else {
      low = c->stack;
      high = low->next;
    }

    assert(c, (TargetBytesPerWord == 8
               and low->value->nextWord == low->value and high->value == 0)
           or (TargetBytesPerWord == 4 and low->value->nextWord == high->value));
#endif // not NDEBUG

    popWord(c);
  }

  if (bigEndian) {
    s = c->stack;
  }

  popWord(c);

  return s->value;
}
/// Store `v` into local slot `index` (footprint 1 or 2 words) and return
/// the stored (possibly rebuddied) value.  When `copy` is set, the locals
/// array is duplicated first so earlier program points keep their view.
/// On 32-bit targets a two-word value stores its high word in the
/// endian-appropriate adjacent slot via a recursive single-word store.
Value*
storeLocal(Context* c, unsigned footprint, Value* v, unsigned index, bool copy)
{
  assert(c, index + footprint <= c->localFootprint);

  if (copy) {
    unsigned sizeInBytes = sizeof(Local) * c->localFootprint;
    Local* newLocals = static_cast<Local*>(c->zone->allocate(sizeInBytes));
    memcpy(newLocals, c->locals, sizeInBytes);
    c->locals = newLocals;
  }

  Value* high;
  if (footprint > 1) {
    assert(c, footprint == 2);

    unsigned highIndex;
    unsigned lowIndex;
    if (c->arch->bigEndian()) {
      highIndex = index + 1;
      lowIndex = index;
    } else {
      lowIndex = index + 1;
      highIndex = index;
    }

    if (TargetBytesPerWord == 4) {
      assert(c, v->nextWord != v);

      high = storeLocal(c, 1, v->nextWord, highIndex, false);
    } else {
      high = 0;
    }

    index = lowIndex;
  } else {
    high = 0;
  }

  v = maybeBuddy(c, v);

  if (high != 0) {
    // re-link the stored words as a pair
    v->nextWord = high;
    high->nextWord = v;
    high->wordIndex = 1;
  }

  Local* local = c->locals + index;
  local->value = v;

  if (DebugFrame) {
    fprintf(stderr, "store local %p at %d\n", local->value, index);
  }

  local->value->home = frameIndex(c, index);

  return v;
}
/// Return the value stored in local slot `index` (footprint 1 or 2
/// words).  For two-word locals, the slot holding the low word is
/// endian-dependent; the value must exist and have a home frame slot.
Value*
loadLocal(Context* c, unsigned footprint, unsigned index)
{
  assert(c, index + footprint <= c->localFootprint);

  if (footprint > 1) {
    assert(c, footprint == 2);

    if (not c->arch->bigEndian()) {
      ++ index;
    }
  }

  assert(c, c->locals[index].value);
  assert(c, c->locals[index].value->home >= 0);

  if (DebugFrame) {
    fprintf(stderr, "load local %p at %d\n", c->locals[index].value, index);
  }

  return c->locals[index].value;
}
// Create a Value pinned to the given machine register.  The value's type
// (float vs. general) is derived from which register file the register
// belongs to; the register must belong to one of them.
Value*
register_(Context* c, int number)
{
  assert(c, (1 << number) & (c->regFile->generalRegisters.mask
                             | c->regFile->floatRegisters.mask));

  Site* site = registerSite(c, number);
  bool isFloat = (1 << number) & c->regFile->floatRegisters.mask;

  return value(c, isFloat ? lir::ValueFloat : lir::ValueGeneral, site, site);
}
2008-08-28 22:43:35 +00:00
/// Plan and append a ternary-operation event.  If the architecture cannot
/// implement the operation inline, the operands (and optionally the
/// thread pointer) are pushed and a call to a runtime thunk is appended
/// instead.
void
appendCombine(Context* c, lir::TernaryOperation type,
              unsigned firstSize, Value* first,
              unsigned secondSize, Value* second,
              unsigned resultSize, Value* result)
{
  bool thunk;
  uint8_t firstTypeMask;
  uint64_t firstRegisterMask;
  uint8_t secondTypeMask;
  uint64_t secondRegisterMask;

  c->arch->planSource(type, firstSize, &firstTypeMask, &firstRegisterMask,
                      secondSize, &secondTypeMask, &secondRegisterMask,
                      resultSize, &thunk);

  if (thunk) {
    Stack* oldStack = c->stack;

    bool threadParameter;
    intptr_t handler = c->client->getThunk
      (type, firstSize, resultSize, &threadParameter);

    unsigned stackSize = ceilingDivide(secondSize, TargetBytesPerWord)
      + ceilingDivide(firstSize, TargetBytesPerWord);

    // arguments are pushed second-then-first so the thunk sees them in
    // declaration order:
    compiler::push(c, ceilingDivide(secondSize, TargetBytesPerWord), second);
    compiler::push(c, ceilingDivide(firstSize, TargetBytesPerWord), first);

    if (threadParameter) {
      ++ stackSize;

      compiler::push(c, 1, register_(c, c->arch->thread()));
    }

    Stack* argumentStack = c->stack;
    c->stack = oldStack;

    appendCall
      (c, value(c, lir::ValueGeneral, constantSite(c, handler)), 0, 0, result,
       resultSize, argumentStack, stackSize, 0);
  } else {
    append
      (c, new(c->zone)
       CombineEvent
       (c, type,
        firstSize, first,
        secondSize, second,
        resultSize, result,
        SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
        SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex),
        SiteMask(secondTypeMask, secondRegisterMask, AnyFrameIndex),
        SiteMask(secondTypeMask, secondRegisterMask >> 32, AnyFrameIndex)));
  }
}
2008-02-11 17:21:41 +00:00
class TranslateEvent: public Event {
public:
TranslateEvent(Context* c, lir::BinaryOperation type, unsigned valueSize,
2009-10-04 19:56:48 +00:00
Value* value, unsigned resultSize, Value* result,
const SiteMask& valueLowMask,
const SiteMask& valueHighMask):
2009-10-04 19:56:48 +00:00
Event(c), type(type), valueSize(valueSize), resultSize(resultSize),
value(value), result(result)
{
bool condensed = c->arch->alwaysCondensed(type);
if (resultSize > TargetBytesPerWord) {
grow(c, result);
}
2009-10-04 19:56:48 +00:00
2013-02-14 02:33:40 +00:00
this->addReads(c, value, valueSize, valueLowMask, condensed ? result : 0,
valueHighMask, condensed ? result->nextWord : 0);
2008-03-15 20:24:04 +00:00
}
virtual const char* name() {
return "TranslateEvent";
}
virtual void compile(Context* c) {
assert(c, value->source->type(c) == value->nextWord->source->type(c));
2009-10-04 22:10:36 +00:00
uint8_t bTypeMask;
uint64_t bRegisterMask;
c->arch->planDestination
2009-10-04 19:56:48 +00:00
(type,
valueSize,
1 << value->source->type(c),
(static_cast<uint64_t>(value->nextWord->source->registerMask(c)) << 32)
2009-10-04 19:56:48 +00:00
| static_cast<uint64_t>(value->source->registerMask(c)),
resultSize,
&bTypeMask,
&bRegisterMask);
2009-10-04 19:56:48 +00:00
SiteMask resultLowMask(bTypeMask, bRegisterMask, AnyFrameIndex);
SiteMask resultHighMask(bTypeMask, bRegisterMask >> 32, AnyFrameIndex);
Site* low = getTarget(c, value, result, resultLowMask);
2009-10-04 19:56:48 +00:00
unsigned lowSize = low->registerSize(c);
Site* high
2009-10-04 19:56:48 +00:00
= (resultSize > lowSize
? getTarget(c, value->nextWord, result->nextWord, resultHighMask)
2009-09-26 19:43:44 +00:00
: low);
apply(c, type, valueSize, value->source, value->nextWord->source,
2009-10-04 19:56:48 +00:00
resultSize, low, high);
for (Read* r = reads; r; r = r->eventNext) {
popRead(c, this, r->value);
}
2008-04-19 00:19:45 +00:00
low->thaw(c, value);
2009-10-04 19:56:48 +00:00
if (resultSize > lowSize) {
high->thaw(c, value->nextWord);
}
if (live(c, result)) {
2013-02-14 02:33:40 +00:00
result->addSite(c, low);
if (resultSize > lowSize and live(c, result->nextWord)) {
2013-02-14 02:33:40 +00:00
result->nextWord->addSite(c, high);
2008-11-11 00:07:44 +00:00
}
2008-04-21 00:21:48 +00:00
}
2008-02-11 17:21:41 +00:00
}
lir::BinaryOperation type;
2009-10-04 19:56:48 +00:00
unsigned valueSize;
unsigned resultSize;
2008-04-17 02:55:38 +00:00
Value* value;
Value* result;
2008-10-15 00:45:31 +00:00
Read* resultRead;
SiteMask resultLowMask;
SiteMask resultHighMask;
2008-02-11 17:21:41 +00:00
};
2008-02-11 17:21:41 +00:00
/// Plan and append a unary/translation event, or fall back to pushing the
/// operand and calling a runtime thunk when the architecture cannot
/// implement the operation inline.
void
appendTranslate(Context* c, lir::BinaryOperation type, unsigned firstSize,
                Value* first, unsigned resultSize, Value* result)
{
  bool thunk;
  uint8_t firstTypeMask;
  uint64_t firstRegisterMask;

  c->arch->planSource(type, firstSize, &firstTypeMask, &firstRegisterMask,
                      resultSize, &thunk);

  if (thunk) {
    Stack* oldStack = c->stack;

    compiler::push(c, ceilingDivide(firstSize, TargetBytesPerWord), first);

    Stack* argumentStack = c->stack;
    c->stack = oldStack;

    appendCall
      (c, value
       (c, lir::ValueGeneral, constantSite
        (c, c->client->getThunk(type, firstSize, resultSize))),
       0, 0, result, resultSize, argumentStack,
       ceilingDivide(firstSize, TargetBytesPerWord), 0);
  } else {
    append(c, new(c->zone)
           TranslateEvent
           (c, type, firstSize, first, resultSize, result,
            SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
            SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex)));
  }
}
fix a couple of subtle Thread.getStackTrace bugs The first problem was that, on x86, we failed to properly keep track of whether to expect the return address to be on the stack or not when unwinding through a frame. We were relying on a "stackLimit" pointer to tell us whether we were looking at the most recently-called frame by comparing it with the stack pointer for that frame. That was inaccurate in the case of a thread executing at the beginning of a method before a new frame is allocated, in which case the most recent two frames share a stack pointer, confusing the unwinder. The solution involves keeping track of how many frames we've looked at while walking the stack. The other problem was that compareIpToMethodBounds assumed every method was followed by at least one byte of padding before the next method started. That assumption was usually valid because we were storing the size following method code prior to the code itself. However, the last method of an AOT-compiled code image is not followed by any such method header and may instead be followed directly by native code with no intervening padding. In that case, we risk interpreting that native code as part of the preceding method, with potentially bizarre results. The reason for the compareIpToMethodBounds assumption was that methods which throw exceptions as their last instruction generate a non-returning call, which nonetheless push a return address on the stack which points past the end of the method, and the unwinder needs to know that return address belongs to that method. A better solution is to add an extra trap instruction to the end of such methods, which is what this patch does.
2012-05-05 00:35:13 +00:00
class OperationEvent: public Event {
2009-03-03 03:18:15 +00:00
public:
OperationEvent(Context* c, lir::Operation op):
2009-03-03 03:18:15 +00:00
Event(c), op(op)
{ }
virtual const char* name() {
fix a couple of subtle Thread.getStackTrace bugs The first problem was that, on x86, we failed to properly keep track of whether to expect the return address to be on the stack or not when unwinding through a frame. We were relying on a "stackLimit" pointer to tell us whether we were looking at the most recently-called frame by comparing it with the stack pointer for that frame. That was inaccurate in the case of a thread executing at the beginning of a method before a new frame is allocated, in which case the most recent two frames share a stack pointer, confusing the unwinder. The solution involves keeping track of how many frames we've looked at while walking the stack. The other problem was that compareIpToMethodBounds assumed every method was followed by at least one byte of padding before the next method started. That assumption was usually valid because we were storing the size following method code prior to the code itself. However, the last method of an AOT-compiled code image is not followed by any such method header and may instead be followed directly by native code with no intervening padding. In that case, we risk interpreting that native code as part of the preceding method, with potentially bizarre results. The reason for the compareIpToMethodBounds assumption was that methods which throw exceptions as their last instruction generate a non-returning call, which nonetheless push a return address on the stack which points past the end of the method, and the unwinder needs to know that return address belongs to that method. A better solution is to add an extra trap instruction to the end of such methods, which is what this patch does.
2012-05-05 00:35:13 +00:00
return "OperationEvent";
2009-03-03 03:18:15 +00:00
}
virtual void compile(Context* c) {
c->assembler->apply(op);
}
lir::Operation op;
2009-03-03 03:18:15 +00:00
};
/// Append an OperationEvent emitting the zero-operand instruction `op`.
void
appendOperation(Context* c, lir::Operation op)
{
  append(c, new(c->zone) OperationEvent(c, op));
}
/// If memory site `s` conflicts with the registers demanded by `v`'s
/// pending reads, move `v` to a register and drop the conflicting site.
void
moveIfConflict(Context* c, Value* v, MemorySite* s)
{
  if (v->reads) {
    SiteMask mask(1 << lir::RegisterOperand, ~0, AnyFrameIndex);
    v->reads->intersect(&mask);
    if (s->conflicts(mask)) {
      maybeMove(c, v->reads, true, false);
      removeSite(c, v, s);
    }
  }
}
2008-03-15 23:54:20 +00:00
// Materializes a memory operand for `result` covering
// [base + displacement + index * scale].  The base (and a non-constant
// index) must be in general-purpose registers when this event compiles;
// a constant index is folded into the displacement instead.
class MemoryEvent: public Event {
 public:
  MemoryEvent(Context* c, Value* base, int displacement, Value* index,
              unsigned scale, Value* result):
    Event(c), base(base), displacement(displacement), index(index),
    scale(scale), result(result)
  {
    this->addRead(c, base, generalRegisterMask(c));
    if (index) {
      // the index may remain a constant, in which case it is folded
      // into the displacement at compile time (see below)
      this->addRead(c, index, generalRegisterOrConstantMask(c));
    }
  }

  virtual const char* name() {
    return "MemoryEvent";
  }

  virtual void compile(Context* c) {
    int indexRegister;
    int displacement = this->displacement;
    unsigned scale = this->scale;
    if (index) {
      ConstantSite* constant = findConstantSite(c, index);

      if (constant) {
        // fold a constant index into the displacement so no index
        // register is needed
        indexRegister = lir::NoRegister;
        displacement += (constant->value->value() * scale);
        scale = 1;
      } else {
        assert(c, index->source->type(c) == lir::RegisterOperand);
        indexRegister = static_cast<RegisterSite*>(index->source)->number;
      }
    } else {
      indexRegister = lir::NoRegister;
    }
    assert(c, base->source->type(c) == lir::RegisterOperand);
    int baseRegister = static_cast<RegisterSite*>(base->source)->number;

    popRead(c, this, base);
    if (index) {
      if (TargetBytesPerWord == 8 and indexRegister != lir::NoRegister) {
        // widen the 32-bit index to a full 64-bit register before use
        // (presumably sign-extending — semantics are defined by the
        // target's Move implementation; confirm against the backend)
        apply(c, lir::Move, 4, index->source, index->source,
              8, index->source, index->source);
      }

      popRead(c, this, index);
    }

    MemorySite* site = memorySite
      (c, baseRegister, displacement, indexRegister, scale);

    MemorySite* low;
    if (result->nextWord != result) {
      // two-word result: give each word its own memory site
      MemorySite* high = static_cast<MemorySite*>(site->copyHigh(c));
      low = static_cast<MemorySite*>(site->copyLow(c));

      result->nextWord->target = high;
      result->nextWord->addSite(c, high);
      moveIfConflict(c, result->nextWord, high);
    } else {
      low = site;
    }

    result->target = low;
    result->addSite(c, low);
    moveIfConflict(c, result, low);
  }

  Value* base;
  int displacement;
  Value* index;
  unsigned scale;
  Value* result;
};
void
2008-04-17 20:48:26 +00:00
appendMemory(Context* c, Value* base, int displacement, Value* index,
unsigned scale, Value* result)
2008-04-17 02:55:38 +00:00
{
append(c, new(c->zone)
MemoryEvent(c, base, displacement, index, scale, result));
2008-04-17 20:48:26 +00:00
}
2009-10-07 00:50:32 +00:00
// Reinterpret the low `size` bytes of v as an IEEE floating-point
// number: 4 bytes -> float, otherwise double.
double
asFloat(unsigned size, int64_t v)
{
  return size == 4 ? bitsToFloat(v) : bitsToDouble(v);
}
// True when a and b do not compare (i.e. at least one operand is NaN).
bool
unordered(double a, double b)
{
  return (not (a >= b)) and (not (a < b));
}
// Statically evaluate a conditional branch whose operands are both
// known constants: returns true iff a branch of the given type would be
// taken.  Note the argument order: the caller passes its first operand
// as `b` and its second as `a` (see BranchEvent::compile).  Float
// variants reinterpret the raw bits per the operand size; *OrUnordered
// variants also jump when either operand is NaN.
bool
shouldJump(Context* c, lir::TernaryOperation type, unsigned size, int64_t b,
           int64_t a)
{
  switch (type) {
  case lir::JumpIfEqual:
    return a == b;

  case lir::JumpIfNotEqual:
    return a != b;

  case lir::JumpIfLess:
    return a < b;

  case lir::JumpIfGreater:
    return a > b;

  case lir::JumpIfLessOrEqual:
    return a <= b;

  case lir::JumpIfGreaterOrEqual:
    return a >= b;

  case lir::JumpIfFloatEqual:
    return asFloat(size, a) == asFloat(size, b);

  case lir::JumpIfFloatNotEqual:
    return asFloat(size, a) != asFloat(size, b);

  case lir::JumpIfFloatLess:
    return asFloat(size, a) < asFloat(size, b);

  case lir::JumpIfFloatGreater:
    return asFloat(size, a) > asFloat(size, b);

  case lir::JumpIfFloatLessOrEqual:
    return asFloat(size, a) <= asFloat(size, b);

  case lir::JumpIfFloatGreaterOrEqual:
    return asFloat(size, a) >= asFloat(size, b);

  case lir::JumpIfFloatLessOrUnordered:
    return asFloat(size, a) < asFloat(size, b)
      or unordered(asFloat(size, a), asFloat(size, b));

  case lir::JumpIfFloatGreaterOrUnordered:
    return asFloat(size, a) > asFloat(size, b)
      or unordered(asFloat(size, a), asFloat(size, b));

  case lir::JumpIfFloatLessOrEqualOrUnordered:
    return asFloat(size, a) <= asFloat(size, b)
      or unordered(asFloat(size, a), asFloat(size, b));

  case lir::JumpIfFloatGreaterOrEqualOrUnordered:
    return asFloat(size, a) >= asFloat(size, b)
      or unordered(asFloat(size, a), asFloat(size, b));

  default:
    // not a branch operation
    abort(c);
  }
}
// Map a floating-point branch to the integer branch used to test the
// result of the comparison thunk.  The *OrUnordered variants collapse
// onto the plain ordered branches because the thunk encodes the
// unordered case in its integer result.
lir::TernaryOperation
thunkBranch(Context* c, lir::TernaryOperation type)
{
  switch (type) {
  case lir::JumpIfFloatEqual:
    return lir::JumpIfEqual;
  case lir::JumpIfFloatNotEqual:
    return lir::JumpIfNotEqual;
  case lir::JumpIfFloatLess:
  case lir::JumpIfFloatLessOrUnordered:
    return lir::JumpIfLess;
  case lir::JumpIfFloatGreater:
  case lir::JumpIfFloatGreaterOrUnordered:
    return lir::JumpIfGreater;
  case lir::JumpIfFloatLessOrEqual:
  case lir::JumpIfFloatLessOrEqualOrUnordered:
    return lir::JumpIfLessOrEqual;
  case lir::JumpIfFloatGreaterOrEqual:
  case lir::JumpIfFloatGreaterOrEqualOrUnordered:
    return lir::JumpIfGreaterOrEqual;
  default:
    abort(c);  // not a float branch
  }
}
2008-04-20 05:23:08 +00:00
// A conditional branch comparing `first` and `second` and jumping to
// `address` when the condition holds.  If both operands turn out to be
// resolved constants at compile time, the comparison is folded and
// either an unconditional jump or nothing is emitted.
class BranchEvent: public Event {
 public:
  BranchEvent(Context* c, lir::TernaryOperation type, unsigned size,
              Value* first, Value* second, Value* address,
              const SiteMask& firstLowMask,
              const SiteMask& firstHighMask,
              const SiteMask& secondLowMask,
              const SiteMask& secondHighMask):
    Event(c), type(type), size(size), first(first), second(second),
    address(address)
  {
    this->addReads(c, first, size, firstLowMask, firstHighMask);
    this->addReads(c, second, size, secondLowMask, secondHighMask);

    // ask the architecture where the branch target may live
    uint8_t typeMask;
    uint64_t registerMask;
    c->arch->planDestination(type, size, 0, 0, size, 0, 0, TargetBytesPerWord,
                             &typeMask, &registerMask);

    this->addRead(c, address, SiteMask(typeMask, registerMask, AnyFrameIndex));
  }

  virtual const char* name() {
    return "BranchEvent";
  }

  virtual void compile(Context* c) {
    ConstantSite* firstConstant = findConstantSite(c, first);
    ConstantSite* secondConstant = findConstantSite(c, second);

    if (not unreachable(this)) {
      if (firstConstant
          and secondConstant
          and firstConstant->value->resolved()
          and secondConstant->value->resolved())
      {
        // both operands known: fold the comparison now
        int64_t firstValue = firstConstant->value->value();
        int64_t secondValue = secondConstant->value->value();

        if (size > TargetBytesPerWord) {
          // combine the high words on targets narrower than the operand
          firstValue |= findConstantSite
            (c, first->nextWord)->value->value() << 32;
          secondValue |= findConstantSite
            (c, second->nextWord)->value->value() << 32;
        }

        if (shouldJump(c, type, size, firstValue, secondValue)) {
          apply(c, lir::Jump, TargetBytesPerWord, address->source, address->source);
        }
      } else {
        // general case: freeze the operands so the branch emission
        // cannot steal their sites, then emit the compare-and-branch
        freezeSource(c, size, first);
        freezeSource(c, size, second);
        freezeSource(c, TargetBytesPerWord, address);

        apply(c, type, size, first->source, first->nextWord->source,
              size, second->source, second->nextWord->source,
              TargetBytesPerWord, address->source, address->source);

        thawSource(c, TargetBytesPerWord, address);
        thawSource(c, size, second);
        thawSource(c, size, first);
      }
    }

    for (Read* r = reads; r; r = r->eventNext) {
      popRead(c, this, r->value);
    }
  }

  virtual bool isBranch() { return true; }

  lir::TernaryOperation type;
  unsigned size;
  Value* first;
  Value* second;
  Value* address;
};
2009-10-07 00:50:32 +00:00
// Append a conditional branch.  If the architecture cannot perform the
// comparison inline (planSource sets `thunk`), the comparison is
// compiled as a call to a runtime thunk whose integer result is then
// branched on via the corresponding integer branch (thunkBranch).
void
appendBranch(Context* c, lir::TernaryOperation type, unsigned size, Value* first,
             Value* second, Value* address)
{
  bool thunk;
  uint8_t firstTypeMask;
  uint64_t firstRegisterMask;
  uint8_t secondTypeMask;
  uint64_t secondRegisterMask;

  c->arch->planSource(type, size, &firstTypeMask, &firstRegisterMask,
                      size, &secondTypeMask, &secondRegisterMask,
                      TargetBytesPerWord, &thunk);

  if (thunk) {
    Stack* oldStack = c->stack;

    bool threadParameter;
    intptr_t handler = c->client->getThunk
      (type, size, size, &threadParameter);

    assert(c, not threadParameter);

    // push the operands as thunk arguments (second first, so `first`
    // ends up on top), then restore the logical stack
    compiler::push(c, ceilingDivide(size, TargetBytesPerWord), second);
    compiler::push(c, ceilingDivide(size, TargetBytesPerWord), first);

    Stack* argumentStack = c->stack;
    c->stack = oldStack;

    Value* result = value(c, lir::ValueGeneral);
    appendCall
      (c, value
       (c, lir::ValueGeneral, constantSite(c, handler)), 0, 0, result, 4,
       argumentStack, ceilingDivide(size, TargetBytesPerWord) * 2, 0);

    // branch on the thunk's integer result compared against zero
    appendBranch(c, thunkBranch(c, type), 4, value
                 (c, lir::ValueGeneral, constantSite(c, static_cast<int64_t>(0))),
                 result, address);
  } else {
    append
      (c, new(c->zone)
       BranchEvent
       (c, type, size, first, second, address,
        SiteMask(firstTypeMask, firstRegisterMask, AnyFrameIndex),
        SiteMask(firstTypeMask, firstRegisterMask >> 32, AnyFrameIndex),
        SiteMask(secondTypeMask, secondRegisterMask, AnyFrameIndex),
        SiteMask(secondTypeMask, secondRegisterMask >> 32, AnyFrameIndex)));
  }
}
// An unconditional jump (or call-like transfer) to `address`.  When
// `exit` is set the event is treated as leaving the method entirely;
// `cleanLocals` additionally flushes all locals to their canonical
// frame slots after the reads are released.
class JumpEvent: public Event {
 public:
  JumpEvent(Context* c, lir::UnaryOperation type, Value* address, bool exit,
            bool cleanLocals):
    Event(c), type(type), address(address), exit(exit),
    cleanLocals(cleanLocals)
  {
    bool thunk;
    uint8_t typeMask;
    uint64_t registerMask;
    c->arch->plan(type, TargetBytesPerWord, &typeMask, &registerMask, &thunk);

    // jumps are never thunked
    assert(c, not thunk);

    this->addRead(c, address, SiteMask(typeMask, registerMask, AnyFrameIndex));
  }

  virtual const char* name() {
    return "JumpEvent";
  }

  virtual void compile(Context* c) {
    if (not unreachable(this)) {
      apply(c, type, TargetBytesPerWord, address->source, address->source);
    }

    for (Read* r = reads; r; r = r->eventNext) {
      popRead(c, this, r->value);
    }

    if (cleanLocals) {
      // force every local into its home frame slot
      for (FrameIterator it(c, 0, c->locals); it.hasMore();) {
        FrameIterator::Element e = it.next(c);
        clean(c, e.value, 0);
      }
    }
  }

  virtual bool isBranch() { return true; }

  virtual bool allExits() {
    return exit or unreachable(this);
  }

  lir::UnaryOperation type;
  Value* address;
  bool exit;
  bool cleanLocals;
};
2008-04-20 05:23:08 +00:00
// Append an unconditional jump to `address`.  Defaults preserve the
// original interface: not an exit, no local cleanup.
void
appendJump(Context* c, lir::UnaryOperation type, Value* address, bool exit = false,
           bool cleanLocals = false)
{
  JumpEvent* event = new(c->zone) JumpEvent(c, type, address, exit, cleanLocals);
  append(c, event);
}
// Emits an array bounds check: if `index` is negative or not less than
// the length stored at object+lengthOffset, call `handler` (which is
// expected to raise the exception).  A constant negative index compiles
// to an unconditional handler call; a constant non-negative index skips
// the lower-bound test.
class BoundsCheckEvent: public Event {
 public:
  BoundsCheckEvent(Context* c, Value* object, unsigned lengthOffset,
                   Value* index, intptr_t handler):
    Event(c), object(object), lengthOffset(lengthOffset), index(index),
    handler(handler)
  {
    this->addRead(c, object, generalRegisterMask(c));
    this->addRead(c, index, generalRegisterOrConstantMask(c));
  }

  virtual const char* name() {
    return "BoundsCheckEvent";
  }

  virtual void compile(Context* c) {
    Assembler* a = c->assembler;

    ConstantSite* constant = findConstantSite(c, index);
    CodePromise* outOfBoundsPromise = 0;

    if (constant) {
      if (constant->value->value() < 0) {
        // statically out of bounds: call the handler unconditionally
        lir::Constant handlerConstant(resolved(c, handler));
        a->apply(lir::Call,
                 OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &handlerConstant));
      }
    } else {
      // runtime lower-bound test: jump to the (not-yet-located)
      // out-of-bounds stub if index < 0
      outOfBoundsPromise = compiler::codePromise(c, static_cast<Promise*>(0));

      ConstantSite zero(resolved(c, 0));
      ConstantSite oob(outOfBoundsPromise);
      apply(c, lir::JumpIfLess,
            4, &zero, &zero,
            4, index->source, index->source,
            TargetBytesPerWord, &oob, &oob);
    }

    if (constant == 0 or constant->value->value() >= 0) {
      assert(c, object->source->type(c) == lir::RegisterOperand);
      // load the array length directly from the object header
      MemorySite length(static_cast<RegisterSite*>(object->source)->number,
                        lengthOffset, lir::NoRegister, 1);
      length.acquired = true;

      CodePromise* nextPromise = compiler::codePromise(c, static_cast<Promise*>(0));

      freezeSource(c, TargetBytesPerWord, index);

      // upper-bound test: skip the handler call when length > index
      ConstantSite next(nextPromise);
      apply(c, lir::JumpIfGreater,
            4, index->source,
            index->source, 4, &length,
            &length, TargetBytesPerWord, &next, &next);

      thawSource(c, TargetBytesPerWord, index);

      if (constant == 0) {
        // the lower-bound failure path lands here, at the handler call
        outOfBoundsPromise->offset = a->offset();
      }

      lir::Constant handlerConstant(resolved(c, handler));
      a->apply(lir::Call,
               OperandInfo(TargetBytesPerWord, lir::ConstantOperand, &handlerConstant));

      nextPromise->offset = a->offset();
    }

    popRead(c, this, object);
    popRead(c, this, index);
  }

  Value* object;
  unsigned lengthOffset;
  Value* index;
  intptr_t handler;
};
// Append an array bounds check against the length at object+lengthOffset.
void
appendBoundsCheck(Context* c, Value* object, unsigned lengthOffset,
                  Value* index, intptr_t handler)
{
  BoundsCheckEvent* event = new(c->zone)
    BoundsCheckEvent(c, object, lengthOffset, index, handler);
  append(c, event);
}
2008-09-25 00:48:32 +00:00
class FrameSiteEvent: public Event {
public:
FrameSiteEvent(Context* c, Value* value, int index):
Event(c), value(value), index(index)
{ }
virtual const char* name() {
2008-09-25 00:48:32 +00:00
return "FrameSiteEvent";
}
virtual void compile(Context* c) {
if (live(c, value)) {
2013-02-14 02:33:40 +00:00
value->addSite(c, frameSite(c, index));
}
}
Value* value;
int index;
};
// Append an event that pins `value` to frame slot `index`.
void
appendFrameSite(Context* c, Value* value, int index)
{
  FrameSiteEvent* event = new(c->zone) FrameSiteEvent(c, value, index);
  append(c, event);
}
// Total frame footprint: the locals plus however many stack slots are
// occupied as of `s` (s->index is the top slot's zero-based index).
unsigned
frameFootprint(Context* c, Stack* s)
{
  unsigned stackFootprint = s ? s->index + 1 : 0;
  return c->localFootprint + stackFootprint;
}
// Visit a control-flow link: advance every value's read chain past the
// fork recorded on the link, and retire any stub reads recorded in the
// link's junction state.
void
visit(Context* c, Link* link)
{
  //   fprintf(stderr, "visit link from %d to %d fork %p junction %p\n",
  //           link->predecessor->logicalInstruction->index,
  //           link->successor->logicalInstruction->index,
  //           link->forkState,
  //           link->junctionState);

  ForkState* forkState = link->forkState;
  if (forkState) {
    for (unsigned i = 0; i < forkState->readCount; ++i) {
      ForkElement* p = forkState->elements + i;
      Value* v = p->value;
      // step past the fork's multi-read to the branch-specific target
      v->reads = p->read->nextTarget();
      //       fprintf(stderr, "next read %p for %p from %p\n", v->reads, v, p->read);
      if (not live(c, v)) {
        clearSites(c, v);
      }
    }
  }

  JunctionState* junctionState = link->junctionState;
  if (junctionState) {
    for (unsigned i = 0; i < junctionState->frameFootprint; ++i) {
      StubReadPair* p = junctionState->reads + i;
      if (p->value and p->value->reads) {
        // the stub read must still be at the head of the chain
        assert(c, p->value->reads == p->read);
        popRead(c, 0, p->value);
      }
    }
  }
}
2008-11-01 19:14:13 +00:00
// Links `buddy` into `original`'s buddy ring so the two values share
// sites from this point on.  The read added in the constructor names
// `buddy` as its successor, keeping `original` alive until the link is
// established.
class BuddyEvent: public Event {
 public:
  BuddyEvent(Context* c, Value* original, Value* buddy):
    Event(c), original(original), buddy(buddy)
  {
    this->addRead(c, original, SiteMask(~0, ~0, AnyFrameIndex), buddy);
  }

  virtual const char* name() {
    return "BuddyEvent";
  }

  virtual void compile(Context* c) {
    if (DebugBuddies) {
      fprintf(stderr, "original %p buddy %p\n", original, buddy);
    }

    // the original must have at least one site for the buddy to share
    assert(c, hasSite(c, original));

    assert(c, original);
    assert(c, buddy);

    addBuddy(original, buddy);

    popRead(c, this, original);
  }

  Value* original;
  Value* buddy;
};
// Append an event making `buddy` share sites with `original`.
void
appendBuddy(Context* c, Value* original, Value* buddy)
{
  BuddyEvent* event = new(c->zone) BuddyEvent(c, original, buddy);
  append(c, event);
}
class SaveLocalsEvent: public Event {
public:
SaveLocalsEvent(Context* c):
Event(c)
{
saveLocals(c, this);
}
virtual const char* name() {
return "SaveLocalsEvent";
}
virtual void compile(Context* c) {
for (Read* r = reads; r; r = r->eventNext) {
popRead(c, this, r->value);
}
}
};
// Append an event that forces all locals to be saved.
void
appendSaveLocals(Context* c)
{
  SaveLocalsEvent* event = new(c->zone) SaveLocalsEvent(c);
  append(c, event);
}
// A no-op event used to give a logical instruction at least one event,
// carrying the locals that were current when it was appended.
class DummyEvent: public Event {
 public:
  DummyEvent(Context* c, Local* locals): Event(c), locals_(locals) { }

  virtual const char* name() { return "DummyEvent"; }

  virtual void compile(Context*) { }

  virtual Local* locals() { return locals_; }

  Local* locals_;
};
// Append a DummyEvent against the frame state recorded for the current
// logical instruction, then restore the caller's frame state.
void
appendDummy(Context* c)
{
  Stack* savedStack = c->stack;
  Local* savedLocals = c->locals;

  LogicalInstruction* instruction = c->logicalCode[c->logicalIp];

  c->stack = instruction->stack;
  c->locals = instruction->locals;

  append(c, new(c->zone) DummyEvent(c, savedLocals));

  c->stack = savedStack;
  c->locals = savedLocals;
}
// Append event e to the event list, linking it to its predecessor and
// to the current logical instruction.  If the frame state has diverged
// from the instruction's recorded state, a DummyEvent is inserted first
// to re-synchronize.
void
append(Context* c, Event* e)
{
  LogicalInstruction* i = c->logicalCode[c->logicalIp];
  if (c->stack != i->stack or c->locals != i->locals) {
    appendDummy(c);
  }

  if (DebugAppend) {
    fprintf(stderr, " -- append %s at %d with %d stack before\n",
            e->name(), e->logicalInstruction->index, c->stack ?
            c->stack->index + 1 : 0);
  }

  if (c->lastEvent) {
    c->lastEvent->next = e;
  } else {
    c->firstEvent = e;
  }
  c->lastEvent = e;

  Event* p = c->predecessor;
  if (p) {
    if (DebugAppend) {
      fprintf(stderr, "%d precedes %d\n", p->logicalInstruction->index,
              e->logicalInstruction->index);
    }

    // connect predecessor and successor, carrying any pending fork state
    Link* link = compiler::link
      (c, p, e->predecessors, e, p->successors, c->forkState);
    e->predecessors = link;
    p->successors = link;
  }
  c->forkState = 0;

  c->predecessor = e;

  if (e->logicalInstruction->firstEvent == 0) {
    e->logicalInstruction->firstEvent = e;
  }
  e->logicalInstruction->lastEvent = e;
}
2009-01-04 01:17:51 +00:00
// Choose (and, if necessary, create by moving) the site that will serve
// as the source for read r.  Returns 0 when the value has no sites at
// all.  For the low word of a two-word value whose high word already
// has a source, the pick is constrained to match that source.
Site*
readSource(Context* c, Read* r)
{
  Value* v = r->value;

  if (DebugReads) {
    char buffer[1024]; sitesToString(c, v, buffer, 1024);
    fprintf(stderr, "read source for %p from %s\n", v, buffer);
  }

  if (not hasSite(c, v)) {
    if (DebugReads) {
      fprintf(stderr, "no sites found for %p\n", v);
    }
    return 0;
  }

  Value* high = r->high(c);
  if (high) {
    return pickMatchOrMove(c, r, high->source, 0, true);
  } else {
    return pickSiteOrMove(c, r, true, true);
  }
}
2009-01-04 01:17:51 +00:00
// Share the junction site table `sites` with every predecessor of e
// that does not yet have one, recursing through each such
// predecessor's successors so the whole junction region agrees on a
// single table.
void
propagateJunctionSites(Context* c, Event* e, Site** sites)
{
  for (Link* predecessorLink = e->predecessors;
       predecessorLink;
       predecessorLink = predecessorLink->nextPredecessor)
  {
    Event* predecessor = predecessorLink->predecessor;
    if (predecessor->junctionSites != 0) {
      continue;  // already assigned — stop the spread here
    }

    predecessor->junctionSites = sites;
    for (Link* successorLink = predecessor->successors;
         successorLink;
         successorLink = successorLink->nextSuccessor)
    {
      propagateJunctionSites(c, successorLink->successor, sites);
    }
  }
}
2009-01-04 01:17:51 +00:00
// If any successor of e is a junction (has multiple predecessors),
// allocate a zeroed site table sized to e's frame and propagate it
// through the junction region starting at that successor.
void
propagateJunctionSites(Context* c, Event* e)
{
  for (Link* successorLink = e->successors;
       successorLink;
       successorLink = successorLink->nextSuccessor)
  {
    Event* successor = successorLink->successor;
    if (successor->predecessors->nextPredecessor == 0) {
      continue;  // not a junction
    }

    unsigned size = sizeof(Site*) * frameFootprint(c, e->stackAfter);
    Site** junctionSites = static_cast<Site**>(c->zone->allocate(size));
    memset(junctionSites, 0, size);

    propagateJunctionSites(c, successor, junctionSites);
    break;
  }
}
2008-07-23 23:58:29 +00:00
// A (site, value) pair recording a freeze so it can later be thawed.
// Kept a plain aggregate: instances are default-constructed via
// placement new (see freeze) and filled in by init.
class SiteRecord {
 public:
  Site* site;
  Value* value;
};
// Populate a freeze record with the frozen site and its value.
void
init(SiteRecord* r, Site* s, Value* v)
{
  r->value = v;
  r->site = s;
}
// A fixed-capacity stack of freeze records, backed by caller-provided
// storage (typically a RUNTIME_ARRAY).  `index` is the number of
// records currently in use.
class SiteRecordList {
 public:
  SiteRecordList(SiteRecord* records, unsigned capacity):
    records(records), index(0), capacity(capacity)
  { }

  SiteRecord* records;
  unsigned index;
  unsigned capacity;
};
// Freeze site s on behalf of value v and push a record so thaw can
// undo it later.
void
freeze(Context* c, SiteRecordList* frozen, Site* s, Value* v)
{
  assert(c, frozen->index < frozen->capacity);

  s->freeze(c, v);

  SiteRecord* record = frozen->records + frozen->index;
  ++ frozen->index;
  init(record, s, v);
}
2009-01-04 01:17:51 +00:00
// Thaw every recorded freeze, most recent first, emptying the list.
void
thaw(Context* c, SiteRecordList* frozen)
{
  while (frozen->index > 0) {
    -- frozen->index;
    SiteRecord* record = frozen->records + frozen->index;
    record->site->thaw(c, record->value);
  }
}
2009-01-04 01:17:51 +00:00
// First pass of junction-site resolution: honor the site table already
// agreed upon (`sites`).  Each live value is moved to its recorded site
// and that site is frozen; a live value with no recorded site makes the
// result incomplete.  A recorded site whose value is dead is frozen via
// a dummy value so later passes cannot reuse it.  Returns true iff every
// live frame slot was satisfied.
bool
resolveOriginalSites(Context* c, Event* e, SiteRecordList* frozen,
                     Site** sites)
{
  bool complete = true;
  for (FrameIterator it(c, e->stackAfter, e->localsAfter, true);
       it.hasMore();)
  {
    FrameIterator::Element el = it.next(c);
    Value* v = el.value;
    Read* r = v ? live(c, v) : 0;
    Site* s = sites[el.localIndex];

    if (r) {
      if (s) {
        if (DebugControl) {
          char buffer[256];
          s->toString(c, buffer, 256);
          fprintf(stderr, "resolve original %s for %p local %d frame %d\n",
                  buffer, v, el.localIndex, frameIndex(c, &el));
        }

        // move v into (a site matching) the recorded site and pin it
        Site* target = pickSiteOrMove
          (c, v, s->mask(c), true, true, ResolveRegisterReserveCount);

        freeze(c, frozen, target, v);
      } else {
        complete = false;
      }
    } else if (s) {
      if (DebugControl) {
        char buffer[256];
        s->toString(c, buffer, 256);
        fprintf(stderr, "freeze original %s for %p local %d frame %d\n",
                buffer, v, el.localIndex, frameIndex(c, &el));
      }

      // value is dead: acquire and immediately release the site via a
      // dummy value, then freeze it so it stays reserved
      Value dummy(0, 0, lir::ValueGeneral);
      dummy.addSite(c, s);
      removeSite(c, &dummy, s);
      freeze(c, frozen, s, 0);
    }
  }

  return complete;
}
2009-01-04 01:17:51 +00:00
// Second pass: for live frame slots still lacking a junction site, try
// to adopt a site the value already occupies (register or memory).
// Successful picks are frozen and copied into the table.  Returns true
// iff no slot remains unresolved.
bool
resolveSourceSites(Context* c, Event* e, SiteRecordList* frozen, Site** sites)
{
  bool complete = true;
  for (FrameIterator it(c, e->stackAfter, e->localsAfter); it.hasMore();) {
    FrameIterator::Element el = it.next(c);
    Value* v = el.value;
    Read* r = live(c, v);

    if (r and sites[el.localIndex] == 0) {
      SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand),
                    c->regFile->generalRegisters.mask, AnyFrameIndex);

      Site* s = pickSourceSite
        (c, r, 0, 0, &mask, true, false, true, acceptForResolve);

      if (s) {
        if (DebugControl) {
          char buffer[256]; s->toString(c, buffer, 256);
          fprintf(stderr, "resolve source %s from %p local %d frame %d\n",
                  buffer, v, el.localIndex, frameIndex(c, &el));
        }

        freeze(c, frozen, s, v);

        sites[el.localIndex] = s->copy(c);
      } else {
        complete = false;
      }
    }
  }

  return complete;
}
// Final pass: any live slot still without a junction site gets one by
// force — either an existing acceptable site or a fresh one created by
// moving the value.  Always succeeds; each chosen site is frozen and
// copied into the table.
void
resolveTargetSites(Context* c, Event* e, SiteRecordList* frozen, Site** sites)
{
  for (FrameIterator it(c, e->stackAfter, e->localsAfter); it.hasMore();) {
    FrameIterator::Element el = it.next(c);
    Value* v = el.value;
    Read* r = live(c, v);

    if (r and sites[el.localIndex] == 0) {
      SiteMask mask((1 << lir::RegisterOperand) | (1 << lir::MemoryOperand),
                    c->regFile->generalRegisters.mask, AnyFrameIndex);

      Site* s = pickSourceSite
        (c, r, 0, 0, &mask, false, true, true, acceptForResolve);
      if (s == 0) {
        // no usable existing site: move the value somewhere acceptable
        s = maybeMove(c, v, mask, false, true, ResolveRegisterReserveCount);
      }

      freeze(c, frozen, s, v);

      sites[el.localIndex] = s->copy(c);

      if (DebugControl) {
        char buffer[256]; sites[el.localIndex]->toString(c, buffer, 256);
        fprintf(stderr, "resolve target %s for %p local %d frame %d\n",
                buffer, el.value, el.localIndex, frameIndex(c, &el));
      }
    }
  }
}
2008-08-16 17:45:36 +00:00
// Resolve the junction site table for event e, running the three
// resolution passes in order (original, source, target).  If e has no
// table yet, one is propagated through the junction region; the table
// may still be absent afterwards if no successor is a junction.
void
resolveJunctionSites(Context* c, Event* e, SiteRecordList* frozen)
{
  bool complete;
  if (e->junctionSites) {
    complete = resolveOriginalSites(c, e, frozen, e->junctionSites);
  } else {
    propagateJunctionSites(c, e);
    complete = false;
  }

  if (e->junctionSites) {
    if (not complete) {
      complete = resolveSourceSites(c, e, frozen, e->junctionSites);
      if (not complete) {
        resolveTargetSites(c, e, frozen, e->junctionSites);
      }
    }

    if (DebugControl) {
      fprintf(stderr, "resolved junction sites %p at %d\n",
              e->junctionSites, e->logicalInstruction->index);
    }
  }
}
2008-07-23 23:58:29 +00:00
// For a branch with multiple successors that is not itself a junction,
// resolve sites into a throwaway table so each value has a concrete
// location the successors can rely on.
void
resolveBranchSites(Context* c, Event* e, SiteRecordList* frozen)
{
  if (e->successors->nextSuccessor and e->junctionSites == 0) {
    unsigned footprint = frameFootprint(c, e->stackAfter);
    RUNTIME_ARRAY(Site*, branchSites, footprint);
    memset(RUNTIME_ARRAY_BODY(branchSites), 0, sizeof(Site*) * footprint);

    if (not resolveSourceSites(c, e, frozen, RUNTIME_ARRAY_BODY(branchSites)))
    {
      resolveTargetSites(c, e, frozen, RUNTIME_ARRAY_BODY(branchSites));
    }
  }
}
// At a branch with multiple successors, snapshot the state of every
// frame value (and every value explicitly saved in the fork state) so
// it can be restored when compilation later resumes at the other
// successor(s).
void
captureBranchSnapshots(Context* c, Event* e)
{
  if (e->successors->nextSuccessor) {
    for (FrameIterator it(c, e->stackAfter, e->localsAfter); it.hasMore();) {
      FrameIterator::Element el = it.next(c);
      e->snapshots = makeSnapshots(c, el.value, e->snapshots);
    }

    for (Cell<Value>* sv = e->successors->forkState->saved; sv; sv = sv->next) {
      e->snapshots = makeSnapshots(c, sv->value, e->snapshots);
    }

    if (DebugControl) {
      fprintf(stderr, "captured snapshots %p at %d\n",
              e->snapshots, e->logicalInstruction->index);
    }
  }
}
2008-04-17 02:55:38 +00:00
// Resolve control-flow site tables for event e: junction resolution
// first, then branch-only resolution.
void
populateSiteTables(Context* c, Event* e, SiteRecordList* frozen)
{
  resolveJunctionSites(c, e, frozen);

  resolveBranchSites(c, e, frozen);
}
// Attach a copy of each site in the chain `s` to the (live) value v.
void
setSites(Context* c, Value* v, Site* s)
{
  assert(c, live(c, v));

  for (Site* site = s; site; site = site->next) {
    v->addSite(c, site->copy(c));
  }

  if (DebugControl) {
    char buffer[256]; sitesToString(c, v->sites, buffer, 256);
    fprintf(stderr, "set sites %s for %p\n", buffer, v);
  }
}
2008-08-16 17:45:36 +00:00
void
2008-11-01 22:16:18 +00:00
resetFrame(Context* c, Event* e)
2008-08-16 17:45:36 +00:00
{
2008-11-01 19:14:13 +00:00
for (FrameIterator it(c, e->stackBefore, e->localsBefore); it.hasMore();) {
FrameIterator::Element el = it.next(c);
clearSites(c, el.value);
}
while (c->acquiredResources) {
clearSites(c, c->acquiredResources->value);
}
2008-11-01 22:16:18 +00:00
}
// Reset the frame, then install the recorded site table `sites` onto
// the live frame values at e's entry.  Dead values and empty table
// entries are skipped (with optional debug tracing).
void
setSites(Context* c, Event* e, Site** sites)
{
  resetFrame(c, e);

  for (FrameIterator it(c, e->stackBefore, e->localsBefore); it.hasMore();) {
    FrameIterator::Element el = it.next(c);
    if (sites[el.localIndex]) {
      if (live(c, el.value)) {
        setSites(c, el.value, sites[el.localIndex]);
      } else if (DebugControl) {
        char buffer[256]; sitesToString(c, sites[el.localIndex], buffer, 256);
        fprintf(stderr, "skip sites %s for %p local %d frame %d\n",
                buffer, el.value, el.localIndex, frameIndex(c, &el));
      }
    } else if (DebugControl) {
      fprintf(stderr, "no sites for %p local %d frame %d\n",
              el.value, el.localIndex, frameIndex(c, &el));
    }
  }
}
// Dissolve the buddy link of every value currently on the frame.
void
removeBuddies(Context* c)
{
  for (FrameIterator it(c, c->stack, c->locals); it.hasMore();) {
    FrameIterator::Element element = it.next(c);
    removeBuddy(c, element.value);
  }
}
2008-11-01 22:16:18 +00:00
// Restore the frame to the state captured in `snapshots`: rebuild each
// value's buddy ring as recorded, reset the frame, and re-install the
// snapshotted sites for values that are live and currently site-less.
void
restore(Context* c, Event* e, Snapshot* snapshots)
{
  // first detach every snapshotted value from its current buddy ring
  for (Snapshot* s = snapshots; s; s = s->next) {
    Value* v = s->value;
    Value* next = v->buddy;
    if (v != next) {
      v->buddy = v;
      Value* p = next;
      while (p->buddy != v) p = p->buddy;
      p->buddy = next;
    }
  }

  // then relink each value to its snapshotted buddy
  for (Snapshot* s = snapshots; s; s = s->next) {
    assert(c, s->buddy);

    s->value->buddy = s->buddy;
  }

  resetFrame(c, e);

  for (Snapshot* s = snapshots; s; s = s->next) {
    // original tested live(c, s->value) twice in nested conditions;
    // one test is sufficient and behavior is unchanged
    if (live(c, s->value) and s->sites and s->value->sites == 0) {
      setSites(c, s->value, s->sites);
    }

    //     char buffer[256]; sitesToString(c, s->sites, buffer, 256);
    //     fprintf(stderr, "restore %p buddy %p sites %s live %p\n",
    //             s->value, s->value->buddy, buffer, live(c, s->value));
  }
}
2008-07-23 23:58:29 +00:00
2008-08-16 17:45:36 +00:00
// Assign a source site to every read of event e (moving values if
// needed).  Sites are frozen while the remaining reads are processed so
// one read's move cannot evict another's source, then all thawed.
void
populateSources(Context* c, Event* e)
{
  RUNTIME_ARRAY(SiteRecord, frozenRecords, e->readCount);
  SiteRecordList frozen(RUNTIME_ARRAY_BODY(frozenRecords), e->readCount);

  for (Read* r = e->reads; r; r = r->eventNext) {
    r->value->source = readSource(c, r);
    if (r->value->source) {
      if (DebugReads) {
        char buffer[256]; r->value->source->toString(c, buffer, 256);
        fprintf(stderr, "freeze source %s for %p\n",
                buffer, r->value);
      }

      freeze(c, &frozen, r->value->source, r->value);
    }
  }

  thaw(c, &frozen);
}
2008-07-23 23:58:29 +00:00
// Attach a stub read to value v (if any) and record the pair in p.  The
// stub keeps v's read chain alive across a junction until the real
// successor reads are known (see updateJunctionReads).
void
setStubRead(Context* c, StubReadPair* p, Value* v)
{
  if (v) {
    StubRead* r = stubRead(c);
    if (DebugReads) {
      fprintf(stderr, "add stub read %p to %p\n", r, v);
    }
    // TODO: this is rather icky looking... but despite how it looks, it will not cause an NPE
    // NOTE(review): calling a member function through a null Event* is
    // undefined behavior in C++ even if addRead never dereferences
    // `this`; presumably it only uses `this` as an opaque tag here —
    // confirm against Event::addRead and consider a free-function form.
    ((Event*)0)->addRead(c, v, r);
    p->value = v;
    p->read = r;
  }
}
// Allocate a JunctionState for this link, sized to the current frame
// footprint, and give every frame value a stub read so it survives
// until the junction's real reads are resolved.
void
populateJunctionReads(Context* c, Link* link)
{
  JunctionState* state = new
    (c->zone->allocate
     (sizeof(JunctionState)
      + (sizeof(StubReadPair) * frameFootprint(c, c->stack))))
    JunctionState(frameFootprint(c, c->stack));

  memset(state->reads, 0, sizeof(StubReadPair) * frameFootprint(c, c->stack));

  link->junctionState = state;

  for (FrameIterator it(c, c->stack, c->locals); it.hasMore();) {
    FrameIterator::Element e = it.next(c);
    setStubRead(c, state->reads + e.localIndex, e.value);
  }
}
// Resolve the stub reads recorded at a junction: point each unresolved
// stub at the value's current live read if one exists, then invalidate
// any stubs whose values turned out to have no further reads.
void
updateJunctionReads(Context* c, JunctionState* state)
{
  for (FrameIterator it(c, c->stack, c->locals); it.hasMore();) {
    FrameIterator::Element e = it.next(c);
    StubReadPair* p = state->reads + e.localIndex;
    if (p->value and p->read->read == 0) {
      Read* r = live(c, e.value);
      if (r) {
        if (DebugReads) {
          fprintf(stderr, "stub read %p for %p valid: %p\n",
                  p->read, p->value, r);
        }
        p->read->read = r;
      }
    }
  }

  // any stub still unresolved is for a value with no remaining reads
  for (unsigned i = 0; i < frameFootprint(c, c->stack); ++i) {
    StubReadPair* p = state->reads + i;
    if (p->value and p->read->read == 0) {
      if (DebugReads) {
        fprintf(stderr, "stub read %p for %p invalid\n", p->read, p->value);
      }
      p->read->valid_ = false;
    }
  }
}
2008-08-16 17:45:36 +00:00
// Return the next non-null logical instruction after i, or 0 if i is
// the last one.
LogicalInstruction*
next(Context* c, LogicalInstruction* i)
{
  for (unsigned n = i->index + 1; n < c->logicalCodeLength; ++n) {
    LogicalInstruction* candidate = c->logicalCode[n];
    if (candidate) {
      return candidate;
    }
  }
  return 0;
}
2008-07-23 23:58:29 +00:00
2008-08-16 17:45:36 +00:00
class Block {
public:
Block(Event* head):
head(head), nextBlock(0), nextInstruction(0), assemblerBlock(0), start(0)
2008-08-16 17:45:36 +00:00
{ }
2008-07-23 23:58:29 +00:00
2008-08-16 17:45:36 +00:00
Event* head;
Block* nextBlock;
2008-08-16 17:45:36 +00:00
LogicalInstruction* nextInstruction;
2008-09-07 20:12:11 +00:00
Assembler::Block* assemblerBlock;
2008-08-16 17:45:36 +00:00
unsigned start;
};
2008-07-23 23:58:29 +00:00
2008-08-16 17:45:36 +00:00
// Allocate a new Block (zone-allocated; reclaimed with the zone).
Block*
block(Context* c, Event* head)
{
  return new(c->zone) Block(head);
}
// Main code-generation driver: walk the event list in order, resolving
// control-flow site state at block boundaries, assigning sources to
// each event's reads, compiling the event, and carving the stream into
// Blocks whenever the logical-instruction sequence breaks.
void
compile(Context* c, uintptr_t stackOverflowHandler, unsigned stackLimitOffset)
{
  // ensure the current logical instruction has at least one event
  if (c->logicalCode[c->logicalIp]->lastEvent == 0) {
    appendDummy(c);
  }

  Assembler* a = c->assembler;

  Block* firstBlock = block(c, c->firstEvent);
  Block* block = firstBlock;

  if (stackOverflowHandler) {
    a->checkStackOverflow(stackOverflowHandler, stackLimitOffset);
  }

  a->allocateFrame(c->alignedFrameSize);

  for (Event* e = c->firstEvent; e; e = e->next) {
    if (DebugCompile) {
      fprintf(stderr,
              " -- compile %s at %d with %d preds %d succs %d stack\n",
              e->name(), e->logicalInstruction->index,
              countPredecessors(e->predecessors),
              countSuccessors(e->successors),
              e->stackBefore ? e->stackBefore->index + 1 : 0);
    }

    e->block = block;

    c->stack = e->stackBefore;
    c->locals = e->localsBefore;

    if (e->logicalInstruction->machineOffset == 0) {
      e->logicalInstruction->machineOffset = a->offset();
    }

    if (e->predecessors) {
      // advance read chains across the incoming edge
      visit(c, lastPredecessor(e->predecessors));

      Event* first = e->predecessors->predecessor;
      if (e->predecessors->nextPredecessor) {
        // junction: reconcile all incoming edges' stub reads, then
        // adopt the agreed junction site table
        for (Link* pl = e->predecessors;
             pl->nextPredecessor;
             pl = pl->nextPredecessor)
        {
          updateJunctionReads(c, pl->junctionState);
        }

        if (DebugControl) {
          fprintf(stderr, "set sites to junction sites %p at %d\n",
                  first->junctionSites, first->logicalInstruction->index);
        }

        setSites(c, e, first->junctionSites);
        removeBuddies(c);
      } else if (first->successors->nextSuccessor) {
        // non-junction edge out of a branch: restore the snapshots
        // taken when the branch was compiled
        if (DebugControl) {
          fprintf(stderr, "restore snapshots %p at %d\n",
                  first->snapshots, first->logicalInstruction->index);
        }

        restore(c, e, first->snapshots);
      }
    }

    unsigned footprint = frameFootprint(c, e->stackAfter);
    RUNTIME_ARRAY(SiteRecord, frozenRecords, footprint);
    SiteRecordList frozen(RUNTIME_ARRAY_BODY(frozenRecords), footprint);

    // branches resolve their outgoing state before compiling; other
    // events resolve it afterwards (below)
    bool branch = e->isBranch();
    if (branch and e->successors) {
      populateSiteTables(c, e, &frozen);
    }

    populateSources(c, e);

    if (branch and e->successors) {
      captureBranchSnapshots(c, e);
    }

    thaw(c, &frozen);

    e->compile(c);

    if ((not branch) and e->successors) {
      populateSiteTables(c, e, &frozen);
      captureBranchSnapshots(c, e);
      thaw(c, &frozen);
    }

    if (e->visitLinks) {
      // visit deferred links in original (reversed-back) order
      for (Cell<Link>* cell = reverseDestroy(e->visitLinks); cell; cell = cell->next) {
        visit(c, cell->value);
      }
      e->visitLinks = 0;
    }

    for (CodePromise* p = e->promises; p; p = p->next) {
      p->offset = a->offset();
    }

    a->endEvent();

    // close the current Block if the next event belongs to a different,
    // non-consecutive logical instruction (or there is no next event)
    LogicalInstruction* nextInstruction = next(c, e->logicalInstruction);
    if (e->next == 0
        or (e->next->logicalInstruction != e->logicalInstruction
            and (e->next->logicalInstruction != nextInstruction
                 or e != e->logicalInstruction->lastEvent)))
    {
      Block* b = e->logicalInstruction->firstEvent->block;

      while (b->nextBlock) {
        b = b->nextBlock;
      }

      if (b != block) {
        b->nextBlock = block;
      }

      block->nextInstruction = nextInstruction;
      block->assemblerBlock = a->endBlock(e->next != 0);

      if (e->next) {
        block = compiler::block(c, e->next);
      }
    }
  }

  c->firstBlock = firstBlock;
}
void
2008-11-02 20:35:35 +00:00
restore(Context* c, ForkState* state)
{
2008-09-20 23:42:46 +00:00
for (unsigned i = 0; i < state->readCount; ++i) {
2008-11-02 20:35:35 +00:00
ForkElement* p = state->elements + i;
2008-09-20 23:42:46 +00:00
p->value->lastRead = p->read;
p->read->allocateTarget(c);
}
}
// Record value v at the given slot of the fork state, attaching a fresh
// MultiRead so the value stays live across the fork.
void
addForkElement(Context* c, Value* v, ForkState* state, unsigned index)
{
  MultiRead* r = multiRead(c);
  if (DebugReads) {
    fprintf(stderr, "add multi read %p to %p\n", r, v);
  }

  // TODO: this is rather icky looking... but despite how it looks, it
  // will not cause an NPE -- addRead does not dereference the (null)
  // event receiver on this path.
  ((Event*)0)->addRead(c, v, r);

  ForkElement* p = state->elements + index;
  p->value = v;
  p->read = r;
}
// Capture the compiler's current frame contents (stack, locals, and
// explicitly saved values) so compilation can later be resumed from this
// point via restoreState.  Clears c->saved as a side effect.
ForkState*
saveState(Context* c)
{
  if (c->logicalCode[c->logicalIp]->lastEvent == 0) {
    appendDummy(c);
  }

  unsigned elementCount = frameFootprint(c, c->stack) + count(c->saved);

  // variable-length allocation: ForkState is followed inline by its
  // ForkElement array
  ForkState* state = new
    (c->zone->allocate
     (sizeof(ForkState) + (sizeof(ForkElement) * elementCount)))
    ForkState(c->stack, c->locals, c->saved, c->predecessor, c->logicalIp);

  if (c->predecessor) {
    c->forkState = state;

    unsigned count = 0;

    for (FrameIterator it(c, c->stack, c->locals); it.hasMore();) {
      FrameIterator::Element e = it.next(c);
      addForkElement(c, e.value, state, count++);
    }

    for (Cell<Value>* sv = c->saved; sv; sv = sv->next) {
      addForkElement(c, sv->value, state, count++);
    }

    state->readCount = count;
  }

  c->saved = 0;

  return state;
}
2008-02-11 17:21:41 +00:00
// Resume compilation from a previously captured fork state, restoring
// the stack, locals, predecessor, and logical ip, and re-activating the
// recorded reads.
void
restoreState(Context* c, ForkState* s)
{
  if (c->logicalCode[c->logicalIp]->lastEvent == 0) {
    appendDummy(c);
  }

  c->stack = s->stack;
  c->locals = s->locals;
  c->predecessor = s->predecessor;
  c->logicalIp = s->logicalIp;

  if (c->predecessor) {
    c->forkState = s;
    restore(c, s);
  }
}
2008-11-01 19:14:13 +00:00
// If v lives in the frame (home >= 0), create and return a fresh buddy
// value for it; otherwise return v itself unchanged.
Value*
maybeBuddy(Context* c, Value* v)
{
  if (v->home >= 0) {
    Value* n = value(c, v->type);
    appendBuddy(c, v, n);
    return n;
  } else {
    return v;
  }
}
// Re-establish nextWord links in newLocals for every two-word pair that
// was linked in oldLocals, preserving the architecture's word order.
void
linkLocals(Context* c, Local* oldLocals, Local* newLocals)
{
  int footprint = static_cast<int>(c->localFootprint);
  int highOffset = c->arch->bigEndian() ? 1 : -1;

  for (int i = 0; i < footprint; ++i) {
    Local* old = oldLocals + i;
    int hi = i + highOffset;

    // a pair exists when the old low word's nextWord pointed at the
    // neighboring high-word slot:
    if (old->value
        and hi >= 0
        and hi < footprint
        and old->value->nextWord == old[highOffset].value)
    {
      Value* low = newLocals[i].value;
      Value* high = newLocals[hi].value;
      low->nextWord = high;
      high->nextWord = low;
      high->wordIndex = 1;
    }
  }
}
class Client: public Assembler::Client {
public:
Client(Context* c): c(c) { }
2008-05-06 21:13:02 +00:00
virtual int acquireTemporary(uint32_t mask) {
unsigned cost;
int r = pickRegisterTarget(c, 0, mask, &cost);
expect(c, cost < Target::Impossible);
2008-04-30 15:44:17 +00:00
save(r);
2013-02-13 19:11:47 +00:00
c->registerResources[r].increment(c);
return r;
}
virtual void releaseTemporary(int r) {
2013-02-13 19:11:47 +00:00
c->registerResources[r].decrement(c);
}
virtual void save(int r) {
2009-01-03 00:44:47 +00:00
RegisterResource* reg = c->registerResources + r;
2008-12-24 20:35:43 +00:00
assert(c, reg->referenceCount == 0);
assert(c, reg->freezeCount == 0);
assert(c, not reg->reserved);
if (reg->value) {
steal(c, reg, 0);
}
}
Context* c;
};
2007-12-08 23:22:13 +00:00
class MyCompiler: public Compiler {
public:
  // Wires this compiler's context to the assembler: the assembler uses
  // the Client member to request and release temporary registers.
  MyCompiler(System* s, Assembler* assembler, Zone* zone,
             Compiler::Client* compilerClient):
    c(s, assembler, zone, compilerClient), client(&c)
  {
    assembler->setClient(&client);
  }
2007-12-08 23:22:13 +00:00
virtual State* saveState() {
2013-02-13 19:11:47 +00:00
State* s = compiler::saveState(&c);
restoreState(s);
return s;
}
virtual void restoreState(State* state) {
2013-02-13 19:11:47 +00:00
compiler::restoreState(&c, static_cast<ForkState*>(state));
}
virtual Subroutine* startSubroutine() {
return c.subroutine = new(c.zone) MySubroutine;
}
virtual void returnFromSubroutine(Subroutine* subroutine, Operand* address) {
appendSaveLocals(&c);
appendJump(&c, lir::Jump, static_cast<Value*>(address), false, true);
2013-02-13 19:11:47 +00:00
static_cast<MySubroutine*>(subroutine)->forkState = compiler::saveState(&c);
}
virtual void linkSubroutine(Subroutine* subroutine) {
Local* oldLocals = c.locals;
restoreState(static_cast<MySubroutine*>(subroutine)->forkState);
linkLocals(&c, oldLocals, c.locals);
}
virtual void init(unsigned logicalCodeLength, unsigned parameterFootprint,
unsigned localFootprint, unsigned alignedFrameSize)
{
2008-02-11 17:21:41 +00:00
c.logicalCodeLength = logicalCodeLength;
c.parameterFootprint = parameterFootprint;
c.localFootprint = localFootprint;
c.alignedFrameSize = alignedFrameSize;
unsigned frameResourceCount = totalFrameSize(&c);
c.frameResources = static_cast<FrameResource*>
(c.zone->allocate(sizeof(FrameResource) * frameResourceCount));
for (unsigned i = 0; i < frameResourceCount; ++i) {
new (c.frameResources + i) FrameResource;
}
2009-04-22 01:39:25 +00:00
unsigned base = frameBase(&c);
c.frameResources[base + c.arch->returnAddressOffset()].reserved = true;
support stack unwinding without using a frame pointer Previously, we unwound the stack by following the chain of frame pointers for normal returns, stack trace creation, and exception unwinding. On x86, this required reserving EBP/RBP for frame pointer duties, making it unavailable for general computation and requiring that it be explicitly saved and restored on entry and exit, respectively. On PowerPC, we use an ABI that makes the stack pointer double as a frame pointer, so it doesn't cost us anything. We've been using the same convention on ARM, but it doesn't match the native calling convention, which makes it unusable when we want to call native code from Java and pass arguments on the stack. So far, the ARM calling convention mismatch hasn't been an issue because we've never passed more arguments from Java to native code than would fit in registers. However, we must now pass an extra argument (the thread pointer) to e.g. divideLong so it can throw an exception on divide by zero, which means the last argument must be passed on the stack. This will clobber the linkage area we've been using to hold the frame pointer, so we need to stop using it. One solution would be to use the same convention on ARM as we do on x86, but this would introduce the same overhead of making a register unavailable for general use and extra code at method entry and exit. Instead, this commit removes the need for a frame pointer. Unwinding involves consulting a map of instruction offsets to frame sizes which is generated at compile time. This is necessary because stack trace creation can happen at any time due to Thread.getStackTrace being called by another thread, and the frame size varies during the execution of a method. So far, only x86(_64) is working, and continuations and tail call optimization are probably broken. More to come.
2011-01-17 02:05:05 +00:00
c.frameResources[base + c.arch->framePointerOffset()].reserved
= UseFramePointer;
// leave room for logical instruction -1
unsigned codeSize = sizeof(LogicalInstruction*) * (logicalCodeLength + 1);
2008-08-16 17:45:36 +00:00
c.logicalCode = static_cast<LogicalInstruction**>
(c.zone->allocate(codeSize));
memset(c.logicalCode, 0, codeSize);
c.logicalCode++;
2008-09-24 00:01:42 +00:00
c.locals = static_cast<Local*>
(c.zone->allocate(sizeof(Local) * localFootprint));
2008-09-24 00:01:42 +00:00
memset(c.locals, 0, sizeof(Local) * localFootprint);
c.logicalCode[-1] = new
(c.zone->allocate(sizeof(LogicalInstruction)))
LogicalInstruction(-1, c.stack, c.locals);
}
2008-02-11 17:21:41 +00:00
virtual void visitLogicalIp(unsigned logicalIp) {
assert(&c, logicalIp < c.logicalCodeLength);
if (c.logicalCode[c.logicalIp]->lastEvent == 0) {
appendDummy(&c);
}
Event* e = c.logicalCode[logicalIp]->firstEvent;
2008-10-04 17:26:35 +00:00
Event* p = c.predecessor;
if (p) {
if (DebugAppend) {
fprintf(stderr, "visit %d pred %d\n", logicalIp,
p->logicalInstruction->index);
}
2008-10-04 17:26:35 +00:00
p->stackAfter = c.stack;
p->localsAfter = c.locals;
2013-02-13 19:11:47 +00:00
Link* link = compiler::link
(&c, p, e->predecessors, e, p->successors, c.forkState);
e->predecessors = link;
p->successors = link;
2008-10-15 00:45:31 +00:00
c.lastEvent->visitLinks = cons(&c, link, c.lastEvent->visitLinks);
if (DebugAppend) {
fprintf(stderr, "populate junction reads for %d to %d\n",
p->logicalInstruction->index, logicalIp);
}
2008-12-12 01:09:36 +00:00
populateJunctionReads(&c, link);
}
if (c.subroutine) {
c.subroutine->forkState
= c.logicalCode[logicalIp]->subroutine->forkState;
c.subroutine = 0;
}
2008-12-12 01:09:36 +00:00
c.forkState = 0;
2007-12-08 23:22:13 +00:00
}
2008-02-11 17:21:41 +00:00
virtual void startLogicalIp(unsigned logicalIp) {
assert(&c, logicalIp < c.logicalCodeLength);
assert(&c, c.logicalCode[logicalIp] == 0);
if (c.logicalCode[c.logicalIp]->lastEvent == 0) {
appendDummy(&c);
}
2008-04-20 19:35:36 +00:00
2008-10-06 00:50:59 +00:00
Event* p = c.predecessor;
if (p) {
p->stackAfter = c.stack;
p->localsAfter = c.locals;
}
c.logicalCode[logicalIp] = new
(c.zone->allocate(sizeof(LogicalInstruction)))
LogicalInstruction(logicalIp, c.stack, c.locals);
2008-04-20 19:35:36 +00:00
2009-07-08 14:18:40 +00:00
bool startSubroutine = c.subroutine != 0;
if (startSubroutine) {
c.logicalCode[logicalIp]->subroutine = c.subroutine;
c.subroutine = 0;
2009-07-08 14:18:40 +00:00
}
c.logicalIp = logicalIp;
if (startSubroutine) {
// assume all local variables are initialized on entry to a
// subroutine, since other calls to the subroutine may
// initialize them:
unsigned sizeInBytes = sizeof(Local) * c.localFootprint;
Local* newLocals = static_cast<Local*>(c.zone->allocate(sizeInBytes));
memcpy(newLocals, c.locals, sizeInBytes);
c.locals = newLocals;
for (unsigned li = 0; li < c.localFootprint; ++li) {
Local* local = c.locals + li;
if (local->value == 0) {
initLocal(1, li, IntegerType);
}
}
}
}
2008-02-11 17:21:41 +00:00
virtual Promise* machineIp(unsigned logicalIp) {
return new(c.zone) IpPromise(&c, logicalIp);
}
2008-02-11 17:21:41 +00:00
virtual Promise* poolAppend(intptr_t value) {
return poolAppendPromise(resolved(&c, value));
}
2008-02-11 17:21:41 +00:00
virtual Promise* poolAppendPromise(Promise* value) {
Promise* p = new(c.zone) PoolPromise(&c, c.constantCount);
2007-12-08 23:22:13 +00:00
ConstantPoolNode* constant = new (c.zone) ConstantPoolNode(value);
2007-12-16 00:24:15 +00:00
2008-02-11 17:21:41 +00:00
if (c.firstConstant) {
c.lastConstant->next = constant;
} else {
c.firstConstant = constant;
2007-12-16 00:24:15 +00:00
}
2008-02-11 17:21:41 +00:00
c.lastConstant = constant;
++ c.constantCount;
2008-02-11 17:21:41 +00:00
return p;
2007-12-08 23:22:13 +00:00
}
virtual Operand* constant(int64_t value, Compiler::OperandType type) {
return promiseConstant(resolved(&c, value), type);
2007-12-08 23:22:13 +00:00
}
virtual Operand* promiseConstant(Promise* value, Compiler::OperandType type) {
2013-02-13 19:11:47 +00:00
return compiler::value
(&c, valueType(&c, type), compiler::constantSite(&c, value));
2007-12-08 23:22:13 +00:00
}
2008-02-11 17:21:41 +00:00
virtual Operand* address(Promise* address) {
2013-02-13 19:11:47 +00:00
return value(&c, lir::ValueGeneral, compiler::addressSite(&c, address));
2007-12-08 23:22:13 +00:00
}
2008-02-11 17:21:41 +00:00
virtual Operand* memory(Operand* base,
OperandType type,
2008-02-11 17:21:41 +00:00
int displacement = 0,
Operand* index = 0,
unsigned scale = 1)
2008-02-11 17:21:41 +00:00
{
Value* result = value(&c, valueType(&c, type));
2008-03-15 23:54:20 +00:00
2008-04-17 20:48:26 +00:00
appendMemory(&c, static_cast<Value*>(base), displacement,
static_cast<Value*>(index), scale, result);
2008-03-15 23:54:20 +00:00
return result;
2007-12-08 23:22:13 +00:00
}
virtual Operand* register_(int number) {
2013-02-13 19:11:47 +00:00
return compiler::register_(&c, number);
2007-12-08 23:22:13 +00:00
}
Promise* machineIp() {
2013-02-14 02:33:40 +00:00
return c.logicalCode[c.logicalIp]->lastEvent->makeCodePromise(&c);
}
virtual void push(unsigned footprint UNUSED) {
assert(&c, footprint == 1);
Value* v = value(&c, lir::ValueGeneral);
2013-02-13 19:11:47 +00:00
Stack* s = compiler::stack(&c, v, c.stack);
v->home = frameIndex(&c, s->index + c.localFootprint);
2009-01-03 00:44:47 +00:00
c.stack = s;
}
2008-11-02 22:25:51 +00:00
virtual void push(unsigned footprint, Operand* value) {
2013-02-13 19:11:47 +00:00
compiler::push(&c, footprint, static_cast<Value*>(value));
2007-12-22 00:26:55 +00:00
}
2007-12-09 22:45:43 +00:00
virtual void save(unsigned footprint, Operand* value) {
c.saved = cons(&c, static_cast<Value*>(value), c.saved);
if (TargetBytesPerWord == 4 and footprint > 1) {
assert(&c, footprint == 2);
assert(&c, static_cast<Value*>(value)->nextWord);
2009-01-30 01:36:19 +00:00
save(1, static_cast<Value*>(value)->nextWord);
}
}
2008-11-02 22:25:51 +00:00
virtual Operand* pop(unsigned footprint) {
2013-02-13 19:11:47 +00:00
return compiler::pop(&c, footprint);
2007-12-08 23:22:13 +00:00
}
2008-07-05 20:21:13 +00:00
virtual void pushed() {
Value* v = value(&c, lir::ValueGeneral);
2008-09-25 00:48:32 +00:00
appendFrameSite
(&c, v, frameIndex
(&c, (c.stack ? c.stack->index : 0) + c.localFootprint));
2008-09-25 00:48:32 +00:00
2013-02-13 19:11:47 +00:00
Stack* s = compiler::stack(&c, v, c.stack);
v->home = frameIndex(&c, s->index + c.localFootprint);
2009-01-03 00:44:47 +00:00
c.stack = s;
}
2009-01-30 01:36:19 +00:00
virtual void popped(unsigned footprint) {
2009-05-15 02:08:01 +00:00
for (; footprint; -- footprint) {
assert(&c, c.stack->value == 0 or c.stack->value->home >= 0);
2009-01-30 01:36:19 +00:00
2009-05-15 02:08:01 +00:00
if (DebugFrame) {
fprintf(stderr, "popped %p\n", c.stack->value);
}
c.stack = c.stack->next;
2009-01-30 01:36:19 +00:00
}
2008-07-05 20:21:13 +00:00
}
2009-05-15 02:08:01 +00:00
virtual unsigned topOfStack() {
return c.stack->index;
2008-11-11 00:07:44 +00:00
}
virtual Operand* peek(unsigned footprint, unsigned index) {
Stack* s = c.stack;
for (unsigned i = index; i > 0; --i) {
s = s->next;
}
if (footprint > 1) {
assert(&c, footprint == 2);
bool bigEndian = c.arch->bigEndian();
#ifndef NDEBUG
Stack* low;
Stack* high;
if (bigEndian) {
2009-05-03 20:57:11 +00:00
high = s;
low = s->next;
} else {
low = s;
high = s->next;
}
assert(&c, (TargetBytesPerWord == 8
and low->value->nextWord == low->value and high->value == 0)
or (TargetBytesPerWord == 4
and low->value->nextWord == high->value));
#endif // not NDEBUG
if (bigEndian) {
s = s->next;
}
}
2009-01-30 01:43:46 +00:00
2008-04-17 20:48:26 +00:00
return s->value;
}
2008-02-11 17:21:41 +00:00
virtual Operand* call(Operand* address,
unsigned flags,
TraceHandler* traceHandler,
2008-04-18 04:16:20 +00:00
unsigned resultSize,
OperandType resultType,
2008-02-11 17:21:41 +00:00
unsigned argumentCount,
...)
{
va_list a; va_start(a, argumentCount);
2007-12-11 00:48:09 +00:00
bool bigEndian = c.arch->bigEndian();
2008-02-11 17:21:41 +00:00
unsigned footprint = 0;
unsigned size = TargetBytesPerWord;
RUNTIME_ARRAY(Value*, arguments, argumentCount);
int index = 0;
2008-02-11 17:21:41 +00:00
for (unsigned i = 0; i < argumentCount; ++i) {
2008-04-17 20:48:26 +00:00
Value* o = va_arg(a, Value*);
if (o) {
if (bigEndian and size > TargetBytesPerWord) {
RUNTIME_ARRAY_BODY(arguments)[index++] = o->nextWord;
}
RUNTIME_ARRAY_BODY(arguments)[index] = o;
if ((not bigEndian) and size > TargetBytesPerWord) {
RUNTIME_ARRAY_BODY(arguments)[++index] = o->nextWord;
}
size = TargetBytesPerWord;
2008-04-17 20:48:26 +00:00
++ index;
} else {
size = 8;
}
++ footprint;
}
2007-12-08 23:22:13 +00:00
2008-02-11 17:21:41 +00:00
va_end(a);
Stack* argumentStack = c.stack;
for (int i = index - 1; i >= 0; --i) {
2013-02-13 19:11:47 +00:00
argumentStack = compiler::stack
(&c, RUNTIME_ARRAY_BODY(arguments)[i], argumentStack);
2008-04-18 03:47:42 +00:00
}
Value* result = value(&c, valueType(&c, resultType));
2008-07-05 20:21:13 +00:00
appendCall(&c, static_cast<Value*>(address), flags, traceHandler, result,
resultSize, argumentStack, index, 0);
2008-07-05 20:21:13 +00:00
return result;
}
virtual Operand* stackCall(Operand* address,
unsigned flags,
TraceHandler* traceHandler,
unsigned resultSize,
OperandType resultType,
2008-07-05 20:21:13 +00:00
unsigned argumentFootprint)
{
Value* result = value(&c, valueType(&c, resultType));
2008-07-05 20:21:13 +00:00
appendCall(&c, static_cast<Value*>(address), flags, traceHandler, result,
resultSize, c.stack, 0, argumentFootprint);
2008-02-11 17:21:41 +00:00
return result;
}
virtual void return_(unsigned size, Operand* value) {
2008-04-17 20:48:26 +00:00
appendReturn(&c, size, static_cast<Value*>(value));
}
virtual void initLocal(unsigned footprint, unsigned index, OperandType type)
{
assert(&c, index + footprint <= c.localFootprint);
2008-09-24 00:01:42 +00:00
Value* v = value(&c, valueType(&c, type));
2008-11-02 22:25:51 +00:00
if (footprint > 1) {
assert(&c, footprint == 2);
unsigned highIndex;
unsigned lowIndex;
if (c.arch->bigEndian()) {
highIndex = index + 1;
lowIndex = index;
} else {
lowIndex = index + 1;
highIndex = index;
}
if (TargetBytesPerWord == 4) {
initLocal(1, highIndex, type);
2009-09-26 19:43:44 +00:00
Value* next = c.locals[highIndex].value;
v->nextWord = next;
next->nextWord = v;
next->wordIndex = 1;
}
index = lowIndex;
}
2008-11-02 22:25:51 +00:00
if (DebugFrame) {
fprintf(stderr, "init local %p at %d (%d)\n",
v, index, frameIndex(&c, index));
2008-11-02 22:25:51 +00:00
}
appendFrameSite(&c, v, frameIndex(&c, index));
2008-09-24 00:01:42 +00:00
Local* local = c.locals + index;
local->value = v;
v->home = frameIndex(&c, index);
}
2008-09-25 00:48:32 +00:00
virtual void initLocalsFromLogicalIp(unsigned logicalIp) {
assert(&c, logicalIp < c.logicalCodeLength);
unsigned footprint = sizeof(Local) * c.localFootprint;
Local* newLocals = static_cast<Local*>(c.zone->allocate(footprint));
memset(newLocals, 0, footprint);
c.locals = newLocals;
2008-09-24 00:01:42 +00:00
2008-09-25 00:48:32 +00:00
Event* e = c.logicalCode[logicalIp]->firstEvent;
for (int i = 0; i < static_cast<int>(c.localFootprint); ++i) {
Local* local = e->locals() + i;
2008-09-25 00:48:32 +00:00
if (local->value) {
initLocal
(1, i, local->value->type == lir::ValueGeneral ? IntegerType : FloatType);
}
}
linkLocals(&c, e->locals(), newLocals);
2008-09-25 00:48:32 +00:00
}
2008-11-02 22:25:51 +00:00
virtual void storeLocal(unsigned footprint, Operand* src, unsigned index) {
2013-02-13 19:11:47 +00:00
compiler::storeLocal(&c, footprint, static_cast<Value*>(src), index, true);
}
virtual Operand* loadLocal(unsigned footprint, unsigned index) {
2013-02-13 19:11:47 +00:00
return compiler::loadLocal(&c, footprint, index);
}
virtual void saveLocals() {
appendSaveLocals(&c);
}
virtual void checkBounds(Operand* object, unsigned lengthOffset,
Operand* index, intptr_t handler)
{
appendBoundsCheck(&c, static_cast<Value*>(object), lengthOffset,
static_cast<Value*>(index), handler);
}
virtual void store(unsigned srcSize, Operand* src, unsigned dstSize,
Operand* dst)
{
appendMove(&c, lir::Move, srcSize, srcSize, static_cast<Value*>(src),
dstSize, static_cast<Value*>(dst));
}
virtual Operand* load(unsigned srcSize, unsigned srcSelectSize, Operand* src,
unsigned dstSize)
{
assert(&c, dstSize >= TargetBytesPerWord);
2007-12-08 23:22:13 +00:00
Value* dst = value(&c, static_cast<Value*>(src)->type);
appendMove(&c, lir::Move, srcSize, srcSelectSize, static_cast<Value*>(src),
dstSize, dst);
2008-02-11 17:21:41 +00:00
return dst;
2007-12-08 23:22:13 +00:00
}
virtual Operand* loadz(unsigned srcSize, unsigned srcSelectSize,
Operand* src, unsigned dstSize)
{
assert(&c, dstSize >= TargetBytesPerWord);
Value* dst = value(&c, static_cast<Value*>(src)->type);
appendMove(&c, lir::MoveZ, srcSize, srcSelectSize, static_cast<Value*>(src),
dstSize, dst);
2008-02-11 17:21:41 +00:00
return dst;
2007-12-08 23:22:13 +00:00
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfEqual(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfEqual, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfNotEqual(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfNotEqual, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
2007-12-08 23:22:13 +00:00
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfLess(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfLess, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfGreater(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfGreater, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
2007-12-08 23:22:13 +00:00
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfLessOrEqual(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfLessOrEqual, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
2007-12-08 23:22:13 +00:00
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfGreaterOrEqual(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfGreaterOrEqual, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
2007-12-08 23:22:13 +00:00
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatEqual(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatEqual, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
2007-12-08 23:22:13 +00:00
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatNotEqual(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatNotEqual, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatLess(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatLess, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatGreater(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatGreater, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatLessOrEqual(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatLessOrEqual, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatGreaterOrEqual(unsigned size, Operand* a, Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatGreaterOrEqual, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatLessOrUnordered(unsigned size, Operand* a,
Operand* b, Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatLessOrUnordered, size, static_cast<Value*>(a),
2009-10-07 00:50:32 +00:00
static_cast<Value*>(b), static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatGreaterOrUnordered(unsigned size, Operand* a,
Operand* b, Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatGreaterOrUnordered, size,
2009-10-07 00:50:32 +00:00
static_cast<Value*>(a), static_cast<Value*>(b),
static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatLessOrEqualOrUnordered(unsigned size, Operand* a,
Operand* b, Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatLessOrEqualOrUnordered, size,
2009-10-07 00:50:32 +00:00
static_cast<Value*>(a), static_cast<Value*>(b),
static_cast<Value*>(address));
}
2009-10-07 00:50:32 +00:00
virtual void jumpIfFloatGreaterOrEqualOrUnordered(unsigned size, Operand* a,
Operand* b,
Operand* address)
{
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
2009-10-07 00:50:32 +00:00
appendBranch(&c, lir::JumpIfFloatGreaterOrEqualOrUnordered, size,
2009-10-07 00:50:32 +00:00
static_cast<Value*>(a), static_cast<Value*>(b),
static_cast<Value*>(address));
}
2008-02-11 17:21:41 +00:00
virtual void jmp(Operand* address) {
appendJump(&c, lir::Jump, static_cast<Value*>(address));
2007-12-09 22:45:43 +00:00
}
virtual void exit(Operand* address) {
appendJump(&c, lir::Jump, static_cast<Value*>(address), true);
}
virtual Operand* add(unsigned size, Operand* a, Operand* b) {
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
Value* result = value(&c, lir::ValueGeneral);
appendCombine(&c, lir::Add, size, static_cast<Value*>(a),
size, static_cast<Value*>(b), size, result);
2008-02-11 17:21:41 +00:00
return result;
2007-12-09 22:45:43 +00:00
}
virtual Operand* sub(unsigned size, Operand* a, Operand* b) {
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
Value* result = value(&c, lir::ValueGeneral);
appendCombine(&c, lir::Subtract, size, static_cast<Value*>(a),
size, static_cast<Value*>(b), size, result);
2008-02-11 17:21:41 +00:00
return result;
}
virtual Operand* mul(unsigned size, Operand* a, Operand* b) {
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
Value* result = value(&c, lir::ValueGeneral);
appendCombine(&c, lir::Multiply, size, static_cast<Value*>(a),
size, static_cast<Value*>(b), size, result);
2008-02-11 17:21:41 +00:00
return result;
2007-12-08 23:22:13 +00:00
}
virtual Operand* div(unsigned size, Operand* a, Operand* b) {
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
Value* result = value(&c, lir::ValueGeneral);
appendCombine(&c, lir::Divide, size, static_cast<Value*>(a),
size, static_cast<Value*>(b), size, result);
2008-02-11 17:21:41 +00:00
return result;
2007-12-22 00:26:55 +00:00
}
virtual Operand* rem(unsigned size, Operand* a, Operand* b) {
assert(&c, static_cast<Value*>(a)->type == lir::ValueGeneral
and static_cast<Value*>(b)->type == lir::ValueGeneral);
Value* result = value(&c, lir::ValueGeneral);
appendCombine(&c, lir::Remainder, size, static_cast<Value*>(a),
size, static_cast<Value*>(b), size, result);
2008-02-11 17:21:41 +00:00
return result;
}
virtual Operand* fadd(unsigned size, Operand* a, Operand* b) {
assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
and static_cast<Value*>(b)->type == lir::ValueFloat);
Value* result = value(&c, lir::ValueFloat);
static_cast<Value*>(a)->type = static_cast<Value*>(b)->type = lir::ValueFloat;
appendCombine(&c, lir::FloatAdd, size, static_cast<Value*>(a),
size, static_cast<Value*>(b), size, result);
return result;
}
virtual Operand* fsub(unsigned size, Operand* a, Operand* b) {
  // Emit a floating-point subtract (a - b).
  Value* left = static_cast<Value*>(a);
  Value* right = static_cast<Value*>(b);
  assert(&c, left->type == lir::ValueFloat and right->type == lir::ValueFloat);

  Value* difference = value(&c, lir::ValueFloat);
  // Redundant with the assert above when asserts are enabled; forces both
  // operands to be treated as floats otherwise.
  left->type = lir::ValueFloat;
  right->type = lir::ValueFloat;
  appendCombine(&c, lir::FloatSubtract, size, left, size, right,
                size, difference);
  return difference;
}
virtual Operand* fmul(unsigned size, Operand* a, Operand* b) {
  // Emit a floating-point multiply of a and b.
  Value* left = static_cast<Value*>(a);
  Value* right = static_cast<Value*>(b);
  assert(&c, left->type == lir::ValueFloat and right->type == lir::ValueFloat);

  Value* product = value(&c, lir::ValueFloat);
  // Redundant with the assert above when asserts are enabled; forces both
  // operands to be treated as floats otherwise.
  left->type = lir::ValueFloat;
  right->type = lir::ValueFloat;
  appendCombine(&c, lir::FloatMultiply, size, left, size, right,
                size, product);
  return product;
}
virtual Operand* fdiv(unsigned size, Operand* a, Operand* b) {
  // Emit a floating-point divide (a / b).
  assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
         and static_cast<Value*>(b)->type == lir::ValueFloat);
  Value* result = value(&c, lir::ValueFloat);
  // Coerce both operand types to float, matching fadd/fsub/fmul.  This is
  // a no-op while the assert above is active, but keeps the operand types
  // consistent in builds where the assert is compiled out.
  static_cast<Value*>(a)->type = static_cast<Value*>(b)->type = lir::ValueFloat;
  appendCombine(&c, lir::FloatDivide, size, static_cast<Value*>(a),
                size, static_cast<Value*>(b), size, result);
  return result;
}
virtual Operand* frem(unsigned size, Operand* a, Operand* b) {
  // Emit a floating-point remainder (a % b).
  assert(&c, static_cast<Value*>(a)->type == lir::ValueFloat
         and static_cast<Value*>(b)->type == lir::ValueFloat);
  Value* result = value(&c, lir::ValueFloat);
  // Coerce both operand types to float, matching fadd/fsub/fmul.  This is
  // a no-op while the assert above is active, but keeps the operand types
  // consistent in builds where the assert is compiled out.
  static_cast<Value*>(a)->type = static_cast<Value*>(b)->type = lir::ValueFloat;
  appendCombine(&c, lir::FloatRemainder, size, static_cast<Value*>(a),
                size, static_cast<Value*>(b), size, result);
  return result;
}
virtual Operand* shl(unsigned size, Operand* a, Operand* b) {
  // Emit a left shift.  a is passed at machine-word size (presumably the
  // shift count — confirm against the backend's ShiftLeft operand order);
  // b and the result use the requested size.  Only a's type is asserted.
  Value* wordOperand = static_cast<Value*>(a);
  Value* sizedOperand = static_cast<Value*>(b);
  assert(&c, wordOperand->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueGeneral);
  appendCombine(&c, lir::ShiftLeft, TargetBytesPerWord, wordOperand,
                size, sizedOperand, size, result);
  return result;
}
virtual Operand* shr(unsigned size, Operand* a, Operand* b) {
  // Emit an arithmetic (signed) right shift.  a is passed at machine-word
  // size, b and the result at the requested size.  Only a's type is asserted.
  Value* wordOperand = static_cast<Value*>(a);
  Value* sizedOperand = static_cast<Value*>(b);
  assert(&c, wordOperand->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueGeneral);
  appendCombine(&c, lir::ShiftRight, TargetBytesPerWord, wordOperand,
                size, sizedOperand, size, result);
  return result;
}
virtual Operand* ushr(unsigned size, Operand* a, Operand* b) {
  // Emit a logical (unsigned) right shift.  a is passed at machine-word
  // size, b and the result at the requested size.  Only a's type is asserted.
  Value* wordOperand = static_cast<Value*>(a);
  Value* sizedOperand = static_cast<Value*>(b);
  assert(&c, wordOperand->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueGeneral);
  appendCombine(&c, lir::UnsignedShiftRight, TargetBytesPerWord, wordOperand,
                size, sizedOperand, size, result);
  return result;
}
virtual Operand* and_(unsigned size, Operand* a, Operand* b) {
  // Emit a bitwise AND.  Only the first operand's type is asserted.
  Value* left = static_cast<Value*>(a);
  Value* right = static_cast<Value*>(b);
  assert(&c, left->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueGeneral);
  appendCombine(&c, lir::And, size, left, size, right, size, result);
  return result;
}
virtual Operand* or_(unsigned size, Operand* a, Operand* b) {
  // Emit a bitwise OR.  Only the first operand's type is asserted.
  Value* left = static_cast<Value*>(a);
  Value* right = static_cast<Value*>(b);
  assert(&c, left->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueGeneral);
  appendCombine(&c, lir::Or, size, left, size, right, size, result);
  return result;
}
virtual Operand* xor_(unsigned size, Operand* a, Operand* b) {
  // Emit a bitwise XOR.  Only the first operand's type is asserted.
  Value* left = static_cast<Value*>(a);
  Value* right = static_cast<Value*>(b);
  assert(&c, left->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueGeneral);
  appendCombine(&c, lir::Xor, size, left, size, right, size, result);
  return result;
}
virtual Operand* neg(unsigned size, Operand* a) {
  // Emit an integer negation of a.
  Value* operand = static_cast<Value*>(a);
  assert(&c, operand->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueGeneral);
  appendTranslate(&c, lir::Negate, size, operand, size, result);
  return result;
}
virtual Operand* fneg(unsigned size, Operand* a) {
  // Emit a floating-point negation of a.
  Value* operand = static_cast<Value*>(a);
  assert(&c, operand->type == lir::ValueFloat);

  Value* result = value(&c, lir::ValueFloat);
  appendTranslate(&c, lir::FloatNegate, size, operand, size, result);
  return result;
}
virtual Operand* abs(unsigned size, Operand* a) {
  // Emit an integer absolute value of a.
  Value* operand = static_cast<Value*>(a);
  assert(&c, operand->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueGeneral);
  appendTranslate(&c, lir::Absolute, size, operand, size, result);
  return result;
}
virtual Operand* fabs(unsigned size, Operand* a) {
  // Emit a floating-point absolute value of a.
  Value* operand = static_cast<Value*>(a);
  assert(&c, operand->type == lir::ValueFloat);

  Value* result = value(&c, lir::ValueFloat);
  appendTranslate(&c, lir::FloatAbsolute, size, operand, size, result);
  return result;
}
virtual Operand* fsqrt(unsigned size, Operand* a) {
  // Emit a floating-point square root of a.
  Value* operand = static_cast<Value*>(a);
  assert(&c, operand->type == lir::ValueFloat);

  Value* result = value(&c, lir::ValueFloat);
  appendTranslate(&c, lir::FloatSquareRoot, size, operand, size, result);
  return result;
}
virtual Operand* f2f(unsigned aSize, unsigned resSize, Operand* a) {
  // Emit a float-to-float conversion (e.g. between widths aSize and resSize).
  Value* operand = static_cast<Value*>(a);
  assert(&c, operand->type == lir::ValueFloat);

  Value* result = value(&c, lir::ValueFloat);
  appendTranslate(&c, lir::Float2Float, aSize, operand, resSize, result);
  return result;
}
virtual Operand* f2i(unsigned aSize, unsigned resSize, Operand* a) {
  // Emit a float-to-integer conversion; the result is a general value.
  Value* operand = static_cast<Value*>(a);
  assert(&c, operand->type == lir::ValueFloat);

  Value* result = value(&c, lir::ValueGeneral);
  appendTranslate(&c, lir::Float2Int, aSize, operand, resSize, result);
  return result;
}
virtual Operand* i2f(unsigned aSize, unsigned resSize, Operand* a) {
  // Emit an integer-to-float conversion; the result is a float value.
  Value* operand = static_cast<Value*>(a);
  assert(&c, operand->type == lir::ValueGeneral);

  Value* result = value(&c, lir::ValueFloat);
  appendTranslate(&c, lir::Int2Float, aSize, operand, resSize, result);
  return result;
}
fix a couple of subtle Thread.getStackTrace bugs The first problem was that, on x86, we failed to properly keep track of whether to expect the return address to be on the stack or not when unwinding through a frame. We were relying on a "stackLimit" pointer to tell us whether we were looking at the most recently-called frame by comparing it with the stack pointer for that frame. That was inaccurate in the case of a thread executing at the beginning of a method before a new frame is allocated, in which case the most recent two frames share a stack pointer, confusing the unwinder. The solution involves keeping track of how many frames we've looked at while walking the stack. The other problem was that compareIpToMethodBounds assumed every method was followed by at least one byte of padding before the next method started. That assumption was usually valid because we were storing the size following method code prior to the code itself. However, the last method of an AOT-compiled code image is not followed by any such method header and may instead be followed directly by native code with no intervening padding. In that case, we risk interpreting that native code as part of the preceding method, with potentially bizarre results. The reason for the compareIpToMethodBounds assumption was that methods which throw exceptions as their last instruction generate a non-returning call, which nonetheless push a return address on the stack which points past the end of the method, and the unwinder needs to know that return address belongs to that method. A better solution is to add an extra trap instruction to the end of such methods, which is what this patch does.
2012-05-05 00:35:13 +00:00
virtual void trap() {
  // Emit a trap instruction.  Per the surrounding history, this is appended
  // after non-returning calls at the end of a method so the unwinder can
  // attribute the pushed return address to this method — confirm in callers.
  appendOperation(&c, lir::Trap);
}
2009-03-03 03:18:15 +00:00
virtual void loadBarrier() {
  // Emit a load memory-barrier instruction.
  appendOperation(&c, lir::LoadBarrier);
}
virtual void storeStoreBarrier() {
  // Emit a store-store memory-barrier instruction.
  appendOperation(&c, lir::StoreStoreBarrier);
}
virtual void storeLoadBarrier() {
  // Emit a store-load memory-barrier instruction.
  appendOperation(&c, lir::StoreLoadBarrier);
}
virtual void compile(uintptr_t stackOverflowHandler,
unsigned stackLimitOffset)
{
2013-02-13 19:11:47 +00:00
compiler::compile(&c, stackOverflowHandler, stackLimitOffset);
}
virtual unsigned resolve(uint8_t* dst) {
c.machineCode = dst;
c.assembler->setDestination(dst);
Block* block = c.firstBlock;
while (block->nextBlock or block->nextInstruction) {
Block* next = block->nextBlock
? block->nextBlock
: block->nextInstruction->firstEvent->block;
next->start = block->assemblerBlock->resolve
(block->start, next->assemblerBlock);
block = next;
}
return c.machineCodeSize = block->assemblerBlock->resolve
(block->start, 0) + c.assembler->footerSize();
2007-12-11 23:52:28 +00:00
}
virtual unsigned poolSize() {
  // The constant pool occupies one target machine word per entry.
  return c.constantCount * TargetBytesPerWord;
}
virtual void write() {
  // Write out the assembled machine code, then populate the constant pool,
  // which follows the (word-aligned) machine code.  Entries whose promises
  // are not yet resolved get a listener that patches the slot once the
  // value becomes known.
  c.assembler->write();

  unsigned offset = 0;
  for (ConstantPoolNode* n = c.firstConstant; n; n = n->next) {
    target_intptr_t* target = reinterpret_cast<target_intptr_t*>
      (c.machineCode + pad(c.machineCodeSize, TargetBytesPerWord) + offset);

    if (n->promise->resolved()) {
      *target = targetVW(n->promise->value());
    } else {
      // Deferred patch: store the value into the pool slot when the
      // promise resolves, reporting the patched location if requested.
      class Listener: public Promise::Listener {
      public:
        Listener(target_intptr_t* target): target(target) { }

        virtual bool resolve(int64_t value, void** location) {
          *target = targetVW(value);
          if (location) *location = target;
          return true;
        }

        target_intptr_t* target;
      };
      new (n->promise->listen(sizeof(Listener))) Listener(target);
    }

    offset += TargetBytesPerWord;
  }
}
virtual void dispose() {
  // Nothing to release explicitly; this object is zone-allocated (see
  // makeCompiler), so its storage is reclaimed with the zone.
}
2007-12-09 20:03:21 +00:00
// Compilation context threaded through every emit/resolve call above.
Context c;
2013-02-13 19:11:47 +00:00
// NOTE(review): presumably adapts the user-supplied Compiler::Client for
// use by this compiler's internals — confirm in compiler/context.h.
compiler::Client client;
2007-12-08 23:22:13 +00:00
};
2013-02-13 19:11:47 +00:00
} // namespace compiler
2007-12-08 23:22:13 +00:00
// Factory: construct a MyCompiler inside the given zone.  Because the
// object is zone-allocated, its dispose() is a no-op and its lifetime is
// tied to the zone's.
Compiler*
makeCompiler(System* system, Assembler* assembler, Zone* zone,
             Compiler::Client* client)
{
  return new (zone) compiler::MyCompiler(system, assembler, zone, client);
}
} // namespace codegen
} // namespace avian