Mirror of https://github.com/corda/corda.git (synced 2025-01-07 13:38:47 +00:00)

commit 263e349cae
parent 7ea4b584b8

    rename assert to assertT, to avoid conflict with std assert macro
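For context: the conflict being avoided is with the standard `assert` from <cassert>/<assert.h>, which is a function-like preprocessor macro, so a project helper that is also named `assert` (and takes two arguments) gets rewritten by the preprocessor wherever that header is transitively included. The sketch below is a minimal illustration of the problem and of the rename, not the VM's actual code; `Context` and the one-line `expect` stub are simplified stand-ins for the real `Thread*`/`System*` arguments and `expect` helper that appear in the hunks.

    #include <cassert>   // defines the standard function-like macro assert(expr)
    #include <cstdlib>

    struct Context {};

    // Simplified stand-in for the VM's expect(): abort when the condition fails.
    inline void expect(Context*, bool v) { if (!v) std::abort(); }

    // Before the rename, a helper like this could not coexist with <cassert>:
    // the preprocessor treats "assert(" as a macro invocation and mangles both
    // the definition and every two-argument call site.
    //
    //   inline void assert(Context* c, bool v) { expect(c, v); }

    // After the rename, the distinct name sidesteps the macro entirely.
    inline void assertT(Context* c, bool v) { expect(c, v); }

    int main() {
        Context ctx;
        assertT(&ctx, 1 + 1 == 2);  // project-style two-argument check
        assert(1 + 1 == 2);         // standard one-argument macro still works
        return 0;
    }

Renaming the helper (rather than undefining the macro) keeps <cassert> usable from the same translation units, which is why every `assert(a, b)` call site in the hunks below simply becomes `assertT(a, b)`.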
@@ -168,18 +168,18 @@ sysAbort(System* s)
 
 // #ifdef NDEBUG
 
-// # define assert(a, b)
+// # define assertT(a, b)
 // # define vm_assert(a, b)
 
 // #else // not NDEBUG
 
 // inline void
-// assert(System* s, bool v)
+// assertT(System* s, bool v)
 // {
 // expect(s, v);
 // }
 
-// # define vm_assert(a, b) vm::assert(a, b)
+// # define vm_assert(a, b) vm::assertT(a, b)
 
 // #endif // not NDEBUG
 
@@ -42,10 +42,10 @@ inline void expect(T t, bool v) {
 }
 
 #ifdef NDEBUG
-#define assert(t, v)
+#define assertT(t, v)
 #else
 template<class T>
-inline void assert(T t, bool v) {
+inline void assertT(T t, bool v) {
 expect(t, v);
 }
 #endif
@@ -50,7 +50,7 @@ class Vector {
 void ensure(size_t space)
 {
 if (position + space > data.count) {
-assert(a, minimumCapacity > 0);
+assertT(a, minimumCapacity > 0);
 
 size_t newCapacity = avian::util::max(
 position + space, avian::util::max(minimumCapacity, data.count * 2));
@@ -64,13 +64,13 @@ class Vector {
 
 void get(size_t offset, void* dst, size_t size)
 {
-assert(a, offset + size <= position);
+assertT(a, offset + size <= position);
 memcpy(dst, data.begin() + offset, size);
 }
 
 void set(size_t offset, const void* src, size_t size)
 {
-assert(a, offset + size <= position);
+assertT(a, offset + size <= position);
 memcpy(data.begin() + offset, src, size);
 }
 
@@ -121,7 +121,7 @@ class Vector {
 
 void set2(size_t offset, uint16_t v)
 {
-assert(a, offset <= position - 2);
+assertT(a, offset <= position - 2);
 memcpy(data.begin() + offset, &v, 2);
 }
 
@@ -157,7 +157,7 @@ class Vector {
 template <class T>
 T* peek(size_t offset)
 {
-assert(a, offset + sizeof(T) <= position);
+assertT(a, offset + sizeof(T) <= position);
 return reinterpret_cast<T*>(data.begin() + offset);
 }
 
@@ -1872,7 +1872,7 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
 inline object
 allocateSmall(Thread* t, unsigned sizeInBytes)
 {
-assert(t, t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
+assertT(t, t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
 <= ThreadHeapSizeInWords);
 
 object o = reinterpret_cast<object>(t->heap + t->heapIndex);
@@ -1891,7 +1891,7 @@ allocate(Thread* t, unsigned sizeInBytes, bool objectMask)
 {
 return allocate2(t, sizeInBytes, objectMask);
 } else {
-assert(t, t->criticalLevel == 0);
+assertT(t, t->criticalLevel == 0);
 return allocateSmall(t, sizeInBytes);
 }
 }
@@ -1957,7 +1957,7 @@ instanceOf(Thread* t, GcClass* class_, object o);
 template <class T>
 T* GcObject::as(Thread* t UNUSED)
 {
-assert(t,
+assertT(t,
 t->m->unsafe
 || instanceOf(t, reinterpret_cast<GcClass*>(arrayBodyUnsafe(t, t->m->types, T::Type)), reinterpret_cast<object>(this)));
 return static_cast<T*>(this);
@@ -1975,7 +1975,7 @@ T* cast(Thread* t UNUSED, object o)
 if(o == 0) {
 return 0;
 }
-assert(t,
+assertT(t,
 t->m->unsafe || instanceOf(t,
 reinterpret_cast<GcClass*>(arrayBodyUnsafe(
 t, t->m->types, T::Type)),
@@ -2037,7 +2037,7 @@ addThread(Thread* t, Thread* p)
 {
 ACQUIRE_RAW(t, t->m->stateLock);
 
-assert(t, p->state == Thread::NoState);
+assertT(t, p->state == Thread::NoState);
 expect(t, t->state == Thread::ActiveState || t->state == Thread::ExclusiveState || t->state == Thread::NoState);
 
 p->state = Thread::IdleState;
@@ -2057,7 +2057,7 @@ removeThread(Thread* t, Thread* p)
 {
 ACQUIRE_RAW(t, t->m->stateLock);
 
-assert(t, p->state == Thread::IdleState);
+assertT(t, p->state == Thread::IdleState);
 
 -- t->m->liveCount;
 -- t->m->threadCount;
@@ -2205,7 +2205,7 @@ hashTaken(Thread*, object o)
 inline unsigned
 baseSize(Thread* t UNUSED, object o, GcClass* class_)
 {
-assert(t, class_->fixedSize() >= BytesPerWord);
+assertT(t, class_->fixedSize() >= BytesPerWord);
 
 return ceilingDivide(class_->fixedSize(), BytesPerWord)
 + ceilingDivide(class_->arrayElementSize()
@@ -2228,11 +2228,11 @@ makeTrace(Thread* t)
 inline object
 makeNew(Thread* t, GcClass* class_)
 {
-assert(t, t->state == Thread::NoState or t->state == Thread::ActiveState);
+assertT(t, t->state == Thread::NoState or t->state == Thread::ActiveState);
 
 PROTECT(t, class_);
 unsigned sizeInBytes = pad(class_->fixedSize());
-assert(t, sizeInBytes);
+assertT(t, sizeInBytes);
 object instance = allocate(t, sizeInBytes, class_->objectMask());
 setObjectClass(t, instance, class_);
 
@@ -2357,7 +2357,7 @@ frameMethod(Thread* t, int frame);
 inline uintptr_t&
 extendedWord(Thread* t UNUSED, object o, unsigned baseSize)
 {
-assert(t, objectExtended(t, o));
+assertT(t, objectExtended(t, o));
 return fieldAtOffset<uintptr_t>(o, baseSize * BytesPerWord);
 }
 
@@ -2370,8 +2370,8 @@ extendedSize(Thread* t, object o, unsigned baseSize)
 inline void
 markHashTaken(Thread* t, object o)
 {
-assert(t, not objectExtended(t, o));
-assert(t, not objectFixed(t, o));
+assertT(t, not objectExtended(t, o));
+assertT(t, not objectFixed(t, o));
 
 ACQUIRE_RAW(t, t->m->heapLock);
 
@@ -2498,7 +2498,7 @@ class MethodSpecIterator {
 { }
 
 const char* next() {
-assert(t, *s != ')');
+assertT(t, *s != ')');
 
 const char* p = s;
 
@@ -2535,7 +2535,7 @@ class MethodSpecIterator {
 }
 
 const char* returnSpec() {
-assert(t, *s == ')');
+assertT(t, *s == ')');
 return s + 1;
 }
 
@@ -2786,8 +2786,8 @@ dumpHeap(Thread* t, FILE* out);
 inline void NO_RETURN
 throw_(Thread* t, object e)
 {
-assert(t, t->exception == 0);
-assert(t, e);
+assertT(t, t->exception == 0);
+assertT(t, e);
 
 expect(t, not t->checkpoint->noThrow);
 
@@ -2897,7 +2897,7 @@ findVirtualMethod(Thread* t, GcMethod* method, GcClass* class_)
 inline GcMethod*
 findInterfaceMethod(Thread* t, GcMethod* method, GcClass* class_)
 {
-assert(t, (class_->vmFlags() & BootstrapFlag) == 0);
+assertT(t, (class_->vmFlags() & BootstrapFlag) == 0);
 
 object interface = method->class_();
 object itable = class_->interfaceTable();
@@ -2913,17 +2913,17 @@ findInterfaceMethod(Thread* t, GcMethod* method, GcClass* class_)
 inline unsigned
 objectArrayLength(Thread* t UNUSED, object array)
 {
-assert(t, objectClass(t, array)->fixedSize() == BytesPerWord * 2);
-assert(t, objectClass(t, array)->arrayElementSize() == BytesPerWord);
+assertT(t, objectClass(t, array)->fixedSize() == BytesPerWord * 2);
+assertT(t, objectClass(t, array)->arrayElementSize() == BytesPerWord);
 return fieldAtOffset<uintptr_t>(array, BytesPerWord);
 }
 
 inline object&
 objectArrayBody(Thread* t UNUSED, object array, unsigned index)
 {
-assert(t, objectClass(t, array)->fixedSize() == BytesPerWord * 2);
-assert(t, objectClass(t, array)->arrayElementSize() == BytesPerWord);
-assert(t, objectClass(t, array)->objectMask()
+assertT(t, objectClass(t, array)->fixedSize() == BytesPerWord * 2);
+assertT(t, objectClass(t, array)->arrayElementSize() == BytesPerWord);
+assertT(t, objectClass(t, array)->objectMask()
 == classObjectMask(t, arrayBody
 (t, t->m->types, GcArray::Type)));
 return fieldAtOffset<object>(array, ArrayBody + (index * BytesPerWord));
@@ -2953,7 +2953,7 @@ releaseSystem(Thread* t, Thread* target)
 {
 ACQUIRE_RAW(t, t->m->stateLock);
 
-assert(t, t->state != Thread::JoinedState);
+assertT(t, t->state != Thread::JoinedState);
 
 atomicAnd(&(target->flags), ~Thread::SystemFlag);
 }
@@ -3093,7 +3093,7 @@ monitorAcquire(Thread* t, object monitor, object node = 0)
 ++ monitorDepth(t, monitor);
 }
 
-assert(t, monitorOwner(t, monitor) == t);
+assertT(t, monitorOwner(t, monitor) == t);
 }
 
 inline void
@@ -3121,7 +3121,7 @@ monitorRelease(Thread* t, object monitor)
 inline void
 monitorAppendWait(Thread* t, object monitor)
 {
-assert(t, monitorOwner(t, monitor) == t);
+assertT(t, monitorOwner(t, monitor) == t);
 
 expect(t, (t->flags & Thread::WaitingFlag) == 0);
 expect(t, t->waitNext == 0);
@@ -3140,7 +3140,7 @@ monitorAppendWait(Thread* t, object monitor)
 inline void
 monitorRemoveWait(Thread* t, object monitor)
 {
-assert(t, monitorOwner(t, monitor) == t);
+assertT(t, monitorOwner(t, monitor) == t);
 
 Thread* previous = 0;
 for (Thread* current = static_cast<Thread*>(monitorWaitHead(t, monitor));
@@ -3154,7 +3154,7 @@ monitorRemoveWait(Thread* t, object monitor)
 }
 
 if (t == monitorWaitTail(t, monitor)) {
-assert(t, t->waitNext == 0);
+assertT(t, t->waitNext == 0);
 monitorWaitTail(t, monitor) = previous;
 }
 
@@ -3173,7 +3173,7 @@ monitorRemoveWait(Thread* t, object monitor)
 inline bool
 monitorFindWait(Thread* t, object monitor)
 {
-assert(t, monitorOwner(t, monitor) == t);
+assertT(t, monitorOwner(t, monitor) == t);
 
 for (Thread* current = static_cast<Thread*>(monitorWaitHead(t, monitor));
 current; current = current->waitNext)
@@ -3225,7 +3225,7 @@ monitorWait(Thread* t, object monitor, int64_t time)
 expect(t, not monitorFindWait(t, monitor));
 }
 
-assert(t, monitorOwner(t, monitor) == t);
+assertT(t, monitorOwner(t, monitor) == t);
 
 return interrupted;
 }
@@ -3233,7 +3233,7 @@ monitorWait(Thread* t, object monitor, int64_t time)
 inline Thread*
 monitorPollWait(Thread* t, object monitor)
 {
-assert(t, monitorOwner(t, monitor) == t);
+assertT(t, monitorOwner(t, monitor) == t);
 
 Thread* next = static_cast<Thread*>(monitorWaitHead(t, monitor));
 
@@ -3245,7 +3245,7 @@ monitorPollWait(Thread* t, object monitor)
 monitorWaitTail(t, monitor) = 0;
 }
 } else {
-assert(t, monitorWaitTail(t, monitor) == 0);
+assertT(t, monitorWaitTail(t, monitor) == 0);
 }
 
 return next;
@@ -3502,7 +3502,7 @@ singletonCount(Thread* t, GcSingleton* singleton)
 inline uint32_t*
 singletonMask(Thread* t, GcSingleton* singleton)
 {
-assert(t, singleton->length());
+assertT(t, singleton->length());
 return reinterpret_cast<uint32_t*>
 (&singletonBody(t, reinterpret_cast<object>(singleton), singletonCount(t, singleton)));
 }
@@ -3523,7 +3523,7 @@ singletonMarkObject(Thread* t, GcSingleton* singleton, unsigned index)
 inline bool
 singletonIsObject(Thread* t, GcSingleton* singleton, unsigned index)
 {
-assert(t, index < singletonCount(t, singleton));
+assertT(t, index < singletonCount(t, singleton));
 
 return (singletonMask(t, singleton)[(index + 2) / 32]
 & (static_cast<uint32_t>(1) << ((index + 2) % 32))) != 0;
@@ -3532,14 +3532,14 @@ singletonIsObject(Thread* t, GcSingleton* singleton, unsigned index)
 inline object&
 singletonObject(Thread* t, GcSingleton* singleton, unsigned index)
 {
-assert(t, singletonIsObject(t, singleton, index));
+assertT(t, singletonIsObject(t, singleton, index));
 return reinterpret_cast<object&>(singletonBody(t, reinterpret_cast<object>(singleton), index));
 }
 
 inline uintptr_t&
 singletonValue(Thread* t, GcSingleton* singleton, unsigned index)
 {
-assert(t, not singletonIsObject(t, singleton, index));
+assertT(t, not singletonIsObject(t, singleton, index));
 return singletonBody(t, reinterpret_cast<object>(singleton), index);
 }
 
@@ -3547,7 +3547,7 @@ inline GcSingleton*
 makeSingletonOfSize(Thread* t, unsigned count)
 {
 GcSingleton* o = makeSingleton(t, count + singletonMaskSize(count));
-assert(t, o->length() == count + singletonMaskSize(t, o));
+assertT(t, o->length() == count + singletonMaskSize(t, o));
 if (count) {
 singletonMask(t, o)[0] = 1;
 }
@@ -1556,7 +1556,7 @@ Avian_dalvik_system_VMStack_getClasses
 
 object c = getJClass(t, cast<GcClass>(t, walker->method()->class_()));
 
-assert(t, counter - 2 < objectArrayLength(t, array));
+assertT(t, counter - 2 < objectArrayLength(t, array));
 
 set(t, array, ArrayBody + ((counter - 2) * BytesPerWord), c);
 
@@ -4283,7 +4283,7 @@ jvmGetClassDeclaredMethods(Thread* t, uintptr_t* arguments)
 {
 object method = makeJmethod(t, vmMethod, i);
 
-assert(t, ai < objectArrayLength(t, array));
+assertT(t, ai < objectArrayLength(t, array));
 
 set(t, array, ArrayBody + ((ai++) * BytesPerWord), method);
 }
@@ -4328,12 +4328,12 @@ jvmGetClassDeclaredFields(Thread* t, uintptr_t* arguments)
 if ((not publicOnly) or (fieldFlags(t, vmField) & ACC_PUBLIC)) {
 object field = makeJfield(t, vmField, i);
 
-assert(t, ai < objectArrayLength(t, array));
+assertT(t, ai < objectArrayLength(t, array));
 
 set(t, array, ArrayBody + ((ai++) * BytesPerWord), field);
 }
 }
-assert(t, ai == objectArrayLength(t, array));
+assertT(t, ai == objectArrayLength(t, array));
 
 return reinterpret_cast<uint64_t>(makeLocalReference(t, array));
 } else {
@@ -4381,7 +4381,7 @@ jvmGetClassDeclaredConstructors(Thread* t, uintptr_t* arguments)
 {
 object method = makeJconstructor(t, vmMethod, i);
 
-assert(t, ai < objectArrayLength(t, array));
+assertT(t, ai < objectArrayLength(t, array));
 
 set(t, array, ArrayBody + ((ai++) * BytesPerWord), method);
 }
@@ -74,7 +74,7 @@ class ConstantPoolNode {
 Read*
 live(Context* c UNUSED, Value* v)
 {
-assert(c, v->buddy->hasBuddy(c, v));
+assertT(c, v->buddy->hasBuddy(c, v));
 
 Value* p = v;
 do {
@@ -94,7 +94,7 @@ void
 deadWord(Context* c, Value* v)
 {
 Value* nextWord = v->nextWord;
-assert(c, nextWord != v);
+assertT(c, nextWord != v);
 
 for (SiteIterator it(c, v, true, false); it.hasMore();) {
 Site* s = it.next();
@@ -109,8 +109,8 @@ deadWord(Context* c, Value* v)
 void
 deadBuddy(Context* c, Value* v, Read* r UNUSED)
 {
-assert(c, v->buddy != v);
-assert(c, r);
+assertT(c, v->buddy != v);
+assertT(c, r);
 
 if (DebugBuddies) {
 fprintf(stderr, "remove dead buddy %p from", v);
@@ -120,7 +120,7 @@ deadBuddy(Context* c, Value* v, Read* r UNUSED)
 fprintf(stderr, "\n");
 }
 
-assert(c, v->buddy);
+assertT(c, v->buddy);
 
 Value* next = v->buddy;
 v->buddy = v;
@@ -128,7 +128,7 @@ deadBuddy(Context* c, Value* v, Read* r UNUSED)
 while (p->buddy != v) p = p->buddy;
 p->buddy = next;
 
-assert(c, p->buddy);
+assertT(c, p->buddy);
 
 for (SiteIterator it(c, v, false, false); it.hasMore();) {
 Site* s = it.next();
@@ -141,7 +141,7 @@ deadBuddy(Context* c, Value* v, Read* r UNUSED)
 void
 popRead(Context* c, Event* e UNUSED, Value* v)
 {
-assert(c, e == v->reads->event);
+assertT(c, e == v->reads->event);
 
 if (DebugReads) {
 fprintf(stderr, "pop read %p from %p next %p event %p (%s)\n",
@@ -197,13 +197,13 @@ sitesToString(Context* c, Site* sites, char* buffer, unsigned size)
 total += s->toString(c, buffer + total, size - total);
 
 if (s->next) {
-assert(c, size > total + 2);
+assertT(c, size > total + 2);
 memcpy(buffer + total, ", ", 2);
 total += 2;
 }
 }
 
-assert(c, size > total);
+assertT(c, size > total);
 buffer[total] = 0;
 
 return total;
@@ -216,7 +216,7 @@ sitesToString(Context* c, Value* v, char* buffer, unsigned size)
 Value* p = v;
 do {
 if (total) {
-assert(c, size > total + 2);
+assertT(c, size > total + 2);
 memcpy(buffer + total, "; ", 2);
 total += 2;
 }
@@ -586,7 +586,7 @@ acceptForResolve(Context* c, Site* s, Read* read, const SiteMask& mask)
 if (s->type(c) == lir::RegisterOperand) {
 return c->availableGeneralRegisterCount > ResolveRegisterReserveCount;
 } else {
-assert(c, s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex)));
+assertT(c, s->match(c, SiteMask(1 << lir::MemoryOperand, 0, AnyFrameIndex)));
 
 return isHome(read->value, offsetToFrameIndex
 (c, static_cast<MemorySite*>(s)->offset));
@@ -606,7 +606,7 @@ move(Context* c, Value* value, Site* src, Site* dst)
 srcb, dstb, value, value);
 }
 
-assert(c, value->findSite(dst));
+assertT(c, value->findSite(dst));
 
 src->freeze(c, value);
 dst->freeze(c, value);
@@ -662,7 +662,7 @@ void
 apply(Context* c, lir::UnaryOperation op,
 unsigned s1Size, Site* s1Low, Site* s1High)
 {
-assert(c, s1Low->type(c) == s1High->type(c));
+assertT(c, s1Low->type(c) == s1High->type(c));
 
 lir::OperandType s1Type = s1Low->type(c);
 OperandUnion s1Union; asAssemblerOperand(c, s1Low, s1High, &s1Union);
@@ -676,8 +676,8 @@ apply(Context* c, lir::BinaryOperation op,
 unsigned s1Size, Site* s1Low, Site* s1High,
 unsigned s2Size, Site* s2Low, Site* s2High)
 {
-assert(c, s1Low->type(c) == s1High->type(c));
-assert(c, s2Low->type(c) == s2High->type(c));
+assertT(c, s1Low->type(c) == s1High->type(c));
+assertT(c, s2Low->type(c) == s2High->type(c));
 
 lir::OperandType s1Type = s1Low->type(c);
 OperandUnion s1Union; asAssemblerOperand(c, s1Low, s1High, &s1Union);
@@ -696,9 +696,9 @@ apply(Context* c, lir::TernaryOperation op,
 unsigned s2Size, Site* s2Low, Site* s2High,
 unsigned s3Size, Site* s3Low, Site* s3High)
 {
-assert(c, s1Low->type(c) == s1High->type(c));
-assert(c, s2Low->type(c) == s2High->type(c));
-assert(c, s3Low->type(c) == s3High->type(c));
+assertT(c, s1Low->type(c) == s1High->type(c));
+assertT(c, s2Low->type(c) == s2High->type(c));
+assertT(c, s3Low->type(c) == s3High->type(c));
 
 lir::OperandType s1Type = s1Low->type(c);
 OperandUnion s1Union; asAssemblerOperand(c, s1Low, s1High, &s1Union);
@@ -819,8 +819,8 @@ void maybeMove(Context* c,
 src.registerMask &= c->regFile->generalRegisters.mask;
 }
 
-assert(c, thunk == 0);
-assert(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand));
+assertT(c, thunk == 0);
+assertT(c, dstMask.typeMask & src.typeMask & (1 << lir::RegisterOperand));
 
 Site* tmpTarget = freeRegisterSite
 (c, dstMask.registerMask & src.registerMask);
@@ -927,7 +927,7 @@ pickSiteOrMove(Context* c, Value* src, Value* dst, Site* nextWord,
 s = maybeMove(c, read, false, true);
 }
 }
-assert(c, s);
+assertT(c, s);
 
 addBuddy(src, dst);
 
@@ -965,7 +965,7 @@ removeBuddy(Context* c, Value* v)
 fprintf(stderr, "\n");
 }
 
-assert(c, v->buddy);
+assertT(c, v->buddy);
 
 Value* next = v->buddy;
 v->buddy = v;
@@ -973,7 +973,7 @@ removeBuddy(Context* c, Value* v)
 while (p->buddy != v) p = p->buddy;
 p->buddy = next;
 
-assert(c, p->buddy);
+assertT(c, p->buddy);
 
 if (not live(c, next)) {
 next->clearSites(c);
@@ -1063,7 +1063,7 @@ pushWord(Context* c, Value* v)
 void
 push(Context* c, unsigned footprint, Value* v)
 {
-assert(c, footprint);
+assertT(c, footprint);
 
 bool bigEndian = c->arch->bigEndian();
 
@@ -1075,7 +1075,7 @@ push(Context* c, unsigned footprint, Value* v)
 
 Value* high;
 if (footprint > 1) {
-assert(c, footprint == 2);
+assertT(c, footprint == 2);
 
 if (TargetBytesPerWord == 4) {
 low->maybeSplit(c);
@@ -1102,7 +1102,7 @@ void
 popWord(Context* c)
 {
 Stack* s = c->stack;
-assert(c, s->value == 0 or s->value->home >= 0);
+assertT(c, s->value == 0 or s->value->home >= 0);
 
 if (DebugFrame) {
 fprintf(stderr, "pop %p\n", s->value);
@@ -1114,7 +1114,7 @@ popWord(Context* c)
 Value*
 pop(Context* c, unsigned footprint)
 {
-assert(c, footprint);
+assertT(c, footprint);
 
 Stack* s = 0;
 
@@ -1125,7 +1125,7 @@ pop(Context* c, unsigned footprint)
 }
 
 if (footprint > 1) {
-assert(c, footprint == 2);
+assertT(c, footprint == 2);
 
 #ifndef NDEBUG
 Stack* low;
@@ -1138,7 +1138,7 @@ pop(Context* c, unsigned footprint)
 high = low->next;
 }
 
-assert(c, (TargetBytesPerWord == 8
+assertT(c, (TargetBytesPerWord == 8
 and low->value->nextWord == low->value and high->value == 0)
 or (TargetBytesPerWord == 4 and low->value->nextWord == high->value));
 #endif // not NDEBUG
@@ -1158,7 +1158,7 @@ pop(Context* c, unsigned footprint)
 Value*
 storeLocal(Context* c, unsigned footprint, Value* v, unsigned index, bool copy)
 {
-assert(c, index + footprint <= c->localFootprint);
+assertT(c, index + footprint <= c->localFootprint);
 
 if (copy) {
 unsigned sizeInBytes = sizeof(Local) * c->localFootprint;
@@ -1169,7 +1169,7 @@ storeLocal(Context* c, unsigned footprint, Value* v, unsigned index, bool copy)
 
 Value* high;
 if (footprint > 1) {
-assert(c, footprint == 2);
+assertT(c, footprint == 2);
 
 unsigned highIndex;
 unsigned lowIndex;
@@ -1182,7 +1182,7 @@ storeLocal(Context* c, unsigned footprint, Value* v, unsigned index, bool copy)
 }
 
 if (TargetBytesPerWord == 4) {
-assert(c, v->nextWord != v);
+assertT(c, v->nextWord != v);
 
 high = storeLocal(c, 1, v->nextWord, highIndex, false);
 } else {
@@ -1235,18 +1235,18 @@ unsigned typeFootprint(Context* c, ir::Type type)
 Value* loadLocal(Context* c, ir::Type type, unsigned index)
 {
 unsigned footprint = typeFootprint(c, type);
-assert(c, index + footprint <= c->localFootprint);
+assertT(c, index + footprint <= c->localFootprint);
 
 if (footprint > 1) {
-assert(c, footprint == 2);
+assertT(c, footprint == 2);
 
 if (not c->arch->bigEndian()) {
 ++ index;
 }
 }
 
-assert(c, c->locals[index].value);
-assert(c, c->locals[index].value->home >= 0);
+assertT(c, c->locals[index].value);
+assertT(c, c->locals[index].value->home >= 0);
 
 if (DebugFrame) {
 fprintf(stderr, "load local %p at %d\n", c->locals[index].value, index);
@@ -1295,7 +1295,7 @@ visit(Context* c, Link* link)
 StubReadPair* p = junctionState->reads + i;
 
 if (p->value and p->value->reads) {
-assert(c, p->value->reads == p->read);
+assertT(c, p->value->reads == p->read);
 popRead(c, 0, p->value);
 }
 }
@@ -1319,10 +1319,10 @@ class BuddyEvent: public Event {
 fprintf(stderr, "original %p buddy %p\n", original, buddy);
 }
 
-assert(c, original->hasSite(c));
+assertT(c, original->hasSite(c));
 
-assert(c, original);
-assert(c, buddy);
+assertT(c, original);
+assertT(c, buddy);
 
 addBuddy(original, buddy);
 
@@ -1466,7 +1466,7 @@ class SiteRecordList {
 void
 freeze(Context* c, SiteRecordList* frozen, Site* s, Value* v)
 {
-assert(c, frozen->index < frozen->capacity);
+assertT(c, frozen->index < frozen->capacity);
 
 s->freeze(c, v);
 init(new (frozen->records + (frozen->index ++)) SiteRecord, s, v);
@@ -1667,7 +1667,7 @@ populateSiteTables(Context* c, Event* e, SiteRecordList* frozen)
 void
 setSites(Context* c, Value* v, Site* s)
 {
-assert(c, live(c, v));
+assertT(c, live(c, v));
 
 for (; s; s = s->next) {
 v->addSite(c, s->copy(c));
@@ -1738,7 +1738,7 @@ restore(Context* c, Event* e, Snapshot* snapshots)
 }
 
 for (Snapshot* s = snapshots; s; s = s->next) {
-assert(c, s->buddy);
+assertT(c, s->buddy);
 
 s->value->buddy = s->buddy;
 }
@@ -2109,9 +2109,9 @@ class Client: public Assembler::Client {
 virtual void save(int r) {
 RegisterResource* reg = c->registerResources + r;
 
-assert(c, reg->referenceCount == 0);
-assert(c, reg->freezeCount == 0);
-assert(c, not reg->reserved);
+assertT(c, reg->referenceCount == 0);
+assertT(c, reg->freezeCount == 0);
+assertT(c, not reg->reserved);
 
 if (reg->value) {
 steal(c, reg, 0);
@@ -2177,7 +2177,7 @@ class MyCompiler: public Compiler {
 }
 
 virtual void visitLogicalIp(unsigned logicalIp) {
-assert(&c, logicalIp < c.logicalCode.count());
+assertT(&c, logicalIp < c.logicalCode.count());
 
 if (c.logicalCode[c.logicalIp]->lastEvent == 0) {
 appendDummy(&c);
@@ -2213,8 +2213,8 @@ class MyCompiler: public Compiler {
 }
 
 virtual void startLogicalIp(unsigned logicalIp) {
-assert(&c, logicalIp < c.logicalCode.count());
-assert(&c, c.logicalCode[logicalIp] == 0);
+assertT(&c, logicalIp < c.logicalCode.count());
+assertT(&c, c.logicalCode[logicalIp] == 0);
 
 if (c.logicalCode[c.logicalIp]->lastEvent == 0) {
 appendDummy(&c);
@@ -2298,22 +2298,22 @@ class MyCompiler: public Compiler {
 
 virtual void push(ir::Type type, ir::Value* value)
 {
-// TODO: once type information is flowed properly, enable this assert.
+// TODO: once type information is flowed properly, enable this assertT.
 // Some time later, we can remove the parameter.
-// assert(&c, value->type == type);
+// assertT(&c, value->type == type);
 compiler::push(&c, typeFootprint(&c, type), static_cast<Value*>(value));
 }
 
 virtual void save(ir::Type type, ir::Value* value)
 {
-// TODO: once type information is flowed properly, enable this assert.
+// TODO: once type information is flowed properly, enable this assertT.
 // Some time later, we can remove the parameter.
-// assert(&c, value->type == type);
+// assertT(&c, value->type == type);
 unsigned footprint = typeFootprint(&c, type);
 c.saved = cons(&c, static_cast<Value*>(value), c.saved);
 if (TargetBytesPerWord == 4 and footprint > 1) {
-assert(&c, footprint == 2);
-assert(&c, static_cast<Value*>(value)->nextWord);
+assertT(&c, footprint == 2);
+assertT(&c, static_cast<Value*>(value)->nextWord);
 
 save(ir::Type::i4(), static_cast<Value*>(value)->nextWord);
 }
@@ -2322,9 +2322,9 @@ class MyCompiler: public Compiler {
 virtual ir::Value* pop(ir::Type type)
 {
 ir::Value* value = compiler::pop(&c, typeFootprint(&c, type));
-// TODO: once type information is flowed properly, enable this assert.
+// TODO: once type information is flowed properly, enable this assertT.
 // Some time later, we can remove the parameter.
-// assert(&c, static_cast<Value*>(value)->type == type);
+// assertT(&c, static_cast<Value*>(value)->type == type);
 return value;
 }
 
@@ -2342,7 +2342,7 @@ class MyCompiler: public Compiler {
 
 virtual void popped(unsigned footprint) {
 for (; footprint; -- footprint) {
-assert(&c, c.stack->value == 0 or c.stack->value->home >= 0);
+assertT(&c, c.stack->value == 0 or c.stack->value->home >= 0);
 
 if (DebugFrame) {
 fprintf(stderr, "popped %p\n", c.stack->value);
@@ -2364,7 +2364,7 @@ class MyCompiler: public Compiler {
 }
 
 if (footprint > 1) {
-assert(&c, footprint == 2);
+assertT(&c, footprint == 2);
 
 bool bigEndian = c.arch->bigEndian();
 
@@ -2379,7 +2379,7 @@ class MyCompiler: public Compiler {
 high = s->next;
 }
 
-assert(&c, (TargetBytesPerWord == 8
+assertT(&c, (TargetBytesPerWord == 8
 and low->value->nextWord == low->value and high->value == 0)
 or (TargetBytesPerWord == 4
 and low->value->nextWord == high->value));
@@ -2455,13 +2455,13 @@ class MyCompiler: public Compiler {
 traceHandler,
 result,
 arguments);
-assert(&c, c.stack == b);
+assertT(&c, c.stack == b);
 return result;
 }
 
 virtual void return_(ir::Value* a)
 {
-assert(&c, a);
+assertT(&c, a);
 appendReturn(&c, static_cast<Value*>(a));
 }
 
@@ -2493,12 +2493,12 @@ class MyCompiler: public Compiler {
 {
 unsigned footprint = typeFootprint(&c, type);
 
-assert(&c, index + footprint <= c.localFootprint);
+assertT(&c, index + footprint <= c.localFootprint);
 
 Value* v = value(&c, type);
 
 if (footprint > 1) {
-assert(&c, footprint == 2);
+assertT(&c, footprint == 2);
 
 unsigned highIndex;
 unsigned lowIndex;
@@ -2534,7 +2534,7 @@ class MyCompiler: public Compiler {
 }
 
 virtual void initLocalsFromLogicalIp(unsigned logicalIp) {
-assert(&c, logicalIp < c.logicalCode.count());
+assertT(&c, logicalIp < c.logicalCode.count());
 
 unsigned footprint = sizeof(Local) * c.localFootprint;
 Local* newLocals = static_cast<Local*>(c.zone->allocate(footprint));
@@ -2565,7 +2565,7 @@ class MyCompiler: public Compiler {
 virtual void saveLocals() {
 int oldIp UNUSED = c.logicalIp;
 appendSaveLocals(&c);
-assert(&c, oldIp == c.logicalIp);
+assertT(&c, oldIp == c.logicalIp);
 }
 
 virtual void checkBounds(ir::Value* object,
@@ -2579,9 +2579,9 @@ class MyCompiler: public Compiler {
 
 virtual ir::Value* truncate(ir::Type type, ir::Value* src)
 {
-assert(&c, src->type.flavor() == type.flavor());
-assert(&c, type.flavor() != ir::Type::Float);
-assert(&c, type.rawSize() < src->type.rawSize());
+assertT(&c, src->type.flavor() == type.flavor());
+assertT(&c, type.flavor() != ir::Type::Float);
+assertT(&c, type.rawSize() < src->type.rawSize());
 Value* dst = value(&c, type);
 appendMove(&c,
 lir::Move,
@@ -2613,7 +2613,7 @@ class MyCompiler: public Compiler {
 
 virtual void store(ir::Value* src, ir::Value* dst)
 {
-assert(&c, src->type.flavor() == dst->type.flavor());
+assertT(&c, src->type.flavor() == dst->type.flavor());
 
 appendMove(&c,
 lir::Move,
@@ -2628,7 +2628,7 @@ class MyCompiler: public Compiler {
 ir::Value* src,
 ir::Type dstType)
 {
-assert(&c, src->type.flavor() == dstType.flavor());
+assertT(&c, src->type.flavor() == dstType.flavor());
 
 Value* dst = value(&c, dstType);
 appendMove(&c,
@@ -2648,12 +2648,12 @@ class MyCompiler: public Compiler {
 ir::Value* b,
 ir::Value* addr)
 {
-assert(&c,
+assertT(&c,
 (isGeneralBranch(op) and isGeneralValue(a) and isGeneralValue(b))or(
 isFloatBranch(op) and isFloatValue(a) and isFloatValue(b)));
 
-assert(&c, a->type == b->type);
-assert(&c, addr->type == ir::Type::iptr());
+assertT(&c, a->type == b->type);
+assertT(&c, addr->type == ir::Type::iptr());
 
 appendBranch(&c,
 op,
@@ -2677,7 +2677,7 @@ class MyCompiler: public Compiler {
 ir::Value* a,
 ir::Value* b)
 {
-assert(&c,
+assertT(&c,
 (isGeneralBinaryOp(op) and isGeneralValue(a) and isGeneralValue(b))
 or(isFloatBinaryOp(op) and isFloatValue(a) and isFloatValue(b)));
 
@@ -2694,7 +2694,7 @@ class MyCompiler: public Compiler {
 virtual ir::Value* unaryOp(lir::BinaryOperation op,
 ir::Value* a)
 {
-assert(&c,
+assertT(&c,
 (isGeneralUnaryOp(op) and isGeneralValue(a))or(isFloatUnaryOp(op)
 and isFloatValue(a)));
 Value* result = value(&c, a->type);
@@ -2705,8 +2705,8 @@ class MyCompiler: public Compiler {
 
 virtual ir::Value* f2f(ir::Type resType, ir::Value* a)
 {
-assert(&c, isFloatValue(a));
-assert(&c, resType.flavor() == ir::Type::Float);
+assertT(&c, isFloatValue(a));
+assertT(&c, resType.flavor() == ir::Type::Float);
 Value* result = value(&c, resType);
 appendTranslate(&c,
 lir::Float2Float,
@@ -2717,8 +2717,8 @@ class MyCompiler: public Compiler {
 
 virtual ir::Value* f2i(ir::Type resType, ir::Value* a)
 {
-assert(&c, isFloatValue(a));
-assert(&c, resType.flavor() != ir::Type::Float);
+assertT(&c, isFloatValue(a));
+assertT(&c, resType.flavor() != ir::Type::Float);
 Value* result = value(&c, resType);
 appendTranslate(&c,
 lir::Float2Int,
@@ -2729,8 +2729,8 @@ class MyCompiler: public Compiler {
 
 virtual ir::Value* i2f(ir::Type resType, ir::Value* a)
 {
-assert(&c, isGeneralValue(a));
-assert(&c, resType.flavor() == ir::Type::Float);
+assertT(&c, isGeneralValue(a));
+assertT(&c, resType.flavor() == ir::Type::Float);
 Value* result = value(&c, resType);
 appendTranslate(&c,
 lir::Int2Float,
@@ -73,7 +73,7 @@ class LogicalCode {
 = logicalCode.cloneAndSet(zone, logicalCode.count + more, 0);
 
 for (size_t i = 0; i < logicalCode.count; i++) {
-assert((vm::System*)0, logicalCode[i] == newCode[i]);
+assertT((vm::System*)0, logicalCode[i] == newCode[i]);
 }
 
 logicalCode = newCode;
@@ -235,7 +235,7 @@ Value* slicePushWord(Context* c,
 
 size_t index UNUSED = slice.count;
 
-assert(c, slice.count < slice.capacity);
+assertT(c, slice.count < slice.capacity);
 slice.push(v);
 
 // if (DebugFrame) {
@@ -255,7 +255,7 @@ void slicePush(Context* c,
 size_t stackBase,
 SliceStack<ir::Value*>& slice)
 {
-assert(c, footprint);
+assertT(c, footprint);
 
 bool bigEndian = c->arch->bigEndian();
 
@@ -267,7 +267,7 @@ void slicePush(Context* c,
 
 Value* high;
 if (footprint > 1) {
-assert(c, footprint == 2);
+assertT(c, footprint == 2);
 
 if (c->targetInfo.pointerSize == 4) {
 low->maybeSplit(c);
@@ -315,8 +315,8 @@ class CallEvent: public Event {
 uint32_t registerMask = c->regFile->generalRegisters.mask;
 
 if (callingConvention == ir::NativeCallingConvention) {
-assert(c, (flags & Compiler::TailJump) == 0);
-assert(c, stackArgumentFootprint == 0);
+assertT(c, (flags & Compiler::TailJump) == 0);
+assertT(c, stackArgumentFootprint == 0);
 
 unsigned index = 0;
 unsigned argumentIndex = 0;
@@ -381,7 +381,7 @@ class CallEvent: public Event {
 op,
 &thunk);
 
-assert(c, not thunk);
+assertT(c, not thunk);
 
 this->addRead(c, address, SiteMask
 (op.typeMask, registerMask & op.registerMask, AnyFrameIndex));
@@ -399,7 +399,7 @@ class CallEvent: public Event {
 if ((c->targetInfo.pointerSize == 8
 && (v == 0 || (i >= 1 && arguments[i - 1] == 0)))
 || (c->targetInfo.pointerSize == 4 && v->nextWord != v)) {
-assert(c,
+assertT(c,
 c->targetInfo.pointerSize == 8
 or v->nextWord == arguments[i - 1]);
 
@@ -414,7 +414,7 @@ class CallEvent: public Event {
 int frameOffset;
 
 if (TailCalls and (flags & Compiler::TailJump)) {
-assert(c, arguments.count == 0);
+assertT(c, arguments.count == 0);
 
 int base = frameBase(c);
 returnAddressIndex = base + c->arch->returnAddressOffset();
@@ -467,7 +467,7 @@ class CallEvent: public Event {
 - c->arch->frameFooterSize()
 - stackArgumentIndex;
 
-assert(c, static_cast<int>(popIndex) >= 0);
+assertT(c, static_cast<int>(popIndex) >= 0);
 
 while (stack) {
 if (stack->value) {
@@ -512,9 +512,9 @@ class CallEvent: public Event {
 op = lir::Jump;
 }
 
-assert(c, returnAddressSurrogate == 0
+assertT(c, returnAddressSurrogate == 0
 or returnAddressSurrogate->source->type(c) == lir::RegisterOperand);
-assert(c, framePointerSurrogate == 0
+assertT(c, framePointerSurrogate == 0
 or framePointerSurrogate->source->type(c) == lir::RegisterOperand);
 
 int ras;
@@ -677,7 +677,7 @@ class MoveEvent: public Event {
 dstSize(dstSize),
 dstValue(dstValue)
 {
-assert(c, srcSelectSize <= srcSize);
+assertT(c, srcSelectSize <= srcSize);
 
 bool noop = srcSelectSize >= dstSize;
 
@@ -783,11 +783,11 @@ class MoveEvent: public Event {
 dstValue,
 dstLowMask);
 } else {
-assert(c, srcSize == c->targetInfo.pointerSize);
-assert(c, srcSelectSize == c->targetInfo.pointerSize);
+assertT(c, srcSize == c->targetInfo.pointerSize);
+assertT(c, srcSelectSize == c->targetInfo.pointerSize);
 
 if (dstValue->nextWord->target or live(c, dstValue->nextWord)) {
-assert(c, dstLowMask.typeMask & (1 << lir::RegisterOperand));
+assertT(c, dstLowMask.typeMask & (1 << lir::RegisterOperand));
 
 Site* low = freeRegisterSite(c, dstLowMask.registerMask);
 
@@ -817,7 +817,7 @@ class MoveEvent: public Event {
 
 srcValue->source->thaw(c, srcValue);
 
-assert(c, dstHighMask.typeMask & (1 << lir::RegisterOperand));
+assertT(c, dstHighMask.typeMask & (1 << lir::RegisterOperand));
 
 Site* high = freeRegisterSite(c, dstHighMask.registerMask);
 
@@ -877,7 +877,7 @@ void appendMove(Context* c,
 
 c->arch->planSource(op, srcSelectSize, src, dstSize, &thunk);
 
-assert(c, not thunk);
+assertT(c, not thunk);
 
 append(c,
 new (c->zone) MoveEvent(c,
@@ -911,7 +911,7 @@ thawSource(Context* c, unsigned size, Value* v)
 }
 
 Read* liveNext(Context* c, Value* v) {
-assert(c, v->buddy->hasBuddy(c, v));
+assertT(c, v->buddy->hasBuddy(c, v));
 
 Read* r = v->reads->next(c);
 if (valid(r)) return r;
@@ -1004,7 +1004,7 @@ class CombineEvent: public Event {
 }
 
 virtual void compile(Context* c) {
-assert(c, firstValue->source->type(c) == firstValue->nextWord->source->type(c));
+assertT(c, firstValue->source->type(c) == firstValue->nextWord->source->type(c));
 
 // if (secondValue->source->type(c) != secondValue->nextWord->source->type(c)) {
 // fprintf(stderr, "%p %p %d : %p %p %d\n",
@@ -1013,7 +1013,7 @@ class CombineEvent: public Event {
 // secondValue->nextWord->source->type(c));
 // }
 
-assert(c, secondValue->source->type(c) == secondValue->nextWord->source->type(c));
+assertT(c, secondValue->source->type(c) == secondValue->nextWord->source->type(c));
 
 freezeSource(c, firstValue->type.size(c->targetInfo), firstValue);
 
@@ -1197,7 +1197,7 @@ class TranslateEvent: public Event {
 }
 
 virtual void compile(Context* c) {
-assert(c, firstValue->source->type(c) == firstValue->nextWord->source->type(c));
+assertT(c, firstValue->source->type(c) == firstValue->nextWord->source->type(c));
 
 OperandMask bMask;
 
@@ -1265,10 +1265,10 @@ void appendTranslate(Context* c,
 Value* firstValue,
 Value* resultValue)
 {
-assert(c,
+assertT(c,
 firstValue->type.size(c->targetInfo)
 == firstValue->type.size(c->targetInfo));
-assert(c,
+assertT(c,
 resultValue->type.size(c->targetInfo)
 == resultValue->type.size(c->targetInfo));
 
@@ -1391,13 +1391,13 @@ class MemoryEvent: public Event {
 displacement += (constant->value->value() * scale);
 scale = 1;
 } else {
-assert(c, index->source->type(c) == lir::RegisterOperand);
+assertT(c, index->source->type(c) == lir::RegisterOperand);
 indexRegister = static_cast<RegisterSite*>(index->source)->number;
 }
 } else {
 indexRegister = lir::NoRegister;
 }
-assert(c, base->source->type(c) == lir::RegisterOperand);
+assertT(c, base->source->type(c) == lir::RegisterOperand);
 int baseRegister = static_cast<RegisterSite*>(base->source)->number;
 
 popRead(c, this, base);
@ -1693,7 +1693,7 @@ void appendBranch(Context* c,
|
|||||||
firstValue->type.size(c->targetInfo),
|
firstValue->type.size(c->targetInfo),
|
||||||
&threadParameter);
|
&threadParameter);
|
||||||
|
|
||||||
assert(c, not threadParameter);
|
assertT(c, not threadParameter);
|
||||||
|
|
||||||
slicePush(c,
|
slicePush(c,
|
||||||
ceilingDivide(firstValue->type.size(c->targetInfo),
|
ceilingDivide(firstValue->type.size(c->targetInfo),
|
||||||
@ -1786,7 +1786,7 @@ class JumpEvent: public Event {
|
|||||||
OperandMask mask;
|
OperandMask mask;
|
||||||
c->arch->plan(op, c->targetInfo.pointerSize, mask, &thunk);
|
c->arch->plan(op, c->targetInfo.pointerSize, mask, &thunk);
|
||||||
|
|
||||||
assert(c, not thunk);
|
assertT(c, not thunk);
|
||||||
|
|
||||||
this->addRead(c, address, SiteMask::lowPart(mask));
|
this->addRead(c, address, SiteMask::lowPart(mask));
|
||||||
}
|
}
|
||||||
@ -1881,7 +1881,7 @@ class BoundsCheckEvent: public Event {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (constant == 0 or constant->value->value() >= 0) {
|
if (constant == 0 or constant->value->value() >= 0) {
|
||||||
assert(c, object->source->type(c) == lir::RegisterOperand);
|
assertT(c, object->source->type(c) == lir::RegisterOperand);
|
||||||
MemorySite length(static_cast<RegisterSite*>(object->source)->number,
|
MemorySite length(static_cast<RegisterSite*>(object->source)->number,
|
||||||
lengthOffset, lir::NoRegister, 1);
|
lengthOffset, lir::NoRegister, 1);
|
||||||
length.acquired = true;
|
length.acquired = true;
|
||||||
|
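The hunks above and below are mechanical, but the conflict they avoid is easy to reproduce outside this tree: standard C and C++ headers define assert as a function-like macro, so a project helper spelled "assert" can be rewritten by the preprocessor wherever <assert.h> (or any header that pulls it in) is visible. The following is a hypothetical, minimal sketch of that collision and of the suffix workaround; names and bodies are placeholders, not the project's actual definition:

#include <assert.h>   // defines the standard assert(expr) macro

struct Context { bool ok; };

// If this helper were named "assert", its declaration and every call site
// compiled with <assert.h> in scope could be macro-expanded (the macro takes
// one argument), producing confusing compile errors; the T suffix keeps the
// identifier out of the preprocessor's reach.
inline void assertT(Context* c, bool v) {
  (void)c;
  if (!v) {
    // a debug build would abort or trap here
  }
}

int main() {
  Context c{true};
  assertT(&c, c.ok);  // never touched by the preprocessor
  assert(c.ok);       // the standard macro still works alongside it
  return 0;
}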
@@ -26,7 +26,7 @@ unsigned totalFrameSize(Context* c) {
}

int frameIndex(Context* c, int localIndex) {
assert(c, localIndex >= 0);
assertT(c, localIndex >= 0);

int index = c->alignedFrameSize + c->parameterFootprint - localIndex - 1;

@@ -36,23 +36,23 @@ int frameIndex(Context* c, int localIndex) {
index -= c->arch->frameFooterSize();
}

assert(c, index >= 0);
assertT(c, index >= 0);
assert(c, static_cast<unsigned>(index) < totalFrameSize(c));
assertT(c, static_cast<unsigned>(index) < totalFrameSize(c));

return index;
}

unsigned frameIndexToOffset(Context* c, unsigned frameIndex) {
assert(c, frameIndex < totalFrameSize(c));
assertT(c, frameIndex < totalFrameSize(c));

return (frameIndex + c->arch->frameFooterSize()) * c->targetInfo.pointerSize;
}

unsigned offsetToFrameIndex(Context* c, unsigned offset) {
assert(c,
assertT(c,
static_cast<int>((offset / c->targetInfo.pointerSize)
- c->arch->frameFooterSize()) >= 0);
assert(c,
assertT(c,
((offset / c->targetInfo.pointerSize) - c->arch->frameFooterSize())
< totalFrameSize(c));
@@ -44,7 +44,7 @@ bool SingleRead::valid() {
}

void SingleRead::append(Context* c UNUSED, Read* r) {
assert(c, next_ == 0);
assertT(c, next_ == 0);
next_ = r;
}

@@ -170,7 +170,7 @@ bool StubRead::valid() {
}

void StubRead::append(Context* c UNUSED, Read* r) {
assert(c, next_ == 0);
assertT(c, next_ == 0);
next_ = r;
}

@@ -181,7 +181,7 @@ Read* StubRead::next(Context*) {


SingleRead* read(Context* c, const SiteMask& mask, Value* successor) {
assert(c, (mask.typeMask != 1 << lir::MemoryOperand) or mask.frameIndex >= 0);
assertT(c, (mask.typeMask != 1 << lir::MemoryOperand) or mask.frameIndex >= 0);

return new(c->zone) SingleRead(mask, successor);
}
@@ -40,7 +40,7 @@ resourceCost(Context* c, Value* v, Resource* r, SiteMask mask,
costCalculator ? costCalculator->cost(c, mask) : 0;

if (r->value) {
assert(c, r->value->findSite(r->site));
assertT(c, r->value->findSite(r->site));

if (v and r->value->isBuddyOf(v)) {
return baseCost;

@@ -285,7 +285,7 @@ pickTarget(Context* c, Read* read, bool intersectRead,
// memory isn't an option - try harder to find an available frame
// site:
best = pickAnyFrameTarget(c, value, costCalculator);
assert(c, best.cost <= 3);
assertT(c, best.cost <= 3);
}

if (best.cost == Target::Impossible) {
@@ -22,7 +22,7 @@ const bool DebugResources = false;
void steal(Context* c, Resource* r, Value* thief);

void decrementAvailableGeneralRegisterCount(Context* c) {
assert(c, c->availableGeneralRegisterCount);
assertT(c, c->availableGeneralRegisterCount);
-- c->availableGeneralRegisterCount;

if (DebugResources) {

@@ -56,7 +56,7 @@ void thawResource(Context* c, Resource* r, Value* v) {
fprintf(stderr, "%p thaw %s to %d\n", v, buffer, r->freezeCount - 1);
}

assert(c, r->freezeCount);
assertT(c, r->freezeCount);

-- r->freezeCount;
}

@@ -128,7 +128,7 @@ void RegisterResource::decrement(Context* c) {
fprintf(stderr, "decrement %s to %d\n", buffer, this->referenceCount - 1);
}

assert(c, this->referenceCount > 0);
assertT(c, this->referenceCount > 0);

-- this->referenceCount;

@@ -160,8 +160,8 @@ unsigned FrameResource::index(Context* c) {


void acquire(Context* c, Resource* resource, Value* value, Site* site) {
assert(c, value);
assertT(c, value);
assert(c, site);
assertT(c, site);

if (not resource->reserved) {
if (DebugResources) {

@@ -170,8 +170,8 @@ void acquire(Context* c, Resource* resource, Value* value, Site* site) {
}

if (resource->value) {
assert(c, resource->value->findSite(resource->site));
assertT(c, resource->value->findSite(resource->site));
assert(c, not value->findSite(resource->site));
assertT(c, not value->findSite(resource->site));

steal(c, resource, value);
}

@@ -194,11 +194,11 @@ void release(Context* c, Resource* resource, Value* value UNUSED, Site* site UNU
fprintf(stderr, "%p release %s\n", resource->value, buffer);
}

assert(c, resource->value);
assertT(c, resource->value);
assert(c, resource->site);
assertT(c, resource->site);

assert(c, resource->value->isBuddyOf(value));
assertT(c, resource->value->isBuddyOf(value));
assert(c, site == resource->site);
assertT(c, site == resource->site);

Resource* next = resource->nextAcquired;
if (next) {

@@ -211,7 +211,7 @@ void release(Context* c, Resource* resource, Value* value UNUSED, Site* site UNU
previous->nextAcquired = next;
resource->previousAcquired = 0;
} else {
assert(c, c->acquiredResources == resource);
assertT(c, c->acquiredResources == resource);
c->acquiredResources = next;
}
@@ -154,7 +154,7 @@ class AddressSite: public Site {
virtual void asAssemblerOperand(Context* c UNUSED, Site* high UNUSED,
lir::Operand* result)
{
assert(c, high == this);
assertT(c, high == this);

new (result) lir::Address(address);
}

@@ -205,7 +205,7 @@ unsigned RegisterSite::toString(Context*, char* buffer, unsigned bufferSize) {
}

unsigned RegisterSite::copyCost(Context* c, Site* s) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

if (s and
(this == s or

@@ -219,7 +219,7 @@ unsigned RegisterSite::copyCost(Context* c, Site* s) {
}

bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

if ((mask.typeMask & (1 << lir::RegisterOperand))) {
return ((static_cast<uint64_t>(1) << number) & mask.registerMask);

@@ -229,7 +229,7 @@ bool RegisterSite::match(Context* c UNUSED, const SiteMask& mask) {
}

bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

if ((mask.typeMask & (1 << lir::RegisterOperand))) {
return ((static_cast<uint64_t>(1) << number) == mask.registerMask);

@@ -239,7 +239,7 @@ bool RegisterSite::loneMatch(Context* c UNUSED, const SiteMask& mask) {
}

bool RegisterSite::matchNextWord(Context* c, Site* s, unsigned) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

if (s->type(c) != lir::RegisterOperand) {
return false;

@@ -248,7 +248,7 @@ bool RegisterSite::matchNextWord(Context* c, Site* s, unsigned) {
RegisterSite* rs = static_cast<RegisterSite*>(s);
unsigned size = rs->registerSize(c);
if (size > c->targetInfo.pointerSize) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);
return number == rs->number;
} else {
uint32_t mask = c->regFile->generalRegisters.mask;

@@ -272,25 +272,25 @@ void RegisterSite::acquire(Context* c, Value* v) {
}

void RegisterSite::release(Context* c, Value* v) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

compiler::release(c, c->registerResources + number, v, this);
}

void RegisterSite::freeze(Context* c, Value* v) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

c->registerResources[number].freeze(c, v);
}

void RegisterSite::thaw(Context* c, Value* v) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

c->registerResources[number].thaw(c, v);
}

bool RegisterSite::frozen(Context* c UNUSED) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

return c->registerResources[number].freezeCount != 0;
}

@@ -302,12 +302,12 @@ lir::OperandType RegisterSite::type(Context*) {
void RegisterSite::asAssemblerOperand(Context* c UNUSED, Site* high,
lir::Operand* result)
{
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

int highNumber;
if (high != this) {
highNumber = static_cast<RegisterSite*>(high)->number;
assert(c, highNumber != lir::NoRegister);
assertT(c, highNumber != lir::NoRegister);
} else {
highNumber = lir::NoRegister;
}

@@ -336,8 +336,8 @@ Site* RegisterSite::copyHigh(Context* c) {
}

Site* RegisterSite::makeNextWord(Context* c, unsigned) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);
assert(c, ((1 << number) & c->regFile->generalRegisters.mask));
assertT(c, ((1 << number) & c->regFile->generalRegisters.mask));

return freeRegisterSite(c, c->regFile->generalRegisters.mask);
}

@@ -347,7 +347,7 @@ SiteMask RegisterSite::mask(Context* c UNUSED) {
}

SiteMask RegisterSite::nextWordMask(Context* c, unsigned) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

if (registerSize(c) > c->targetInfo.pointerSize) {
return SiteMask

@@ -359,7 +359,7 @@ SiteMask RegisterSite::nextWordMask(Context* c, unsigned) {
}

unsigned RegisterSite::registerSize(Context* c) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

if ((1 << number) & c->regFile->floatRegisters.mask) {
return c->arch->floatRegisterSize();

@@ -369,7 +369,7 @@ unsigned RegisterSite::registerSize(Context* c) {
}

unsigned RegisterSite::registerMask(Context* c UNUSED) {
assert(c, number != lir::NoRegister);
assertT(c, number != lir::NoRegister);

return 1 << number;
}

@@ -377,8 +377,8 @@ unsigned RegisterSite::registerMask(Context* c UNUSED) {


Site* registerSite(Context* c, int number) {
assert(c, number >= 0);
assertT(c, number >= 0);
assert(c, (1 << number) & (c->regFile->generalRegisters.mask
assertT(c, (1 << number) & (c->regFile->generalRegisters.mask
| c->regFile->floatRegisters.mask));

return new(c->zone) RegisterSite(1 << number, number);

@@ -402,7 +402,7 @@ unsigned MemorySite::toString(Context*, char* buffer, unsigned bufferSize) {
}

unsigned MemorySite::copyCost(Context* c, Site* s) {
assert(c, acquired);
assertT(c, acquired);

if (s and
(this == s or

@@ -426,12 +426,12 @@ bool MemorySite::conflicts(const SiteMask& mask) {
}

bool MemorySite::match(Context* c, const SiteMask& mask) {
assert(c, acquired);
assertT(c, acquired);

if (mask.typeMask & (1 << lir::MemoryOperand)) {
if (mask.frameIndex >= 0) {
if (base == c->arch->stack()) {
assert(c, index == lir::NoRegister);
assertT(c, index == lir::NoRegister);
return static_cast<int>(frameIndexToOffset(c, mask.frameIndex))
== offset;
} else {

@@ -446,11 +446,11 @@ bool MemorySite::match(Context* c, const SiteMask& mask) {
}

bool MemorySite::loneMatch(Context* c, const SiteMask& mask) {
assert(c, acquired);
assertT(c, acquired);

if (mask.typeMask & (1 << lir::MemoryOperand)) {
if (base == c->arch->stack()) {
assert(c, index == lir::NoRegister);
assertT(c, index == lir::NoRegister);

if (mask.frameIndex == AnyFrameIndex) {
return false;

@@ -487,8 +487,8 @@ void MemorySite::acquire(Context* c, Value* v) {
}

if (base == c->arch->stack()) {
assert(c, index == lir::NoRegister);
assertT(c, index == lir::NoRegister);
assert
assertT
(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved);

compiler::acquire

@@ -500,8 +500,8 @@ void MemorySite::acquire(Context* c, Value* v) {

void MemorySite::release(Context* c, Value* v) {
if (base == c->arch->stack()) {
assert(c, index == lir::NoRegister);
assertT(c, index == lir::NoRegister);
assert
assertT
(c, not c->frameResources[offsetToFrameIndex(c, offset)].reserved);

compiler::release

@@ -551,7 +551,7 @@ void MemorySite::asAssemblerOperand(Context* c UNUSED, Site* high UNUSED,
lir::Operand* result)
{
// todo: endianness?
assert(c,
assertT(c,
high == this
or (static_cast<MemorySite*>(high)->base == base
and static_cast<MemorySite*>(high)->offset

@@ -559,7 +559,7 @@ void MemorySite::asAssemblerOperand(Context* c UNUSED, Site* high UNUSED,
and static_cast<MemorySite*>(high)->index == index
and static_cast<MemorySite*>(high)->scale == scale));

assert(c, acquired);
assertT(c, acquired);

new (result) lir::Memory(base, offset, index, scale);
}

@@ -604,7 +604,7 @@ SiteMask MemorySite::mask(Context* c) {
SiteMask MemorySite::nextWordMask(Context* c, unsigned index) {
int frameIndex;
if (base == c->arch->stack()) {
assert(c, this->index == lir::NoRegister);
assertT(c, this->index == lir::NoRegister);
frameIndex = static_cast<int>(offsetToFrameIndex(c, offset))
+ ((index == 1) xor c->arch->bigEndian() ? 1 : -1);
} else {

@@ -623,7 +623,7 @@ MemorySite* memorySite(Context* c, int base, int offset, int index, unsigned sca
}

MemorySite* frameSite(Context* c, int frameIndex) {
assert(c, frameIndex >= 0);
assertT(c, frameIndex >= 0);
return memorySite
(c, c->arch->stack(), frameIndexToOffset(c, frameIndex), lir::NoRegister, 0);
}
@@ -61,7 +61,7 @@ void Value::addSite(Context* c, Site* s) {


void Value::grow(Context* c) {
assert(c, this->nextWord == this);
assertT(c, this->nextWord == this);

Value* next = value(c, this->type);
this->nextWord = next;

@@ -101,7 +101,7 @@ void Value::removeSite(Context* c, Site* s) {
if (DebugSites) {
fprintf(stderr, "%p has more: %d\n", this, this->hasSite(c));
}
assert(c, not this->findSite(s));
assertT(c, not this->findSite(s));
}

bool Value::hasSite(Context* c) {

@@ -128,7 +128,7 @@ bool Value::uniqueSite(Context* c, Site* s) {
return false;
}
} else {
assert(c, p == s);
assertT(c, p == s);
return true;
}
}
@@ -85,8 +85,8 @@ nextFrame(ArchitectureContext* con, uint32_t* start, unsigned size UNUSED,
unsigned footprint, void* link, bool,
int targetParameterFootprint UNUSED, void** ip, void** stack)
{
assert(con, *ip >= start);
assertT(con, *ip >= start);
assert(con, *ip <= start + (size / TargetBytesPerWord));
assertT(con, *ip <= start + (size / TargetBytesPerWord));

uint32_t* instruction = static_cast<uint32_t*>(*ip);

@@ -235,7 +235,7 @@ class MyArchitecture: public Architecture {
}

virtual int argumentRegister(unsigned index) {
assert(&con, index < argumentRegisterCount());
assertT(&con, index < argumentRegisterCount());

return index;
}

@@ -576,7 +576,7 @@ class MyAssembler: public Assembler {
{ }

virtual void setClient(Client* client) {
assert(&con, con.client == 0);
assertT(&con, con.client == 0);
con.client = client;
}

@@ -660,7 +660,7 @@ class MyAssembler: public Assembler {
// larger frames may require multiple subtract/add instructions
// to allocate/deallocate, and nextFrame will need to be taught
// how to handle them:
assert(&con, footprint < 256);
assertT(&con, footprint < 256);

lir::Register stack(StackRegister);
ResolvedPromise footprintPromise(footprint * TargetBytesPerWord);

@@ -701,7 +701,7 @@ class MyAssembler: public Assembler {
int returnAddressSurrogate,
int framePointerSurrogate UNUSED)
{
assert(&con, framePointerSurrogate == lir::NoRegister);
assertT(&con, framePointerSurrogate == lir::NoRegister);

if (TailCalls) {
if (offset) {

@@ -720,7 +720,7 @@ class MyAssembler: public Assembler {
addC(&con, TargetBytesPerWord, &footprintConstant, &stack, &stack);

if (returnAddressSurrogate != lir::NoRegister) {
assert(&con, offset > 0);
assertT(&con, offset > 0);

lir::Register ras(returnAddressSurrogate);
lir::Memory dst(StackRegister, (offset - 1) * TargetBytesPerWord);

@@ -739,8 +739,8 @@ class MyAssembler: public Assembler {
{
popFrame(frameFootprint);

assert(&con, argumentFootprint >= StackAlignmentInWords);
assertT(&con, argumentFootprint >= StackAlignmentInWords);
assert(&con, (argumentFootprint % StackAlignmentInWords) == 0);
assertT(&con, (argumentFootprint % StackAlignmentInWords) == 0);

unsigned offset;
if (TailCalls and argumentFootprint > StackAlignmentInWords) {

@@ -788,16 +788,16 @@ class MyAssembler: public Assembler {
virtual void apply(lir::TernaryOperation op, OperandInfo a, OperandInfo b, OperandInfo c)
{
if (isBranch(op)) {
assert(&con, a.size == b.size);
assertT(&con, a.size == b.size);
assert(&con, c.size == TargetBytesPerWord);
assertT(&con, c.size == TargetBytesPerWord);
assert(&con, c.type == lir::ConstantOperand);
assertT(&con, c.type == lir::ConstantOperand);

arch_->con.branchOperations[branchIndex(&(arch_->con), a.type, b.type)]
(&con, op, a.size, a.operand, b.operand, c.operand);
} else {
assert(&con, b.size == c.size);
assertT(&con, b.size == c.size);
assert(&con, b.type == lir::RegisterOperand);
assertT(&con, b.type == lir::RegisterOperand);
assert(&con, c.type == lir::RegisterOperand);
assertT(&con, c.type == lir::RegisterOperand);

arch_->con.ternaryOperations[index(&(arch_->con), op, a.type)]
(&con, b.size, a.operand, b.operand, c.operand);
@@ -29,7 +29,7 @@ bool OffsetPromise::resolved() {
}

int64_t OffsetPromise::value() {
assert(con, resolved());
assertT(con, resolved());

unsigned o = offset - block->offset;
return block->start + padding

@@ -99,7 +99,7 @@ ConstantPoolEntry::ConstantPoolEntry(Context* con, Promise* constant, ConstantPo
{ }

int64_t ConstantPoolEntry::value() {
assert(con, resolved());
assertT(con, resolved());

return reinterpret_cast<int64_t>(address);
}

@@ -34,7 +34,7 @@ unsigned index(ArchitectureContext* con UNUSED,
lir::TernaryOperation operation,
lir::OperandType operand1)
{
assert(con, not isBranch(operation));
assertT(con, not isBranch(operation));

return operation + (lir::NonBranchTernaryOperationCount * operand1);
}

@@ -61,7 +61,7 @@ void moveRR(Context* con, unsigned srcSize, lir::Register* src,

void shiftLeftC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
emit(con, lsli(t->low, b->low, getValue(a) & 0x1F));
} else {

@@ -98,7 +98,7 @@ void shiftRightR(Context* con, unsigned size, lir::Register* a, lir::Register* b

void shiftRightC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
emit(con, asri(t->low, b->low, getValue(a) & 0x1F));
} else {

@@ -130,7 +130,7 @@ void unsignedShiftRightR(Context* con, unsigned size, lir::Register* a, lir::Reg

void unsignedShiftRightC(Context* con, unsigned size UNUSED, lir::Constant* a, lir::Register* b, lir::Register* t)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);
if (getValue(a) & 0x1F) {
emit(con, lsri(t->low, b->low, getValue(a) & 0x1F));
} else {

@@ -223,15 +223,15 @@ void resolve(MyBlock* b)

void jumpR(Context* con, unsigned size UNUSED, lir::Register* target)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);
emit(con, bx(target->low));
}

void swapRR(Context* con, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b)
{
assert(con, aSize == vm::TargetBytesPerWord);
assertT(con, aSize == vm::TargetBytesPerWord);
assert(con, bSize == vm::TargetBytesPerWord);
assertT(con, bSize == vm::TargetBytesPerWord);

lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
moveRR(con, aSize, a, bSize, &tmp);

@@ -246,7 +246,7 @@ void moveRR(Context* con, unsigned srcSize, lir::Register* src,
bool srcIsFpr = isFpr(src);
bool dstIsFpr = isFpr(dst);
if (srcIsFpr || dstIsFpr) { // FPR(s) involved
assert(con, srcSize == dstSize);
assertT(con, srcSize == dstSize);
const bool dprec = srcSize == 8;
if (srcIsFpr && dstIsFpr) { // FPR to FPR
if (dprec) emit(con, fcpyd(fpr64(dst), fpr64(src))); // double

@@ -370,7 +370,7 @@ void subR(Context* con, unsigned size, lir::Register* a, lir::Register* b, lir::
void addC(Context* con, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);

int32_t v = a->value->value();
if (v) {

@@ -390,7 +390,7 @@ void addC(Context* con, unsigned size, lir::Constant* a,
void subC(Context* con, unsigned size, lir::Constant* a,
lir::Register* b, lir::Register* dst)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);

int32_t v = a->value->value();
if (v) {

@@ -654,7 +654,7 @@ void store(Context* con, unsigned size, lir::Register* src,
void moveRM(Context* con, unsigned srcSize, lir::Register* src,
unsigned dstSize UNUSED, lir::Memory* dst)
{
assert(con, srcSize == dstSize);
assertT(con, srcSize == dstSize);

store(con, srcSize, src, dst->base, dst->offset, dst->index, dst->scale, true);
}

@@ -866,7 +866,7 @@ void xorR(Context* con, unsigned size, lir::Register* a,
void moveAR2(Context* con, unsigned srcSize, lir::Address* src,
unsigned dstSize, lir::Register* dst)
{
assert(con, srcSize == 4 and dstSize == 4);
assertT(con, srcSize == 4 and dstSize == 4);

lir::Constant constant(src->address);
moveCR(con, srcSize, &constant, dstSize, dst);

@@ -884,14 +884,14 @@ void moveAR(Context* con, unsigned srcSize, lir::Address* src,
void compareRR(Context* con, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(con, !(isFpr(a) ^ isFpr(b))); // regs must be of the same type
assertT(con, !(isFpr(a) ^ isFpr(b))); // regs must be of the same type

if (!isFpr(a)) { // GPR compare
assert(con, aSize == 4 && bSize == 4);
assertT(con, aSize == 4 && bSize == 4);
/**///assert(con, b->low != a->low);
/**///assertT(con, b->low != a->low);
emit(con, cmp(b->low, a->low));
} else { // FPR compare
assert(con, aSize == bSize);
assertT(con, aSize == bSize);
if (aSize == 8) emit(con, fcmpd(fpr64(b), fpr64(a))); // double
else emit(con, fcmps(fpr32(b), fpr32(a))); // single
emit(con, fmstat());

@@ -901,7 +901,7 @@ void compareRR(Context* con, unsigned aSize, lir::Register* a,
void compareCR(Context* con, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(con, aSize == 4 and bSize == 4);
assertT(con, aSize == 4 and bSize == 4);

if (!isFpr(b) && a->value->resolved() &&
isOfWidth(a->value->value(), 8)) {

@@ -917,7 +917,7 @@ void compareCR(Context* con, unsigned aSize, lir::Constant* a,
void compareCM(Context* con, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Memory* b)
{
assert(con, aSize == 4 and bSize == 4);
assertT(con, aSize == 4 and bSize == 4);

lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
moveMR(con, bSize, b, bSize, &tmp);

@@ -928,7 +928,7 @@ void compareCM(Context* con, unsigned aSize, lir::Constant* a,
void compareRM(Context* con, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Memory* b)
{
assert(con, aSize == 4 and bSize == 4);
assertT(con, aSize == 4 and bSize == 4);

lir::Register tmp(con->client->acquireTemporary(GPR_MASK));
moveMR(con, bSize, b, bSize, &tmp);

@@ -1092,7 +1092,7 @@ void branchCR(Context* con, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Register* b,
lir::Constant* target)
{
assert(con, !isFloatBranch(op));
assertT(con, !isFloatBranch(op));

if (size > vm::TargetBytesPerWord) {
int64_t v = a->value->value();

@@ -1117,8 +1117,8 @@ void branchRM(Context* con, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Memory* b,
lir::Constant* target)
{
assert(con, !isFloatBranch(op));
assertT(con, !isFloatBranch(op));
assert(con, size <= vm::TargetBytesPerWord);
assertT(con, size <= vm::TargetBytesPerWord);

compareRM(con, size, a, size, b);
branch(con, op, target);

@@ -1128,8 +1128,8 @@ void branchCM(Context* con, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Memory* b,
lir::Constant* target)
{
assert(con, !isFloatBranch(op));
assertT(con, !isFloatBranch(op));
assert(con, size <= vm::TargetBytesPerWord);
assertT(con, size <= vm::TargetBytesPerWord);

compareCM(con, size, a, size, b);
branch(con, op, target);

@@ -1169,7 +1169,7 @@ void moveCM(Context* con, unsigned srcSize, lir::Constant* src,
void negateRR(Context* con, unsigned srcSize, lir::Register* src,
unsigned dstSize UNUSED, lir::Register* dst)
{
assert(con, srcSize == dstSize);
assertT(con, srcSize == dstSize);

emit(con, mvn(dst->low, src->low));
emit(con, SETS(addi(dst->low, dst->low, 1)));

@@ -1181,13 +1181,13 @@ void negateRR(Context* con, unsigned srcSize, lir::Register* src,

void callR(Context* con, unsigned size UNUSED, lir::Register* target)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);
emit(con, blx(target->low));
}

void callC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);

appendOffsetTask(con, target->value, offsetPromise(con));
emit(con, bl(0));

@@ -1195,7 +1195,7 @@ void callC(Context* con, unsigned size UNUSED, lir::Constant* target)

void longCallC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);

lir::Register tmp(4);
moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con));

@@ -1204,7 +1204,7 @@ void longCallC(Context* con, unsigned size UNUSED, lir::Constant* target)

void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);

lir::Register tmp(4); // a non-arg reg that we don't mind clobbering
moveCR2(con, vm::TargetBytesPerWord, target, &tmp, offsetPromise(con));

@@ -1213,7 +1213,7 @@ void longJumpC(Context* con, unsigned size UNUSED, lir::Constant* target)

void jumpC(Context* con, unsigned size UNUSED, lir::Constant* target)
{
assert(con, size == vm::TargetBytesPerWord);
assertT(con, size == vm::TargetBytesPerWord);

appendOffsetTask(con, target->value, offsetPromise(con));
emit(con, b(0));
@@ -74,8 +74,8 @@ nextFrame(ArchitectureContext* c UNUSED, uint8_t* start, unsigned size UNUSED,
unsigned footprint, void*, bool mostRecent,
int targetParameterFootprint, void** ip, void** stack)
{
assert(c, *ip >= start);
assertT(c, *ip >= start);
assert(c, *ip <= start + size);
assertT(c, *ip <= start + size);

uint8_t* instruction = static_cast<uint8_t*>(*ip);

@@ -89,7 +89,7 @@ nextFrame(ArchitectureContext* c UNUSED, uint8_t* start, unsigned size UNUSED,
}

if (instruction <= start) {
assert(c, mostRecent);
assertT(c, mostRecent);
*ip = static_cast<void**>(*stack)[0];
return;
}

@@ -99,7 +99,7 @@ nextFrame(ArchitectureContext* c UNUSED, uint8_t* start, unsigned size UNUSED,
start += (TargetBytesPerWord == 4 ? 3 : 4);

if (instruction <= start or *instruction == 0x5d) {
assert(c, mostRecent);
assertT(c, mostRecent);

*ip = static_cast<void**>(*stack)[1];
*stack = static_cast<void**>(*stack) + 1;

@@ -143,10 +143,10 @@ nextFrame(ArchitectureContext* c UNUSED, uint8_t* start, unsigned size UNUSED,
}

if (UseFramePointer and not mostRecent) {
assert(c, static_cast<void***>(*stack)[-1] + 1
assertT(c, static_cast<void***>(*stack)[-1] + 1
== static_cast<void**>(*stack) + offset);

assert(c, static_cast<void***>(*stack)[-1][1]
assertT(c, static_cast<void***>(*stack)[-1][1]
== static_cast<void**>(*stack)[offset]);
}

@@ -263,7 +263,7 @@ class MyArchitecture: public Architecture {
}

virtual int argumentRegister(unsigned index) {
assert(&c, TargetBytesPerWord == 8);
assertT(&c, TargetBytesPerWord == 8);
switch (index) {
#if AVIAN_TARGET_FORMAT == AVIAN_FORMAT_PE
case 0:

@@ -312,45 +312,45 @@ class MyArchitecture: public Architecture {
virtual void updateCall(lir::UnaryOperation op, void* returnAddress,
void* newTarget)
{
bool assertAlignment UNUSED;
bool assertTAlignment UNUSED;
switch (op) {
case lir::AlignedCall:
op = lir::Call;
assertAlignment = true;
assertTAlignment = true;
break;

case lir::AlignedJump:
op = lir::Jump;
assertAlignment = true;
assertTAlignment = true;
break;

case lir::AlignedLongCall:
op = lir::LongCall;
assertAlignment = true;
assertTAlignment = true;
break;

case lir::AlignedLongJump:
op = lir::LongJump;
assertAlignment = true;
assertTAlignment = true;
break;

default:
assertAlignment = false;
assertTAlignment = false;
}

if (TargetBytesPerWord == 4 or op == lir::Call or op == lir::Jump) {
uint8_t* instruction = static_cast<uint8_t*>(returnAddress) - 5;

assert(&c, ((op == lir::Call or op == lir::LongCall) and *instruction == 0xE8)
assertT(&c, ((op == lir::Call or op == lir::LongCall) and *instruction == 0xE8)
or ((op == lir::Jump or op == lir::LongJump) and *instruction == 0xE9));

assert(&c, (not assertAlignment)
assertT(&c, (not assertTAlignment)
or reinterpret_cast<uintptr_t>(instruction + 1) % 4 == 0);

intptr_t v = static_cast<uint8_t*>(newTarget)
- static_cast<uint8_t*>(returnAddress);

assert(&c, vm::fitsInInt32(v));
assertT(&c, vm::fitsInInt32(v));

int32_t v32 = v;

@@ -358,12 +358,12 @@ class MyArchitecture: public Architecture {
} else {
uint8_t* instruction = static_cast<uint8_t*>(returnAddress) - 13;

assert(&c, instruction[0] == 0x49 and instruction[1] == 0xBA);
assertT(&c, instruction[0] == 0x49 and instruction[1] == 0xBA);
assert(&c, instruction[10] == 0x41 and instruction[11] == 0xFF);
assertT(&c, instruction[10] == 0x41 and instruction[11] == 0xFF);
assert(&c, (op == lir::LongCall and instruction[12] == 0xD2)
assertT(&c, (op == lir::LongCall and instruction[12] == 0xD2)
or (op == lir::LongJump and instruction[12] == 0xE2));

assert(&c, (not assertAlignment)
assertT(&c, (not assertTAlignment)
or reinterpret_cast<uintptr_t>(instruction + 2) % 8 == 0);

memcpy(instruction + 2, &newTarget, 8);

@@ -827,7 +827,7 @@ class MyAssembler: public Assembler {
{ }

virtual void setClient(Client* client) {
assert(&c, c.client == 0);
assertT(&c, c.client == 0);
c.client = client;
}

@@ -989,7 +989,7 @@ class MyAssembler: public Assembler {
addCR(&c, TargetBytesPerWord, &footprint, TargetBytesPerWord, &stack);

if (returnAddressSurrogate != lir::NoRegister) {
assert(&c, offset > 0);
assertT(&c, offset > 0);

lir::Register ras(returnAddressSurrogate);
lir::Memory dst(rsp, offset * TargetBytesPerWord);

@@ -997,7 +997,7 @@ class MyAssembler: public Assembler {
}

if (framePointerSurrogate != lir::NoRegister) {
assert(&c, offset > 0);
assertT(&c, offset > 0);

lir::Register fps(framePointerSurrogate);
lir::Memory dst(rsp, (offset - 1) * TargetBytesPerWord);

@@ -1016,8 +1016,8 @@ class MyAssembler: public Assembler {
{
popFrame(frameFootprint);

assert(&c, argumentFootprint >= StackAlignmentInWords);
assertT(&c, argumentFootprint >= StackAlignmentInWords);
assert(&c, (argumentFootprint % StackAlignmentInWords) == 0);
assertT(&c, (argumentFootprint % StackAlignmentInWords) == 0);

if (TailCalls and argumentFootprint > StackAlignmentInWords) {
lir::Register returnAddress(rcx);

@@ -1069,15 +1069,15 @@ class MyAssembler: public Assembler {
virtual void apply(lir::TernaryOperation op, OperandInfo a, OperandInfo b, OperandInfo c)
{
if (isBranch(op)) {
assert(&this->c, a.size == b.size);
assertT(&this->c, a.size == b.size);
assert(&this->c, c.size == TargetBytesPerWord);
assertT(&this->c, c.size == TargetBytesPerWord);
assert(&this->c, c.type == lir::ConstantOperand);
assertT(&this->c, c.type == lir::ConstantOperand);

arch_->c.branchOperations[branchIndex(&(arch_->c), a.type, b.type)]
(&this->c, op, a.size, a.operand, b.operand, c.operand);
} else {
assert(&this->c, b.size == c.size);
assertT(&this->c, b.size == c.size);
assert(&this->c, b.type == c.type);
assertT(&this->c, b.type == c.type);

arch_->c.binaryOperations[index(&(arch_->c), op, a.type, b.type)]
(&this->c, a.size, a.operand, b.size, b.operand);
@ -154,8 +154,8 @@ void conditional(Context* c, unsigned condition, lir::Constant* a) {
void sseMoveRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize >= 4);
assertT(c, aSize >= 4);
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

if (isFloatReg(a) and isFloatReg(b)) {
if (aSize == 4) {
@ -185,7 +185,7 @@ void sseMoveRR(Context* c, unsigned aSize, lir::Register* a,
void sseMoveCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize <= vm::TargetBytesPerWord);
assertT(c, aSize <= vm::TargetBytesPerWord);
lir::Register tmp(c->client->acquireTemporary(GeneralRegisterMask));
moveCR2(c, aSize, a, aSize, &tmp, 0);
sseMoveRR(c, aSize, &tmp, bSize, b);
@ -195,7 +195,7 @@ void sseMoveCR(Context* c, unsigned aSize, lir::Constant* a,
void sseMoveMR(Context* c, unsigned aSize, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize >= 4);
assertT(c, aSize >= 4);

if (vm::TargetBytesPerWord == 4 and aSize == 8) {
opcode(c, 0xf3);
@ -212,8 +212,8 @@ void sseMoveMR(Context* c, unsigned aSize, lir::Memory* a,
void sseMoveRM(Context* c, unsigned aSize, lir::Register* a,
UNUSED unsigned bSize, lir::Memory* b)
{
assert(c, aSize >= 4);
assertT(c, aSize >= 4);
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

if (vm::TargetBytesPerWord == 4 and aSize == 8) {
opcode(c, 0x66);
@ -42,7 +42,7 @@ bool OffsetPromise::resolved() {
}

int64_t OffsetPromise::value() {
assert(c, resolved());
assertT(c, resolved());

if (value_ == -1) {
value_ = block->start + (offset - block->offset)
@ -39,7 +39,7 @@ unsigned index(ArchitectureContext*, lir::BinaryOperation operation,
unsigned index(ArchitectureContext* c UNUSED, lir::TernaryOperation operation,
lir::OperandType operand1, lir::OperandType operand2)
{
assert(c, not isBranch(operation));
assertT(c, not isBranch(operation));

return lir::BinaryOperationCount + operation
+ ((lir::BinaryOperationCount + lir::NonBranchTernaryOperationCount) * operand1)
@ -62,13 +62,13 @@ void storeLoadBarrier(Context* c) {
}

void callC(Context* c, unsigned size UNUSED, lir::Constant* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

unconditional(c, 0xe8, a);
}

void longCallC(Context* c, unsigned size, lir::Constant* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

if (vm::TargetBytesPerWord == 8) {
lir::Register r(LongJumpRegister);
@ -80,20 +80,20 @@ void longCallC(Context* c, unsigned size, lir::Constant* a) {
}

void jumpR(Context* c, unsigned size UNUSED, lir::Register* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

maybeRex(c, 4, a);
opcode(c, 0xff, 0xe0 + regCode(a));
}

void jumpC(Context* c, unsigned size UNUSED, lir::Constant* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

unconditional(c, 0xe9, a);
}

void jumpM(Context* c, unsigned size UNUSED, lir::Memory* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

maybeRex(c, 4, a);
opcode(c, 0xff);
@ -101,7 +101,7 @@ void jumpM(Context* c, unsigned size UNUSED, lir::Memory* a) {
}

void longJumpC(Context* c, unsigned size, lir::Constant* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

if (vm::TargetBytesPerWord == 8) {
lir::Register r(LongJumpRegister);
@ -113,7 +113,7 @@ void longJumpC(Context* c, unsigned size, lir::Constant* a) {
}

void callR(Context* c, unsigned size UNUSED, lir::Register* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

// maybeRex.W has no meaning here so we disable it
maybeRex(c, 4, a);
@ -121,7 +121,7 @@ void callR(Context* c, unsigned size UNUSED, lir::Register* a) {
}

void callM(Context* c, unsigned size UNUSED, lir::Memory* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

maybeRex(c, 4, a);
opcode(c, 0xff);
@ -134,7 +134,7 @@ void alignedCallC(Context* c, unsigned size, lir::Constant* a) {
}

void alignedLongCallC(Context* c, unsigned size, lir::Constant* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

if (vm::TargetBytesPerWord == 8) {
new (c->zone) AlignmentPadding(c, 2, 8);
@ -150,7 +150,7 @@ void alignedJumpC(Context* c, unsigned size, lir::Constant* a) {
}

void alignedLongJumpC(Context* c, unsigned size, lir::Constant* a) {
assert(c, size == vm::TargetBytesPerWord);
assertT(c, size == vm::TargetBytesPerWord);

if (vm::TargetBytesPerWord == 8) {
new (c->zone) AlignmentPadding(c, 2, 8);
@ -192,7 +192,7 @@ void popR(Context* c, unsigned size, lir::Register* a)
void negateR(Context* c, unsigned size, lir::Register* a)
{
if (vm::TargetBytesPerWord == 4 and size == 8) {
assert(c, a->low == rax and a->high == rdx);
assertT(c, a->low == rax and a->high == rdx);

ResolvedPromise zeroPromise(0);
lir::Constant zero(&zeroPromise);
@ -211,7 +211,7 @@ void negateR(Context* c, unsigned size, lir::Register* a)
void negateRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b UNUSED)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

negateR(c, aSize, a);
}
@ -229,10 +229,10 @@ void moveCR(Context* c, unsigned aSize, lir::Constant* a,
void moveZCR(Context* c, unsigned aSize UNUSED, lir::Constant* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, not isFloatReg(b));
assertT(c, not isFloatReg(b));
assert(c, aSize == 2);
assertT(c, aSize == 2);
assert(c, bSize == vm::TargetBytesPerWord);
assertT(c, bSize == vm::TargetBytesPerWord);
assert(c, a->value->resolved());
assertT(c, a->value->resolved());

maybeRex(c, vm::TargetBytesPerWord, b);
opcode(c, 0xb8 + regCode(b));
@ -242,8 +242,8 @@ void moveZCR(Context* c, unsigned aSize UNUSED, lir::Constant* a,
void swapRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);
assert(c, aSize == vm::TargetBytesPerWord);
assertT(c, aSize == vm::TargetBytesPerWord);

alwaysRex(c, aSize, a, b);
opcode(c, 0x87);
@ -277,7 +277,7 @@ void moveRR(Context* c, unsigned aSize, lir::Register* a,
switch (aSize) {
case 1:
if (vm::TargetBytesPerWord == 4 and a->low > rbx) {
assert(c, b->low <= rbx);
assertT(c, b->low <= rbx);

moveRR(c, vm::TargetBytesPerWord, a, vm::TargetBytesPerWord, b);
moveRR(c, 1, b, vm::TargetBytesPerWord, b);
@ -304,7 +304,7 @@ void moveRR(Context* c, unsigned aSize, lir::Register* a,
if (a->low == rax and b->low == rax and b->high == rdx) {
opcode(c, 0x99); //cdq
} else {
assert(c, b->low == rax and b->high == rdx);
assertT(c, b->low == rax and b->high == rdx);

moveRR(c, 4, a, 4, b);
moveRR(c, 4, b, 8, b);
@ -358,7 +358,7 @@ void moveMR(Context* c, unsigned aSize, lir::Memory* a,
modrmSibImm(c, b, a);
} else {
if (bSize == 8) {
assert(c, b->low == rax and b->high == rdx);
assertT(c, b->low == rax and b->high == rdx);

moveMR(c, 4, a, 4, b);
moveRR(c, 4, b, 8, b);
@ -391,7 +391,7 @@ void moveMR(Context* c, unsigned aSize, lir::Memory* a,
void moveRM(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Memory* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

if (isFloatReg(a)) {
sseMoveRM(c, aSize, a, bSize, b);
@ -445,7 +445,7 @@ void moveRM(Context* c, unsigned aSize, lir::Register* a,
void moveAR(Context* c, unsigned aSize, lir::Address* a,
unsigned bSize, lir::Register* b)
{
assert(c, vm::TargetBytesPerWord == 8 or (aSize == 4 and bSize == 4));
assertT(c, vm::TargetBytesPerWord == 8 or (aSize == 4 and bSize == 4));

lir::Constant constant(a->address);
lir::Memory memory(b->low, 0, -1, 0);
@ -531,8 +531,8 @@ void moveZRR(Context* c, unsigned aSize, lir::Register* a,
void moveZMR(Context* c, unsigned aSize UNUSED, lir::Memory* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, bSize == vm::TargetBytesPerWord);
assertT(c, bSize == vm::TargetBytesPerWord);
assert(c, aSize == 2);
assertT(c, aSize == 2);

maybeRex(c, bSize, b, a);
opcode(c, 0x0f, 0xb7);
@ -542,7 +542,7 @@ void moveZMR(Context* c, unsigned aSize UNUSED, lir::Memory* a,
void addCarryRR(Context* c, unsigned size, lir::Register* a,
lir::Register* b)
{
assert(c, vm::TargetBytesPerWord == 8 or size == 4);
assertT(c, vm::TargetBytesPerWord == 8 or size == 4);

maybeRex(c, size, a, b);
opcode(c, 0x11);
@ -552,7 +552,7 @@ void addCarryRR(Context* c, unsigned size, lir::Register* a,
void addRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

if (vm::TargetBytesPerWord == 4 and aSize == 8) {
lir::Register ah(a->high);
@ -585,7 +585,7 @@ void addCarryCR(Context* c, unsigned size, lir::Constant* a,
void addCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

int64_t v = a->value->value();
if (v) {
@ -624,7 +624,7 @@ void addCR(Context* c, unsigned aSize, lir::Constant* a,
void subtractBorrowCR(Context* c, unsigned size UNUSED, lir::Constant* a,
lir::Register* b)
{
assert(c, vm::TargetBytesPerWord == 8 or size == 4);
assertT(c, vm::TargetBytesPerWord == 8 or size == 4);

int64_t v = a->value->value();
if (vm::fitsInInt8(v)) {
@ -639,7 +639,7 @@ void subtractBorrowCR(Context* c, unsigned size UNUSED, lir::Constant* a,
void subtractCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

int64_t v = a->value->value();
if (v) {
@ -678,7 +678,7 @@ void subtractCR(Context* c, unsigned aSize, lir::Constant* a,
void subtractBorrowRR(Context* c, unsigned size, lir::Register* a,
lir::Register* b)
{
assert(c, vm::TargetBytesPerWord == 8 or size == 4);
assertT(c, vm::TargetBytesPerWord == 8 or size == 4);

maybeRex(c, size, a, b);
opcode(c, 0x19);
@ -688,7 +688,7 @@ void subtractBorrowRR(Context* c, unsigned size, lir::Register* a,
void subtractRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

if (vm::TargetBytesPerWord == 4 and aSize == 8) {
lir::Register ah(a->high);
@ -706,7 +706,7 @@ void subtractRR(Context* c, unsigned aSize, lir::Register* a,
void andRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);


if (vm::TargetBytesPerWord == 4 and aSize == 8) {
@ -725,7 +725,7 @@ void andRR(Context* c, unsigned aSize, lir::Register* a,
void andCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

int64_t v = a->value->value();

@ -763,7 +763,7 @@ void andCR(Context* c, unsigned aSize, lir::Constant* a,
void orRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

if (vm::TargetBytesPerWord == 4 and aSize == 8) {
lir::Register ah(a->high);
@ -781,7 +781,7 @@ void orRR(Context* c, unsigned aSize, lir::Register* a,
void orCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

int64_t v = a->value->value();
if (v) {
@ -836,7 +836,7 @@ void xorRR(Context* c, unsigned aSize, lir::Register* a,
void xorCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

int64_t v = a->value->value();
if (v) {
@ -875,14 +875,14 @@ void xorCR(Context* c, unsigned aSize, lir::Constant* a,
void multiplyRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);


if (vm::TargetBytesPerWord == 4 and aSize == 8) {
assert(c, b->high == rdx);
assertT(c, b->high == rdx);
assert(c, b->low != rax);
assertT(c, b->low != rax);
assert(c, a->low != rax);
assertT(c, a->low != rax);
assert(c, a->high != rax);
assertT(c, a->high != rax);

c->client->save(rax);

@ -925,8 +925,8 @@ void multiplyRR(Context* c, unsigned aSize, lir::Register* a,
void compareRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);
assert(c, aSize <= vm::TargetBytesPerWord);
assertT(c, aSize <= vm::TargetBytesPerWord);

maybeRex(c, aSize, a, b);
opcode(c, 0x39);
@ -936,8 +936,8 @@ void compareRR(Context* c, unsigned aSize, lir::Register* a,
void compareCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);
assert(c, vm::TargetBytesPerWord == 8 or aSize == 4);
assertT(c, vm::TargetBytesPerWord == 8 or aSize == 4);

if (a->value->resolved() and vm::fitsInInt32(a->value->value())) {
int64_t v = a->value->value();
@ -960,8 +960,8 @@ void compareCR(Context* c, unsigned aSize, lir::Constant* a,
void compareRM(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Memory* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);
assert(c, vm::TargetBytesPerWord == 8 or aSize == 4);
assertT(c, vm::TargetBytesPerWord == 8 or aSize == 4);

if (vm::TargetBytesPerWord == 8 and aSize == 4) {
moveRR(c, 4, a, 8, a);
@ -974,8 +974,8 @@ void compareRM(Context* c, unsigned aSize, lir::Register* a,
void compareCM(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Memory* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);
assert(c, vm::TargetBytesPerWord == 8 or aSize == 4);
assertT(c, vm::TargetBytesPerWord == 8 or aSize == 4);

if (a->value->resolved()) {
int64_t v = a->value->value();
@ -1001,7 +1001,7 @@ void compareCM(Context* c, unsigned aSize, lir::Constant* a,
void compareFloatRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

if (aSize == 8) {
opcode(c, 0x66);
@ -1113,7 +1113,7 @@ void branchCR(Context* c, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Register* b,
lir::Constant* target)
{
assert(c, not isFloatBranch(op));
assertT(c, not isFloatBranch(op));

if (size > vm::TargetBytesPerWord) {
int64_t v = a->value->value();
@ -1137,8 +1137,8 @@ void branchRM(Context* c, lir::TernaryOperation op, unsigned size,
lir::Register* a, lir::Memory* b,
lir::Constant* target)
{
assert(c, not isFloatBranch(op));
assertT(c, not isFloatBranch(op));
assert(c, size <= vm::TargetBytesPerWord);
assertT(c, size <= vm::TargetBytesPerWord);

compareRM(c, size, a, size, b);
branch(c, op, target);
@ -1148,8 +1148,8 @@ void branchCM(Context* c, lir::TernaryOperation op, unsigned size,
lir::Constant* a, lir::Memory* b,
lir::Constant* target)
{
assert(c, not isFloatBranch(op));
assertT(c, not isFloatBranch(op));
assert(c, size <= vm::TargetBytesPerWord);
assertT(c, size <= vm::TargetBytesPerWord);

compareCM(c, size, a, size, b);
branch(c, op, target);
@ -1158,7 +1158,7 @@ void branchCM(Context* c, lir::TernaryOperation op, unsigned size,
void multiplyCR(Context* c, unsigned aSize, lir::Constant* a,
unsigned bSize, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

if (vm::TargetBytesPerWord == 4 and aSize == 8) {
const uint32_t mask = GeneralRegisterMask & ~((1 << rax) | (1 << rdx));
@ -1197,10 +1197,10 @@ void multiplyCR(Context* c, unsigned aSize, lir::Constant* a,
void divideRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b UNUSED)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

assert(c, b->low == rax);
assertT(c, b->low == rax);
assert(c, a->low != rdx);
assertT(c, a->low != rdx);

c->client->save(rdx);

@ -1213,10 +1213,10 @@ void divideRR(Context* c, unsigned aSize, lir::Register* a,
void remainderRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, aSize == bSize);
assertT(c, aSize == bSize);

assert(c, b->low == rax);
assertT(c, b->low == rax);
assert(c, a->low != rdx);
assertT(c, a->low != rdx);

c->client->save(rdx);

@ -1290,7 +1290,7 @@ void shiftLeftRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
moveRR(c, 4, b, 4, &bh); // 2 bytes
xorRR(c, 4, b, 4, b); // 2 bytes
} else {
assert(c, a->low == rcx);
assertT(c, a->low == rcx);

maybeRex(c, bSize, a, b);
opcode(c, 0xd3, 0xe0 + regCode(b));
@ -1338,7 +1338,7 @@ void shiftRightRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
opcode(c, 0xc1, 0xf8 + b->high);
c->code.append(31);
} else {
assert(c, a->low == rcx);
assertT(c, a->low == rcx);

maybeRex(c, bSize, a, b);
opcode(c, 0xd3, 0xf8 + regCode(b));
@ -1383,7 +1383,7 @@ void unsignedShiftRightRR(Context* c, UNUSED unsigned aSize, lir::Register* a,
moveRR(c, 4, &bh, 4, b); // 2 bytes
xorRR(c, 4, &bh, 4, &bh); // 2 bytes
} else {
assert(c, a->low == rcx);
assertT(c, a->low == rcx);

maybeRex(c, bSize, a, b);
opcode(c, 0xd3, 0xe8 + regCode(b));
@ -1471,7 +1471,7 @@ void float2FloatMR(Context* c, unsigned aSize, lir::Memory* a,
void float2IntRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize, lir::Register* b)
{
assert(c, not isFloatReg(b));
assertT(c, not isFloatReg(b));
floatRegOp(c, aSize, a, bSize, b, 0x2c);
}

@ -1496,10 +1496,10 @@ void int2FloatMR(Context* c, unsigned aSize, lir::Memory* a,
void floatNegateRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, isFloatReg(a) and isFloatReg(b));
assertT(c, isFloatReg(a) and isFloatReg(b));
// unlike most of the other floating point code, this does NOT
// support doubles:
assert(c, aSize == 4);
assertT(c, aSize == 4);
ResolvedPromise pcon(0x80000000);
lir::Constant con(&pcon);
if (a->low == b->low) {
@ -1521,10 +1521,10 @@ void floatNegateRR(Context* c, unsigned aSize, lir::Register* a,
void floatAbsoluteRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b)
{
assert(c, isFloatReg(a) and isFloatReg(b));
assertT(c, isFloatReg(a) and isFloatReg(b));
// unlike most of the other floating point code, this does NOT
// support doubles:
assert(c, aSize == 4);
assertT(c, aSize == 4);
ResolvedPromise pcon(0x7fffffff);
lir::Constant con(&pcon);
if (a->low == b->low) {
@ -1545,7 +1545,7 @@ void floatAbsoluteRR(Context* c, unsigned aSize UNUSED, lir::Register* a,
void absoluteRR(Context* c, unsigned aSize, lir::Register* a,
unsigned bSize UNUSED, lir::Register* b UNUSED)
{
assert(c, aSize == bSize and a->low == rax and b->low == rax);
assertT(c, aSize == bSize and a->low == rax and b->low == rax);
lir::Register d
(c->client->acquireTemporary(static_cast<uint64_t>(1) << rdx));
maybeRex(c, aSize, a, b);

212 src/compile.cpp
@ -135,7 +135,7 @@ class MyThread: public Thread {
}

~CallTrace() {
assert(t, t->stack == 0);
assertT(t, t->stack == 0);

t->scratch = scratch;

@ -225,7 +225,7 @@ class MyThread: public Thread {
// interrupt us at any time and still get a consistent, accurate
// stack trace. See MyProcessor::getStackTrace for details.

assert(t, t->transition == 0);
assertT(t, t->transition == 0);

Context c(t, ip, stack, continuation, trace);

@ -710,7 +710,7 @@ localOffset(MyThread* t, int v, GcMethod* method)
+ parameterFootprint
- v - 1));

assert(t, offset >= 0);
assertT(t, offset >= 0);
return offset;
}

@ -746,7 +746,7 @@ class PoolElement: public avian::codegen::Promise {
{ }

virtual int64_t value() {
assert(t, resolved());
assertT(t, resolved());
return address;
}

@ -837,7 +837,7 @@ class TraceElementPromise: public avian::codegen::Promise {
TraceElementPromise(System* s, TraceElement* trace): s(s), trace(trace) { }

virtual int64_t value() {
assert(s, resolved());
assertT(s, resolved());
return trace->address->value();
}

@ -960,26 +960,26 @@ class Context {
if (size == 8) {
switch(op) {
case avian::codegen::lir::Absolute:
assert(t, resultSize == 8);
assertT(t, resultSize == 8);
return local::getThunk(t, absoluteLongThunk);

case avian::codegen::lir::FloatNegate:
assert(t, resultSize == 8);
assertT(t, resultSize == 8);
return local::getThunk(t, negateDoubleThunk);

case avian::codegen::lir::FloatSquareRoot:
assert(t, resultSize == 8);
assertT(t, resultSize == 8);
return local::getThunk(t, squareRootDoubleThunk);

case avian::codegen::lir::Float2Float:
assert(t, resultSize == 4);
assertT(t, resultSize == 4);
return local::getThunk(t, doubleToFloatThunk);

case avian::codegen::lir::Float2Int:
if (resultSize == 8) {
return local::getThunk(t, doubleToLongThunk);
} else {
assert(t, resultSize == 4);
assertT(t, resultSize == 4);
return local::getThunk(t, doubleToIntThunk);
}

@ -987,37 +987,37 @@ class Context {
if (resultSize == 8) {
return local::getThunk(t, longToDoubleThunk);
} else {
assert(t, resultSize == 4);
assertT(t, resultSize == 4);
return local::getThunk(t, longToFloatThunk);
}

default: abort(t);
}
} else {
assert(t, size == 4);
assertT(t, size == 4);

switch(op) {
case avian::codegen::lir::Absolute:
assert(t, resultSize == 4);
assertT(t, resultSize == 4);
return local::getThunk(t, absoluteIntThunk);

case avian::codegen::lir::FloatNegate:
assert(t, resultSize == 4);
assertT(t, resultSize == 4);
return local::getThunk(t, negateFloatThunk);

case avian::codegen::lir::FloatAbsolute:
assert(t, resultSize == 4);
assertT(t, resultSize == 4);
return local::getThunk(t, absoluteFloatThunk);

case avian::codegen::lir::Float2Float:
assert(t, resultSize == 8);
assertT(t, resultSize == 8);
return local::getThunk(t, floatToDoubleThunk);

case avian::codegen::lir::Float2Int:
if (resultSize == 4) {
return local::getThunk(t, floatToIntThunk);
} else {
assert(t, resultSize == 8);
assertT(t, resultSize == 8);
return local::getThunk(t, floatToLongThunk);
}

@ -1025,7 +1025,7 @@ class Context {
if (resultSize == 4) {
return local::getThunk(t, intToFloatThunk);
} else {
assert(t, resultSize == 8);
assertT(t, resultSize == 8);
return local::getThunk(t, intToDoubleThunk);
}

@ -1081,7 +1081,7 @@ class Context {
default: abort(t);
}
} else {
assert(t, size == 4);
assertT(t, size == 4);
switch (op) {
case avian::codegen::lir::Divide:
*threadParameter = true;
@ -1268,7 +1268,7 @@ ir::Value* loadLocal(Context* context,
ir::Value* result = context->compiler->loadLocal(
type, translateLocalIndex(context, footprint, index));

assert(context->thread, type == result->type);
assertT(context->thread, type == result->type);
return result;
}

@ -1278,7 +1278,7 @@ void storeLocal(Context* context,
ir::Value* value,
unsigned index)
{
assert(context->thread, type == value->type);
assertT(context->thread, type == value->type);
context->compiler->storeLocal(value,
translateLocalIndex(context, footprint, index));
}
@ -1432,7 +1432,7 @@ class Frame {

void set(unsigned index, ir::Type type)
{
assert(t, index < frameSize());
assertT(t, index < frameSize());

if (type == ir::Type::object()) {
context->eventLog.append(MarkEvent);
@ -1450,15 +1450,15 @@ class Frame {

ir::Type get(unsigned index)
{
assert(t, index < frameSize());
assertT(t, index < frameSize());
int si = index - localSize();
assert(t, si >= 0);
assertT(t, si >= 0);
return stackMap[si];
}

void popped(unsigned count) {
assert(t, sp >= count);
assertT(t, sp >= count);
assert(t, sp - count >= localSize());
assertT(t, sp - count >= localSize());
while (count) {
set(--sp, ir::Type::i4());
-- count;
@ -1539,24 +1539,24 @@ class Frame {

void push(ir::Type type, ir::Value* o)
{
assert(t, type == o->type);
assertT(t, type == o->type);
c->push(o->type, o);
assert(t, sp + 1 <= frameSize());
assertT(t, sp + 1 <= frameSize());
set(sp++, type);
}

void pushObject() {
c->pushed(ir::Type::object());

assert(t, sp + 1 <= frameSize());
assertT(t, sp + 1 <= frameSize());
set(sp++, ir::Type::object());
}

void pushLarge(ir::Type type, ir::Value* o)
{
assert(t, o->type == type);
assertT(t, o->type == type);
c->push(type, o);
assert(t, sp + 2 <= frameSize());
assertT(t, sp + 2 <= frameSize());
set(sp++, type);
set(sp++, type);
}
@ -1569,49 +1569,49 @@ class Frame {

ir::Value* pop(ir::Type type)
{
assert(t, sp >= 1);
assertT(t, sp >= 1);
assert(t, sp - 1 >= localSize());
assertT(t, sp - 1 >= localSize());
assert(t, get(sp - 1) == type);
assertT(t, get(sp - 1) == type);
set(--sp, ir::Type::i4());
return c->pop(type);
}

ir::Value* popLarge(ir::Type type)
{
assert(t, sp >= 1);
assertT(t, sp >= 1);
assert(t, sp - 2 >= localSize());
assertT(t, sp - 2 >= localSize());
assert(t, get(sp - 1) == type);
assertT(t, get(sp - 1) == type);
assert(t, get(sp - 2) == type);
assertT(t, get(sp - 2) == type);
sp -= 2;
return c->pop(type);
}

void load(ir::Type type, unsigned index)
{
assert(t, index < localSize());
assertT(t, index < localSize());
push(type, loadLocal(context, 1, type, index));
}

void loadLarge(ir::Type type, unsigned index) {
assert(t, index < static_cast<unsigned>(localSize() - 1));
assertT(t, index < static_cast<unsigned>(localSize() - 1));
pushLarge(type, loadLocal(context, 2, type, index));
}

void store(ir::Type type, unsigned index)
{
assert(t,
assertT(t,
type == ir::Type::i4() || type == ir::Type::f4()
|| type == ir::Type::object());
storeLocal(context, 1, type, pop(type), index);
unsigned ti = translateLocalIndex(context, 1, index);
assert(t, ti < localSize());
assertT(t, ti < localSize());
set(ti, type);
}

void storeLarge(ir::Type type, unsigned index) {
storeLocal(context, 2, type, popLarge(type), index);
unsigned ti = translateLocalIndex(context, 2, index);
assert(t, ti + 1 < localSize());
assertT(t, ti + 1 < localSize());
set(ti, type);
set(ti + 1, type);
}
@ -1619,8 +1619,8 @@ class Frame {
void dup() {
c->push(ir::Type::i4(), c->peek(1, 0));

assert(t, sp + 1 <= frameSize());
assertT(t, sp + 1 <= frameSize());
assert(t, sp - 1 >= localSize());
assertT(t, sp - 1 >= localSize());
set(sp, get(sp - 1));
++ sp;
}
@ -1633,8 +1633,8 @@ class Frame {
c->push(ir::Type::i4(), s1);
c->push(ir::Type::i4(), s0);

assert(t, sp + 1 <= frameSize());
assertT(t, sp + 1 <= frameSize());
assert(t, sp - 2 >= localSize());
assertT(t, sp - 2 >= localSize());

ir::Type b2 = get(sp - 2);
ir::Type b1 = get(sp - 1);
@ -1665,8 +1665,8 @@ class Frame {
c->push(ir::Type::i4(), s0);
}

assert(t, sp + 1 <= frameSize());
assertT(t, sp + 1 <= frameSize());
assert(t, sp - 3 >= localSize());
assertT(t, sp - 3 >= localSize());

ir::Type b3 = get(sp - 3);
ir::Type b2 = get(sp - 2);
@ -1693,8 +1693,8 @@ class Frame {
c->push(ir::Type::i4(), s0);
}

assert(t, sp + 2 <= frameSize());
assertT(t, sp + 2 <= frameSize());
assert(t, sp - 2 >= localSize());
assertT(t, sp - 2 >= localSize());

ir::Type b2 = get(sp - 2);
ir::Type b1 = get(sp - 1);
@ -1725,8 +1725,8 @@ class Frame {
c->push(ir::Type::i4(), s0);
}

assert(t, sp + 2 <= frameSize());
assertT(t, sp + 2 <= frameSize());
assert(t, sp - 3 >= localSize());
assertT(t, sp - 3 >= localSize());

ir::Type b3 = get(sp - 3);
ir::Type b2 = get(sp - 2);
@ -1774,8 +1774,8 @@ class Frame {
c->push(ir::Type::i4(), s0);
}

assert(t, sp + 2 <= frameSize());
assertT(t, sp + 2 <= frameSize());
assert(t, sp - 4 >= localSize());
assertT(t, sp - 4 >= localSize());

ir::Type b4 = get(sp - 4);
ir::Type b3 = get(sp - 3);
@ -1799,7 +1799,7 @@ class Frame {
c->push(ir::Type::i4(), s0);
c->push(ir::Type::i4(), s1);

assert(t, sp - 2 >= localSize());
assertT(t, sp - 2 >= localSize());

ir::Type saved = get(sp - 1);

@ -3028,7 +3028,7 @@ useLongJump(MyThread* t, uintptr_t target)
|
|||||||
uintptr_t start = reinterpret_cast<uintptr_t>(a->memory.begin());
|
uintptr_t start = reinterpret_cast<uintptr_t>(a->memory.begin());
|
||||||
uintptr_t end = reinterpret_cast<uintptr_t>(a->memory.begin())
|
uintptr_t end = reinterpret_cast<uintptr_t>(a->memory.begin())
|
||||||
+ a->memory.count;
|
+ a->memory.count;
|
||||||
assert(t, end - start < reach);
|
assertT(t, end - start < reach);
|
||||||
|
|
||||||
return (target > end && (target - start) > reach)
|
return (target > end && (target - start) > reach)
|
||||||
or (target < start && (end - target) > reach);
|
or (target < start && (end - target) > reach);
|
||||||
@ -3315,7 +3315,7 @@ returnsNext(MyThread* t, object code, unsigned ip)
|
|||||||
case goto_: {
|
case goto_: {
|
||||||
uint32_t offset = codeReadInt16(t, code, ++ip);
|
uint32_t offset = codeReadInt16(t, code, ++ip);
|
||||||
uint32_t newIp = (ip - 3) + offset;
|
uint32_t newIp = (ip - 3) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
return returnsNext(t, code, newIp);
|
return returnsNext(t, code, newIp);
|
||||||
}
|
}
|
||||||
@ -3323,7 +3323,7 @@ returnsNext(MyThread* t, object code, unsigned ip)
|
|||||||
case goto_w: {
|
case goto_w: {
|
||||||
uint32_t offset = codeReadInt32(t, code, ++ip);
|
uint32_t offset = codeReadInt32(t, code, ++ip);
|
||||||
uint32_t newIp = (ip - 5) + offset;
|
uint32_t newIp = (ip - 5) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
return returnsNext(t, code, newIp);
|
return returnsNext(t, code, newIp);
|
||||||
}
|
}
|
||||||
@ -3417,7 +3417,7 @@ bool integerBranch(MyThread* t,
|
|||||||
unsigned instruction = codeBody(t, code, ip++);
|
unsigned instruction = codeBody(t, code, ip++);
|
||||||
uint32_t offset = codeReadInt16(t, code, ip);
|
uint32_t offset = codeReadInt16(t, code, ip);
|
||||||
uint32_t newIp = (ip - 3) + offset;
|
uint32_t newIp = (ip - 3) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
ir::Value* target = frame->machineIpValue(newIp);
|
ir::Value* target = frame->machineIpValue(newIp);
|
||||||
|
|
||||||
@ -3495,7 +3495,7 @@ bool floatBranch(MyThread* t,
|
|||||||
unsigned instruction = codeBody(t, code, ip++);
|
unsigned instruction = codeBody(t, code, ip++);
|
||||||
uint32_t offset = codeReadInt16(t, code, ip);
|
uint32_t offset = codeReadInt16(t, code, ip);
|
||||||
uint32_t newIp = (ip - 3) + offset;
|
uint32_t newIp = (ip - 3) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
ir::Value* target = frame->machineIpValue(newIp);
|
ir::Value* target = frame->machineIpValue(newIp);
|
||||||
|
|
||||||
@ -4611,7 +4611,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
case goto_: {
|
case goto_: {
|
||||||
uint32_t offset = codeReadInt16(t, code, ip);
|
uint32_t offset = codeReadInt16(t, code, ip);
|
||||||
uint32_t newIp = (ip - 3) + offset;
|
uint32_t newIp = (ip - 3) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
if(newIp <= ip) {
|
if(newIp <= ip) {
|
||||||
compileSafePoint(t, c, frame);
|
compileSafePoint(t, c, frame);
|
||||||
@ -4624,7 +4624,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
case goto_w: {
|
case goto_w: {
|
||||||
uint32_t offset = codeReadInt32(t, code, ip);
|
uint32_t offset = codeReadInt32(t, code, ip);
|
||||||
uint32_t newIp = (ip - 5) + offset;
|
uint32_t newIp = (ip - 5) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
if(newIp <= ip) {
|
if(newIp <= ip) {
|
||||||
compileSafePoint(t, c, frame);
|
compileSafePoint(t, c, frame);
|
||||||
@ -4738,7 +4738,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
case if_acmpne: {
|
case if_acmpne: {
|
||||||
uint32_t offset = codeReadInt16(t, code, ip);
|
uint32_t offset = codeReadInt16(t, code, ip);
|
||||||
newIp = (ip - 3) + offset;
|
newIp = (ip - 3) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
if(newIp <= ip) {
|
if(newIp <= ip) {
|
||||||
compileSafePoint(t, c, frame);
|
compileSafePoint(t, c, frame);
|
||||||
@ -4759,7 +4759,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
case if_icmple: {
|
case if_icmple: {
|
||||||
uint32_t offset = codeReadInt16(t, code, ip);
|
uint32_t offset = codeReadInt16(t, code, ip);
|
||||||
newIp = (ip - 3) + offset;
|
newIp = (ip - 3) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
if(newIp <= ip) {
|
if(newIp <= ip) {
|
||||||
compileSafePoint(t, c, frame);
|
compileSafePoint(t, c, frame);
|
||||||
@ -4780,7 +4780,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
case ifle: {
|
case ifle: {
|
||||||
uint32_t offset = codeReadInt16(t, code, ip);
|
uint32_t offset = codeReadInt16(t, code, ip);
|
||||||
newIp = (ip - 3) + offset;
|
newIp = (ip - 3) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
ir::Value* target = frame->machineIpValue(newIp);
|
ir::Value* target = frame->machineIpValue(newIp);
|
||||||
|
|
||||||
@ -4798,7 +4798,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
case ifnonnull: {
|
case ifnonnull: {
|
||||||
uint32_t offset = codeReadInt16(t, code, ip);
|
uint32_t offset = codeReadInt16(t, code, ip);
|
||||||
newIp = (ip - 3) + offset;
|
newIp = (ip - 3) + offset;
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
if(newIp <= ip) {
|
if(newIp <= ip) {
|
||||||
compileSafePoint(t, c, frame);
|
compileSafePoint(t, c, frame);
|
||||||
@ -5161,7 +5161,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
newIp = thisIp + offset;
|
newIp = thisIp + offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
frame->startSubroutine(newIp, ip);
|
frame->startSubroutine(newIp, ip);
|
||||||
|
|
||||||
@ -5365,7 +5365,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
ir::Value* key = frame->pop(ir::Type::i4());
|
ir::Value* key = frame->pop(ir::Type::i4());
|
||||||
|
|
||||||
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
|
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
|
||||||
assert(t, defaultIp < codeLength(t, code));
|
assertT(t, defaultIp < codeLength(t, code));
|
||||||
|
|
||||||
int32_t pairCount = codeReadInt32(t, code, ip);
|
int32_t pairCount = codeReadInt32(t, code, ip);
|
||||||
|
|
||||||
@ -5380,7 +5380,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
unsigned index = ip + (i * 8);
|
unsigned index = ip + (i * 8);
|
||||||
int32_t key = codeReadInt32(t, code, index);
|
int32_t key = codeReadInt32(t, code, index);
|
||||||
uint32_t newIp = base + codeReadInt32(t, code, index);
|
uint32_t newIp = base + codeReadInt32(t, code, index);
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
ipTable[i] = newIp;
|
ipTable[i] = newIp;
|
||||||
|
|
||||||
@ -5390,7 +5390,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
}
|
}
|
||||||
c->poolAppendPromise(frame->addressPromise(frame->machineIp(newIp)));
|
c->poolAppendPromise(frame->addressPromise(frame->machineIp(newIp)));
|
||||||
}
|
}
|
||||||
assert(t, start);
|
assertT(t, start);
|
||||||
|
|
||||||
ir::Value* address = c->call(
|
ir::Value* address = c->call(
|
||||||
c->constant(getThunk(t, lookUpAddressThunk), ir::Type::iptr()),
|
c->constant(getThunk(t, lookUpAddressThunk), ir::Type::iptr()),
|
||||||
@ -5925,7 +5925,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
ip = (ip + 3) & ~3; // pad to four byte boundary
|
ip = (ip + 3) & ~3; // pad to four byte boundary
|
||||||
|
|
||||||
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
|
uint32_t defaultIp = base + codeReadInt32(t, code, ip);
|
||||||
assert(t, defaultIp < codeLength(t, code));
|
assertT(t, defaultIp < codeLength(t, code));
|
||||||
|
|
||||||
int32_t bottom = codeReadInt32(t, code, ip);
|
int32_t bottom = codeReadInt32(t, code, ip);
|
||||||
int32_t top = codeReadInt32(t, code, ip);
|
int32_t top = codeReadInt32(t, code, ip);
|
||||||
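Note on the switch-handling hunks above: the tableswitch/lookupswitch operand area is read only after the bytecode index is rounded up to a four-byte boundary ("pad to four byte boundary"). A minimal sketch of that padding arithmetic, outside this tree and with hypothetical names, assuming only standard C++:

    #include <cassert>
    #include <cstdint>

    // Round an offset up to the next multiple of four. Adding 3 pushes any
    // unaligned value past the boundary, and masking with ~3 clears the two
    // low bits, so already-aligned values pass through unchanged.
    inline uint32_t padToFourBytes(uint32_t ip)
    {
      return (ip + 3) & ~static_cast<uint32_t>(3);
    }

    int main()
    {
      assert(padToFourBytes(8) == 8);
      assert(padToFourBytes(9) == 12);
      assert(padToFourBytes(11) == 12);
    }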
@ -5937,7 +5937,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
for (int32_t i = 0; i < top - bottom + 1; ++i) {
|
for (int32_t i = 0; i < top - bottom + 1; ++i) {
|
||||||
unsigned index = ip + (i * 4);
|
unsigned index = ip + (i * 4);
|
||||||
uint32_t newIp = base + codeReadInt32(t, code, index);
|
uint32_t newIp = base + codeReadInt32(t, code, index);
|
||||||
assert(t, newIp < codeLength(t, code));
|
assertT(t, newIp < codeLength(t, code));
|
||||||
|
|
||||||
ipTable[i] = newIp;
|
ipTable[i] = newIp;
|
||||||
|
|
||||||
@ -5947,7 +5947,7 @@ compile(MyThread* t, Frame* initialFrame, unsigned initialIp,
|
|||||||
start = p;
|
start = p;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
assert(t, start);
|
assertT(t, start);
|
||||||
|
|
||||||
ir::Value* key = frame->pop(ir::Type::i4());
|
ir::Value* key = frame->pop(ir::Type::i4());
|
||||||
|
|
||||||
@ -6280,7 +6280,7 @@ object translateExceptionHandlerTable(MyThread* t,
|
|||||||
duplicatedBaseIp + exceptionHandlerEnd(oldHandler));
|
duplicatedBaseIp + exceptionHandlerEnd(oldHandler));
|
||||||
|
|
||||||
if (LIKELY(handlerStart >= 0)) {
|
if (LIKELY(handlerStart >= 0)) {
|
||||||
assert(
|
assertT(
|
||||||
t,
|
t,
|
||||||
handlerStart
|
handlerStart
|
||||||
< static_cast<int>(codeLength(t, context->method->code())
|
< static_cast<int>(codeLength(t, context->method->code())
|
||||||
@ -6291,8 +6291,8 @@ object translateExceptionHandlerTable(MyThread* t,
|
|||||||
duplicatedBaseIp + exceptionHandlerEnd(oldHandler),
|
duplicatedBaseIp + exceptionHandlerEnd(oldHandler),
|
||||||
duplicatedBaseIp + exceptionHandlerStart(oldHandler));
|
duplicatedBaseIp + exceptionHandlerStart(oldHandler));
|
||||||
|
|
||||||
assert(t, handlerEnd >= 0);
|
assertT(t, handlerEnd >= 0);
|
||||||
assert(t,
|
assertT(t,
|
||||||
handlerEnd <= static_cast<int>(
|
handlerEnd <= static_cast<int>(
|
||||||
codeLength(t, context->method->code())
|
codeLength(t, context->method->code())
|
||||||
* (context->subroutineCount + 1)));
|
* (context->subroutineCount + 1)));
|
||||||
@ -6486,7 +6486,7 @@ unsigned calculateFrameMaps(MyThread* t,
|
|||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(context->thread, ip * mapSize <= context->rootTable.count);
|
assertT(context->thread, ip * mapSize <= context->rootTable.count);
|
||||||
uintptr_t* tableRoots = context->rootTable.begin() + (ip * mapSize);
|
uintptr_t* tableRoots = context->rootTable.begin() + (ip * mapSize);
|
||||||
|
|
||||||
if (context->visitTable[ip] > 1) {
|
if (context->visitTable[ip] > 1) {
|
||||||
@ -6718,7 +6718,7 @@ makeSimpleFrameMapTable(MyThread* t, Context* context, uint8_t* start,
|
|||||||
object table = reinterpret_cast<object>(makeIntArray
|
object table = reinterpret_cast<object>(makeIntArray
|
||||||
(t, elementCount + ceilingDivide(elementCount * mapSize, 32)));
|
(t, elementCount + ceilingDivide(elementCount * mapSize, 32)));
|
||||||
|
|
||||||
assert(t, intArrayLength(t, table) == elementCount
|
assertT(t, intArrayLength(t, table) == elementCount
|
||||||
+ simpleFrameMapTableSize(t, context->method, table));
|
+ simpleFrameMapTableSize(t, context->method, table));
|
||||||
|
|
||||||
for (unsigned i = 0; i < elementCount; ++i) {
|
for (unsigned i = 0; i < elementCount; ++i) {
|
||||||
@ -6727,7 +6727,7 @@ makeSimpleFrameMapTable(MyThread* t, Context* context, uint8_t* start,
|
|||||||
intArrayBody(t, table, i) = static_cast<intptr_t>(p->address->value())
|
intArrayBody(t, table, i) = static_cast<intptr_t>(p->address->value())
|
||||||
- reinterpret_cast<intptr_t>(start);
|
- reinterpret_cast<intptr_t>(start);
|
||||||
|
|
||||||
assert(t, elementCount + ceilingDivide((i + 1) * mapSize, 32)
|
assertT(t, elementCount + ceilingDivide((i + 1) * mapSize, 32)
|
||||||
<= intArrayLength(t, table));
|
<= intArrayLength(t, table));
|
||||||
|
|
||||||
if (mapSize) {
|
if (mapSize) {
|
||||||
@ -6857,7 +6857,7 @@ finish(MyThread* t, FixedAllocator* allocator, Context* context)
|
|||||||
// unsigned pathFootprint = 0;
|
// unsigned pathFootprint = 0;
|
||||||
// unsigned mapCount = 0;
|
// unsigned mapCount = 0;
|
||||||
for (TraceElement* p = context->traceLog; p; p = p->next) {
|
for (TraceElement* p = context->traceLog; p; p = p->next) {
|
||||||
assert(t, index < context->traceLogCount);
|
assertT(t, index < context->traceLogCount);
|
||||||
|
|
||||||
if (p->address) {
|
if (p->address) {
|
||||||
|
|
||||||
@ -7325,7 +7325,7 @@ invokeNative(MyThread* t)
|
|||||||
t->trace->nativeMethod = target;
|
t->trace->nativeMethod = target;
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, t->tailAddress == 0);
|
assertT(t, t->tailAddress == 0);
|
||||||
|
|
||||||
uint64_t result = 0;
|
uint64_t result = 0;
|
||||||
|
|
||||||
@ -7609,7 +7609,7 @@ void
|
|||||||
callContinuation(MyThread* t, object continuation, object result,
|
callContinuation(MyThread* t, object continuation, object result,
|
||||||
object exception, void* ip, void* stack)
|
object exception, void* ip, void* stack)
|
||||||
{
|
{
|
||||||
assert(t, t->exception == 0);
|
assertT(t, t->exception == 0);
|
||||||
|
|
||||||
if (exception) {
|
if (exception) {
|
||||||
t->exception = exception;
|
t->exception = exception;
|
||||||
@ -7651,8 +7651,8 @@ returnClass(MyThread* t, GcMethod* method)
|
|||||||
name = reinterpret_cast<object>(makeByteArray(t, length + 1));
|
name = reinterpret_cast<object>(makeByteArray(t, length + 1));
|
||||||
memcpy(&byteArrayBody(t, name, 0), spec, length);
|
memcpy(&byteArrayBody(t, name, 0), spec, length);
|
||||||
} else {
|
} else {
|
||||||
assert(t, *spec == 'L');
|
assertT(t, *spec == 'L');
|
||||||
assert(t, spec[length - 1] == ';');
|
assertT(t, spec[length - 1] == ';');
|
||||||
name = reinterpret_cast<object>(makeByteArray(t, length - 1));
|
name = reinterpret_cast<object>(makeByteArray(t, length - 1));
|
||||||
memcpy(&byteArrayBody(t, name, 0), spec + 1, length - 2);
|
memcpy(&byteArrayBody(t, name, 0), spec + 1, length - 2);
|
||||||
}
|
}
|
||||||
@ -7703,7 +7703,7 @@ jumpAndInvoke(MyThread* t, GcMethod* method, void* stack, ...)
|
|||||||
}
|
}
|
||||||
va_end(a);
|
va_end(a);
|
||||||
|
|
||||||
assert(t, t->exception == 0);
|
assertT(t, t->exception == 0);
|
||||||
|
|
||||||
popResources(t);
|
popResources(t);
|
||||||
|
|
||||||
@ -8030,7 +8030,7 @@ class ArgumentList {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void addObject(object v) {
|
void addObject(object v) {
|
||||||
assert(t, position < size);
|
assertT(t, position < size);
|
||||||
|
|
||||||
array[position] = reinterpret_cast<uintptr_t>(v);
|
array[position] = reinterpret_cast<uintptr_t>(v);
|
||||||
objectMask[position] = true;
|
objectMask[position] = true;
|
||||||
@ -8038,7 +8038,7 @@ class ArgumentList {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void addInt(uintptr_t v) {
|
void addInt(uintptr_t v) {
|
||||||
assert(t, position < size);
|
assertT(t, position < size);
|
||||||
|
|
||||||
array[position] = v;
|
array[position] = v;
|
||||||
objectMask[position] = false;
|
objectMask[position] = false;
|
||||||
@ -8046,7 +8046,7 @@ class ArgumentList {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void addLong(uint64_t v) {
|
void addLong(uint64_t v) {
|
||||||
assert(t, position < size - 1);
|
assertT(t, position < size - 1);
|
||||||
|
|
||||||
memcpy(array + position, &v, 8);
|
memcpy(array + position, &v, 8);
|
||||||
|
|
||||||
@ -8114,7 +8114,7 @@ invoke(Thread* thread, GcMethod* method, ArgumentList* arguments)
|
|||||||
|
|
||||||
MyCheckpoint checkpoint(t);
|
MyCheckpoint checkpoint(t);
|
||||||
|
|
||||||
assert(t, arguments->position == arguments->size);
|
assertT(t, arguments->position == arguments->size);
|
||||||
|
|
||||||
result = vmInvoke
|
result = vmInvoke
|
||||||
(t, reinterpret_cast<void*>(methodAddress(t, method)),
|
(t, reinterpret_cast<void*>(methodAddress(t, method)),
|
||||||
@ -8616,12 +8616,12 @@ class MyProcessor: public Processor {
|
|||||||
virtual object
|
virtual object
|
||||||
invokeArray(Thread* t, GcMethod* method, object this_, object arguments)
|
invokeArray(Thread* t, GcMethod* method, object this_, object arguments)
|
||||||
{
|
{
|
||||||
assert(t, t->exception == 0);
|
assertT(t, t->exception == 0);
|
||||||
|
|
||||||
assert(t, t->state == Thread::ActiveState
|
assertT(t, t->state == Thread::ActiveState
|
||||||
or t->state == Thread::ExclusiveState);
|
or t->state == Thread::ExclusiveState);
|
||||||
|
|
||||||
assert(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
||||||
|
|
||||||
method = findMethod(t, method, this_);
|
method = findMethod(t, method, this_);
|
||||||
|
|
||||||
@ -8646,12 +8646,12 @@ class MyProcessor: public Processor {
|
|||||||
virtual object
|
virtual object
|
||||||
invokeArray(Thread* t, GcMethod* method, object this_, const jvalue* arguments)
|
invokeArray(Thread* t, GcMethod* method, object this_, const jvalue* arguments)
|
||||||
{
|
{
|
||||||
assert(t, t->exception == 0);
|
assertT(t, t->exception == 0);
|
||||||
|
|
||||||
assert(t, t->state == Thread::ActiveState
|
assertT(t, t->state == Thread::ActiveState
|
||||||
or t->state == Thread::ExclusiveState);
|
or t->state == Thread::ExclusiveState);
|
||||||
|
|
||||||
assert(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
||||||
|
|
||||||
method = findMethod(t, method, this_);
|
method = findMethod(t, method, this_);
|
||||||
|
|
||||||
@ -8677,12 +8677,12 @@ class MyProcessor: public Processor {
|
|||||||
invokeList(Thread* t, GcMethod* method, object this_, bool indirectObjects,
|
invokeList(Thread* t, GcMethod* method, object this_, bool indirectObjects,
|
||||||
va_list arguments)
|
va_list arguments)
|
||||||
{
|
{
|
||||||
assert(t, t->exception == 0);
|
assertT(t, t->exception == 0);
|
||||||
|
|
||||||
assert(t, t->state == Thread::ActiveState
|
assertT(t, t->state == Thread::ActiveState
|
||||||
or t->state == Thread::ExclusiveState);
|
or t->state == Thread::ExclusiveState);
|
||||||
|
|
||||||
assert(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
||||||
|
|
||||||
method = findMethod(t, method, this_);
|
method = findMethod(t, method, this_);
|
||||||
|
|
||||||
@ -8709,9 +8709,9 @@ class MyProcessor: public Processor {
|
|||||||
const char* methodName, const char* methodSpec,
|
const char* methodName, const char* methodSpec,
|
||||||
object this_, va_list arguments)
|
object this_, va_list arguments)
|
||||||
{
|
{
|
||||||
assert(t, t->exception == 0);
|
assertT(t, t->exception == 0);
|
||||||
|
|
||||||
assert(t, t->state == Thread::ActiveState
|
assertT(t, t->state == Thread::ActiveState
|
||||||
or t->state == Thread::ExclusiveState);
|
or t->state == Thread::ExclusiveState);
|
||||||
|
|
||||||
unsigned size = parameterFootprint(t, methodSpec, this_ == 0);
|
unsigned size = parameterFootprint(t, methodSpec, this_ == 0);
|
||||||
@ -8724,7 +8724,7 @@ class MyProcessor: public Processor {
|
|||||||
GcMethod* method = resolveMethod
|
GcMethod* method = resolveMethod
|
||||||
(t, loader, className, methodName, methodSpec);
|
(t, loader, className, methodName, methodSpec);
|
||||||
|
|
||||||
assert(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
||||||
|
|
||||||
PROTECT(t, method);
|
PROTECT(t, method);
|
||||||
|
|
||||||
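The invoke entry points changed above all guard the same invariant: a method takes a receiver exactly when it is not static, written as an exclusive-or of the ACC_STATIC flag and the null-ness of this_. An illustrative standalone check of that invariant (names hypothetical, not code from this repository; ACC_STATIC is the standard JVM access-flag value):

    #include <cassert>

    const unsigned ACC_STATIC = 0x0008;  // JVM access flag

    // Inequality of the two booleans is the same exclusive-or the asserts
    // above spell with 'xor'.
    bool receiverMatchesFlags(unsigned flags, const void* receiver)
    {
      bool isStatic = (flags & ACC_STATIC) != 0;
      bool hasReceiver = receiver != nullptr;
      return isStatic != hasReceiver;
    }

    int main()
    {
      int dummy;
      assert(receiverMatchesFlags(ACC_STATIC, nullptr));  // static, no receiver
      assert(receiverMatchesFlags(0, &dummy));            // instance with receiver
      assert(not receiverMatchesFlags(0, nullptr));       // instance without receiver
    }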
@ -9398,7 +9398,7 @@ fixupHeap(MyThread* t UNUSED, uintptr_t* map, unsigned size, uintptr_t* heap)
|
|||||||
unsigned index = indexOf(word, bit);
|
unsigned index = indexOf(word, bit);
|
||||||
|
|
||||||
uintptr_t* p = heap + index;
|
uintptr_t* p = heap + index;
|
||||||
assert(t, *p);
|
assertT(t, *p);
|
||||||
|
|
||||||
uintptr_t number = *p & BootMask;
|
uintptr_t number = *p & BootMask;
|
||||||
uintptr_t mark = *p >> BootShift;
|
uintptr_t mark = *p >> BootShift;
|
||||||
@ -9472,7 +9472,7 @@ fixupMethods(Thread* t, GcHashMap* map, BootImage* image UNUSED, uint8_t* code)
|
|||||||
for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
|
for (unsigned i = 0; i < arrayLength(t, classMethodTable(t, c)); ++i) {
|
||||||
GcMethod* method = cast<GcMethod>(t, arrayBody(t, classMethodTable(t, c), i));
|
GcMethod* method = cast<GcMethod>(t, arrayBody(t, classMethodTable(t, c), i));
|
||||||
if (method->code()) {
|
if (method->code()) {
|
||||||
assert(t, methodCompiled(t, method)
|
assertT(t, methodCompiled(t, method)
|
||||||
<= static_cast<int32_t>(image->codeSize));
|
<= static_cast<int32_t>(image->codeSize));
|
||||||
|
|
||||||
codeCompiled(t, method->code())
|
codeCompiled(t, method->code())
|
||||||
@ -9536,7 +9536,7 @@ fixupVirtualThunks(MyThread* t, uint8_t* code)
|
|||||||
void
|
void
|
||||||
boot(MyThread* t, BootImage* image, uint8_t* code)
|
boot(MyThread* t, BootImage* image, uint8_t* code)
|
||||||
{
|
{
|
||||||
assert(t, image->magic == BootImage::Magic);
|
assertT(t, image->magic == BootImage::Magic);
|
||||||
|
|
||||||
unsigned* bootClassTable = reinterpret_cast<unsigned*>(image + 1);
|
unsigned* bootClassTable = reinterpret_cast<unsigned*>(image + 1);
|
||||||
unsigned* appClassTable = bootClassTable + image->bootClassCount;
|
unsigned* appClassTable = bootClassTable + image->bootClassCount;
|
||||||
@ -10014,7 +10014,7 @@ compile(MyThread* t, FixedAllocator* allocator, BootContext* bootContext,
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, (method->flags() & ACC_NATIVE) == 0);
|
assertT(t, (method->flags() & ACC_NATIVE) == 0);
|
||||||
|
|
||||||
// We must avoid acquiring any locks until after the first pass of
|
// We must avoid acquiring any locks until after the first pass of
|
||||||
// compilation, since this pass may trigger classloading operations
|
// compilation, since this pass may trigger classloading operations
|
||||||
|
@ -115,9 +115,9 @@ class Segment {
|
|||||||
Iterator(Map* map, unsigned start, unsigned end):
|
Iterator(Map* map, unsigned start, unsigned end):
|
||||||
map(map)
|
map(map)
|
||||||
{
|
{
|
||||||
assert(map->segment->context, map->bitsPerRecord == 1);
|
assertT(map->segment->context, map->bitsPerRecord == 1);
|
||||||
assert(map->segment->context, map->segment);
|
assertT(map->segment->context, map->segment);
|
||||||
assert(map->segment->context, start <= map->segment->position());
|
assertT(map->segment->context, start <= map->segment->position());
|
||||||
|
|
||||||
if (end > map->segment->position()) end = map->segment->position();
|
if (end > map->segment->position()) end = map->segment->position();
|
||||||
|
|
||||||
@ -159,8 +159,8 @@ class Segment {
|
|||||||
}
|
}
|
||||||
|
|
||||||
unsigned next() {
|
unsigned next() {
|
||||||
assert(map->segment->context, hasMore());
|
assertT(map->segment->context, hasMore());
|
||||||
assert(map->segment->context, map->segment);
|
assertT(map->segment->context, map->segment);
|
||||||
|
|
||||||
return (index++) * map->scale;
|
return (index++) * map->scale;
|
||||||
}
|
}
|
||||||
@ -194,9 +194,9 @@ class Segment {
|
|||||||
{ }
|
{ }
|
||||||
|
|
||||||
void init() {
|
void init() {
|
||||||
assert(segment->context, bitsPerRecord);
|
assertT(segment->context, bitsPerRecord);
|
||||||
assert(segment->context, scale);
|
assertT(segment->context, scale);
|
||||||
assert(segment->context, powerOfTwo(scale));
|
assertT(segment->context, powerOfTwo(scale));
|
||||||
|
|
||||||
if (data == 0) {
|
if (data == 0) {
|
||||||
data = segment->data + segment->capacity()
|
data = segment->data + segment->capacity()
|
||||||
@ -223,7 +223,7 @@ class Segment {
|
|||||||
{
|
{
|
||||||
unsigned result
|
unsigned result
|
||||||
= ceilingDivide(ceilingDivide(capacity, scale) * bitsPerRecord, BitsPerWord);
|
= ceilingDivide(ceilingDivide(capacity, scale) * bitsPerRecord, BitsPerWord);
|
||||||
assert(c, result);
|
assertT(c, result);
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -242,8 +242,8 @@ class Segment {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void replaceWith(Map* m) {
|
void replaceWith(Map* m) {
|
||||||
assert(segment->context, bitsPerRecord == m->bitsPerRecord);
|
assertT(segment->context, bitsPerRecord == m->bitsPerRecord);
|
||||||
assert(segment->context, scale == m->scale);
|
assertT(segment->context, scale == m->scale);
|
||||||
|
|
||||||
data = m->data;
|
data = m->data;
|
||||||
|
|
||||||
@ -258,19 +258,19 @@ class Segment {
|
|||||||
}
|
}
|
||||||
|
|
||||||
unsigned indexOf(void* p) {
|
unsigned indexOf(void* p) {
|
||||||
assert(segment->context, segment->almostContains(p));
|
assertT(segment->context, segment->almostContains(p));
|
||||||
assert(segment->context, segment->capacity());
|
assertT(segment->context, segment->capacity());
|
||||||
return indexOf(segment->indexOf(p));
|
return indexOf(segment->indexOf(p));
|
||||||
}
|
}
|
||||||
|
|
||||||
void clearBit(unsigned i) {
|
void clearBit(unsigned i) {
|
||||||
assert(segment->context, wordOf(i) < size());
|
assertT(segment->context, wordOf(i) < size());
|
||||||
|
|
||||||
vm::clearBit(data, i);
|
vm::clearBit(data, i);
|
||||||
}
|
}
|
||||||
|
|
||||||
void setBit(unsigned i) {
|
void setBit(unsigned i) {
|
||||||
assert(segment->context, wordOf(i) < size());
|
assertT(segment->context, wordOf(i) < size());
|
||||||
|
|
||||||
vm::markBit(data, i);
|
vm::markBit(data, i);
|
||||||
}
|
}
|
||||||
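The Segment::Map hunks above manipulate a bitmap over heap words: bit i lives in word i / BitsPerWord, and the backing array is sized with a ceiling division so the last partial word is still allocated. A self-contained sketch of that layout, with illustrative names rather than the VM's own helpers:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    const unsigned BitsPerWord = sizeof(uintptr_t) * 8;

    // Round-up division, as used when sizing the map's word array.
    inline unsigned ceilingDivide(unsigned n, unsigned d) { return (n + d - 1) / d; }

    struct BitMap {
      std::vector<uintptr_t> words;

      explicit BitMap(unsigned bitCount)
          : words(ceilingDivide(bitCount, BitsPerWord)) {}

      void set(unsigned i)   { words[i / BitsPerWord] |=  (uintptr_t(1) << (i % BitsPerWord)); }
      void clear(unsigned i) { words[i / BitsPerWord] &= ~(uintptr_t(1) << (i % BitsPerWord)); }
      bool get(unsigned i) const
      { return (words[i / BitsPerWord] >> (i % BitsPerWord)) & 1; }
    };

    int main()
    {
      BitMap map(100);
      map.set(70);
      assert(map.get(70));
      map.clear(70);
      assert(not map.get(70));
    }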
@ -306,15 +306,15 @@ class Segment {
|
|||||||
|
|
||||||
void set(void* p, unsigned v = 1) {
|
void set(void* p, unsigned v = 1) {
|
||||||
setOnly(p, v);
|
setOnly(p, v);
|
||||||
assert(segment->context, get(p) == v);
|
assertT(segment->context, get(p) == v);
|
||||||
if (child) child->set(p, v);
|
if (child) child->set(p, v);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef USE_ATOMIC_OPERATIONS
|
#ifdef USE_ATOMIC_OPERATIONS
|
||||||
void markAtomic(void* p) {
|
void markAtomic(void* p) {
|
||||||
assert(segment->context, bitsPerRecord == 1);
|
assertT(segment->context, bitsPerRecord == 1);
|
||||||
markBitAtomic(data, indexOf(p));
|
markBitAtomic(data, indexOf(p));
|
||||||
assert(segment->context, getBit(data, indexOf(p)));
|
assertT(segment->context, getBit(data, indexOf(p)));
|
||||||
if (child) child->markAtomic(p);
|
if (child) child->markAtomic(p);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -343,7 +343,7 @@ class Segment {
|
|||||||
minimum = 1;
|
minimum = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(context, desired >= minimum);
|
assertT(context, desired >= minimum);
|
||||||
|
|
||||||
capacity_ = desired;
|
capacity_ = desired;
|
||||||
|
|
||||||
@ -453,7 +453,7 @@ class Segment {
|
|||||||
abort(context);
|
abort(context);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
assert(context, map == 0);
|
assertT(context, map == 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -466,18 +466,18 @@ class Segment {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void* get(unsigned offset) {
|
void* get(unsigned offset) {
|
||||||
assert(context, offset <= position());
|
assertT(context, offset <= position());
|
||||||
return data + offset;
|
return data + offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
unsigned indexOf(void* p) {
|
unsigned indexOf(void* p) {
|
||||||
assert(context, almostContains(p));
|
assertT(context, almostContains(p));
|
||||||
return static_cast<uintptr_t*>(p) - data;
|
return static_cast<uintptr_t*>(p) - data;
|
||||||
}
|
}
|
||||||
|
|
||||||
void* allocate(unsigned size) {
|
void* allocate(unsigned size) {
|
||||||
assert(context, size);
|
assertT(context, size);
|
||||||
assert(context, position() + size <= capacity());
|
assertT(context, position() + size <= capacity());
|
||||||
|
|
||||||
void* p = data + position();
|
void* p = data + position();
|
||||||
position_ += size;
|
position_ += size;
|
||||||
@ -520,8 +520,8 @@ class Fixie {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void add(Context* c UNUSED, Fixie** handle) {
|
void add(Context* c UNUSED, Fixie** handle) {
|
||||||
assert(c, this->handle == 0);
|
assertT(c, this->handle == 0);
|
||||||
assert(c, next == 0);
|
assertT(c, next == 0);
|
||||||
|
|
||||||
this->handle = handle;
|
this->handle = handle;
|
||||||
if (handle) {
|
if (handle) {
|
||||||
@ -535,7 +535,7 @@ class Fixie {
|
|||||||
|
|
||||||
void remove(Context* c UNUSED) {
|
void remove(Context* c UNUSED) {
|
||||||
if (handle) {
|
if (handle) {
|
||||||
assert(c, *handle == this);
|
assertT(c, *handle == this);
|
||||||
*handle = next;
|
*handle = next;
|
||||||
}
|
}
|
||||||
if (next) {
|
if (next) {
|
||||||
@ -878,21 +878,21 @@ wasCollected(Context* c, void* o)
|
|||||||
inline void*
|
inline void*
|
||||||
follow(Context* c UNUSED, void* o)
|
follow(Context* c UNUSED, void* o)
|
||||||
{
|
{
|
||||||
assert(c, wasCollected(c, o));
|
assertT(c, wasCollected(c, o));
|
||||||
return fieldAtOffset<void*>(o, 0);
|
return fieldAtOffset<void*>(o, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
inline void*&
|
inline void*&
|
||||||
parent(Context* c UNUSED, void* o)
|
parent(Context* c UNUSED, void* o)
|
||||||
{
|
{
|
||||||
assert(c, wasCollected(c, o));
|
assertT(c, wasCollected(c, o));
|
||||||
return fieldAtOffset<void*>(o, BytesPerWord);
|
return fieldAtOffset<void*>(o, BytesPerWord);
|
||||||
}
|
}
|
||||||
|
|
||||||
inline uintptr_t*
|
inline uintptr_t*
|
||||||
bitset(Context* c UNUSED, void* o)
|
bitset(Context* c UNUSED, void* o)
|
||||||
{
|
{
|
||||||
assert(c, wasCollected(c, o));
|
assertT(c, wasCollected(c, o));
|
||||||
return &fieldAtOffset<uintptr_t>(o, BytesPerWord * 2);
|
return &fieldAtOffset<uintptr_t>(o, BytesPerWord * 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -939,7 +939,7 @@ kill(Fixie* fixies)
|
|||||||
void
|
void
|
||||||
killFixies(Context* c)
|
killFixies(Context* c)
|
||||||
{
|
{
|
||||||
assert(c, c->markedFixies == 0);
|
assertT(c, c->markedFixies == 0);
|
||||||
|
|
||||||
if (c->mode == Heap::MajorCollection) {
|
if (c->mode == Heap::MajorCollection) {
|
||||||
kill(c->tenuredFixies);
|
kill(c->tenuredFixies);
|
||||||
@ -951,7 +951,7 @@ killFixies(Context* c)
|
|||||||
void
|
void
|
||||||
sweepFixies(Context* c)
|
sweepFixies(Context* c)
|
||||||
{
|
{
|
||||||
assert(c, c->markedFixies == 0);
|
assertT(c, c->markedFixies == 0);
|
||||||
|
|
||||||
if (c->mode == Heap::MajorCollection) {
|
if (c->mode == Heap::MajorCollection) {
|
||||||
free(c, &(c->tenuredFixies));
|
free(c, &(c->tenuredFixies));
|
||||||
@ -1007,7 +1007,7 @@ sweepFixies(Context* c)
|
|||||||
inline void*
|
inline void*
|
||||||
copyTo(Context* c, Segment* s, void* o, unsigned size)
|
copyTo(Context* c, Segment* s, void* o, unsigned size)
|
||||||
{
|
{
|
||||||
assert(c, s->remaining() >= size);
|
assertT(c, s->remaining() >= size);
|
||||||
void* dst = s->allocate(size);
|
void* dst = s->allocate(size);
|
||||||
c->client->copy(o, dst);
|
c->client->copy(o, dst);
|
||||||
return dst;
|
return dst;
|
||||||
@ -1025,14 +1025,14 @@ copy2(Context* c, void* o)
|
|||||||
unsigned size = c->client->copiedSizeInWords(o);
|
unsigned size = c->client->copiedSizeInWords(o);
|
||||||
|
|
||||||
if (c->gen2.contains(o)) {
|
if (c->gen2.contains(o)) {
|
||||||
assert(c, c->mode == Heap::MajorCollection);
|
assertT(c, c->mode == Heap::MajorCollection);
|
||||||
|
|
||||||
return copyTo(c, &(c->nextGen2), o, size);
|
return copyTo(c, &(c->nextGen2), o, size);
|
||||||
} else if (c->gen1.contains(o)) {
|
} else if (c->gen1.contains(o)) {
|
||||||
unsigned age = c->ageMap.get(o);
|
unsigned age = c->ageMap.get(o);
|
||||||
if (age == TenureThreshold) {
|
if (age == TenureThreshold) {
|
||||||
if (c->mode == Heap::MinorCollection) {
|
if (c->mode == Heap::MinorCollection) {
|
||||||
assert(c, c->gen2.remaining() >= size);
|
assertT(c, c->gen2.remaining() >= size);
|
||||||
|
|
||||||
if (c->gen2Base == Top) {
|
if (c->gen2Base == Top) {
|
||||||
c->gen2Base = c->gen2.position();
|
c->gen2Base = c->gen2.position();
|
||||||
@ -1053,9 +1053,9 @@ copy2(Context* c, void* o)
|
|||||||
return o;
|
return o;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
assert(c, not c->nextGen1.contains(o));
|
assertT(c, not c->nextGen1.contains(o));
|
||||||
assert(c, not c->nextGen2.contains(o));
|
assertT(c, not c->nextGen2.contains(o));
|
||||||
assert(c, not immortalHeapContains(c, o));
|
assertT(c, not immortalHeapContains(c, o));
|
||||||
|
|
||||||
o = copyTo(c, &(c->nextGen1), o, size);
|
o = copyTo(c, &(c->nextGen1), o, size);
|
||||||
|
|
||||||
@ -1171,7 +1171,7 @@ updateHeapMap(Context* c, void* p, void* target, unsigned offset, void* result)
|
|||||||
{
|
{
|
||||||
if (target and c->client->isFixed(target)) {
|
if (target and c->client->isFixed(target)) {
|
||||||
Fixie* f = fixie(target);
|
Fixie* f = fixie(target);
|
||||||
assert(c, offset == 0 or f->hasMask());
|
assertT(c, offset == 0 or f->hasMask());
|
||||||
|
|
||||||
if (static_cast<unsigned>(f->age + 1) >= FixieTenureThreshold) {
|
if (static_cast<unsigned>(f->age + 1) >= FixieTenureThreshold) {
|
||||||
if (DebugFixies) {
|
if (DebugFixies) {
|
||||||
@ -1280,7 +1280,7 @@ unsigned
|
|||||||
bitsetNext(Context* c, uintptr_t* p)
|
bitsetNext(Context* c, uintptr_t* p)
|
||||||
{
|
{
|
||||||
bool more UNUSED = bitsetHasMore(p);
|
bool more UNUSED = bitsetHasMore(p);
|
||||||
assert(c, more);
|
assertT(c, more);
|
||||||
|
|
||||||
switch (*p) {
|
switch (*p) {
|
||||||
case 0: abort(c);
|
case 0: abort(c);
|
||||||
@ -1288,7 +1288,7 @@ bitsetNext(Context* c, uintptr_t* p)
|
|||||||
case BitsetExtensionBit: {
|
case BitsetExtensionBit: {
|
||||||
uintptr_t i = p[1];
|
uintptr_t i = p[1];
|
||||||
uintptr_t word = wordOf(i);
|
uintptr_t word = wordOf(i);
|
||||||
assert(c, word < p[2]);
|
assertT(c, word < p[2]);
|
||||||
for (uintptr_t bit = bitOf(i); bit < BitsPerWord; ++bit) {
|
for (uintptr_t bit = bitOf(i); bit < BitsPerWord; ++bit) {
|
||||||
if (p[word + 3] & (static_cast<uintptr_t>(1) << bit)) {
|
if (p[word + 3] & (static_cast<uintptr_t>(1) << bit)) {
|
||||||
p[1] = indexOf(word, bit) + 1;
|
p[1] = indexOf(word, bit) + 1;
|
||||||
@ -1477,7 +1477,7 @@ collect(Context* c, void** p, void* target, unsigned offset)
|
|||||||
|
|
||||||
c->client->walk(copy, &walker);
|
c->client->walk(copy, &walker);
|
||||||
|
|
||||||
assert(c, walker.total > 1);
|
assertT(c, walker.total > 1);
|
||||||
|
|
||||||
if (walker.total == 3 and bitsetHasMore(bitset(c, original))) {
|
if (walker.total == 3 and bitsetHasMore(bitset(c, original))) {
|
||||||
parent_ = original;
|
parent_ = original;
|
||||||
@ -1568,7 +1568,7 @@ visitDirtyFixies(Context* c, Fixie** p)
|
|||||||
fprintf(stderr, "done cleaning fixie %p\n", f);
|
fprintf(stderr, "done cleaning fixie %p\n", f);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(c, wasDirty);
|
assertT(c, wasDirty);
|
||||||
|
|
||||||
if (clean) {
|
if (clean) {
|
||||||
markClean(c, f);
|
markClean(c, f);
|
||||||
@ -1618,7 +1618,7 @@ collect(Context* c, Segment::Map* map, unsigned start, unsigned end,
|
|||||||
for (Segment::Map::Iterator it(map, start, end); it.hasMore();) {
|
for (Segment::Map::Iterator it(map, start, end); it.hasMore();) {
|
||||||
wasDirty = true;
|
wasDirty = true;
|
||||||
if (map->child) {
|
if (map->child) {
|
||||||
assert(c, map->scale > 1);
|
assertT(c, map->scale > 1);
|
||||||
unsigned s = it.next();
|
unsigned s = it.next();
|
||||||
unsigned e = s + map->scale;
|
unsigned e = s + map->scale;
|
||||||
|
|
||||||
@ -1630,7 +1630,7 @@ collect(Context* c, Segment::Map* map, unsigned start, unsigned end,
|
|||||||
*dirty = true;
|
*dirty = true;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
assert(c, map->scale == 1);
|
assertT(c, map->scale == 1);
|
||||||
void** p = reinterpret_cast<void**>(map->segment->get(it.next()));
|
void** p = reinterpret_cast<void**>(map->segment->get(it.next()));
|
||||||
|
|
||||||
map->clearOnly(p);
|
map->clearOnly(p);
|
||||||
@ -1648,7 +1648,7 @@ collect(Context* c, Segment::Map* map, unsigned start, unsigned end,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(c, wasDirty or not expectDirty);
|
assertT(c, wasDirty or not expectDirty);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@ -1879,7 +1879,7 @@ class MyHeap: public Heap {
|
|||||||
{ }
|
{ }
|
||||||
|
|
||||||
virtual void setClient(Heap::Client* client) {
|
virtual void setClient(Heap::Client* client) {
|
||||||
assert(&c, c.client == 0);
|
assertT(&c, c.client == 0);
|
||||||
c.client = client;
|
c.client = client;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1954,7 +1954,7 @@ class MyHeap: public Heap {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool needsMark(void* p) {
|
bool needsMark(void* p) {
|
||||||
assert(&c, c.client->isFixed(p) or (not immortalHeapContains(&c, p)));
|
assertT(&c, c.client->isFixed(p) or (not immortalHeapContains(&c, p)));
|
||||||
|
|
||||||
if (c.client->isFixed(p)) {
|
if (c.client->isFixed(p)) {
|
||||||
return fixie(p)->age >= FixieTenureThreshold;
|
return fixie(p)->age >= FixieTenureThreshold;
|
||||||
@ -1980,7 +1980,7 @@ class MyHeap: public Heap {
|
|||||||
|
|
||||||
if (c.client->isFixed(p)) {
|
if (c.client->isFixed(p)) {
|
||||||
Fixie* f = fixie(p);
|
Fixie* f = fixie(p);
|
||||||
assert(&c, offset == 0 or f->hasMask());
|
assertT(&c, offset == 0 or f->hasMask());
|
||||||
|
|
||||||
bool dirty = false;
|
bool dirty = false;
|
||||||
for (unsigned i = 0; i < count; ++i) {
|
for (unsigned i = 0; i < count; ++i) {
|
||||||
@ -1997,7 +1997,7 @@ class MyHeap: public Heap {
|
|||||||
#else
|
#else
|
||||||
markBit(f->mask(), offset + i);
|
markBit(f->mask(), offset + i);
|
||||||
#endif
|
#endif
|
||||||
assert(&c, getBit(f->mask(), offset + i));
|
assertT(&c, getBit(f->mask(), offset + i));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2007,7 +2007,7 @@ class MyHeap: public Heap {
|
|||||||
if (c.gen2.contains(p)) {
|
if (c.gen2.contains(p)) {
|
||||||
map = &(c.heapMap);
|
map = &(c.heapMap);
|
||||||
} else {
|
} else {
|
||||||
assert(&c, c.nextGen2.contains(p));
|
assertT(&c, c.nextGen2.contains(p));
|
||||||
map = &(c.nextHeapMap);
|
map = &(c.nextHeapMap);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2097,7 +2097,7 @@ class MyHeap: public Heap {
|
|||||||
|
|
||||||
virtual void dispose() {
|
virtual void dispose() {
|
||||||
c.dispose();
|
c.dispose();
|
||||||
assert(&c, c.count == 0);
|
assertT(&c, c.count == 0);
|
||||||
c.system->free(this);
|
c.system->free(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -176,7 +176,7 @@ Set::dispose()
|
|||||||
Set::Entry*
|
Set::Entry*
|
||||||
add(Context* c UNUSED, Set* set, object p, uint32_t number)
|
add(Context* c UNUSED, Set* set, object p, uint32_t number)
|
||||||
{
|
{
|
||||||
assert(c->thread, set->size < set->capacity);
|
assertT(c->thread, set->size < set->capacity);
|
||||||
|
|
||||||
unsigned index = hash(p, set->capacity);
|
unsigned index = hash(p, set->capacity);
|
||||||
|
|
||||||
|
@ -59,7 +59,7 @@ pushObject(Thread* t, object o)
|
|||||||
fprintf(stderr, "push object %p at %d\n", o, t->sp);
|
fprintf(stderr, "push object %p at %d\n", o, t->sp);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, t->sp + 1 < stackSizeInWords(t) / 2);
|
assertT(t, t->sp + 1 < stackSizeInWords(t) / 2);
|
||||||
t->stack[(t->sp * 2) ] = ObjectTag;
|
t->stack[(t->sp * 2) ] = ObjectTag;
|
||||||
t->stack[(t->sp * 2) + 1] = reinterpret_cast<uintptr_t>(o);
|
t->stack[(t->sp * 2) + 1] = reinterpret_cast<uintptr_t>(o);
|
||||||
++ t->sp;
|
++ t->sp;
|
||||||
@ -72,7 +72,7 @@ pushInt(Thread* t, uint32_t v)
|
|||||||
fprintf(stderr, "push int %d at %d\n", v, t->sp);
|
fprintf(stderr, "push int %d at %d\n", v, t->sp);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, t->sp + 1 < stackSizeInWords(t) / 2);
|
assertT(t, t->sp + 1 < stackSizeInWords(t) / 2);
|
||||||
t->stack[(t->sp * 2) ] = IntTag;
|
t->stack[(t->sp * 2) ] = IntTag;
|
||||||
t->stack[(t->sp * 2) + 1] = v;
|
t->stack[(t->sp * 2) + 1] = v;
|
||||||
++ t->sp;
|
++ t->sp;
|
||||||
@ -111,7 +111,7 @@ popObject(Thread* t)
|
|||||||
t->sp - 1);
|
t->sp - 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, t->stack[(t->sp - 1) * 2] == ObjectTag);
|
assertT(t, t->stack[(t->sp - 1) * 2] == ObjectTag);
|
||||||
return reinterpret_cast<object>(t->stack[((-- t->sp) * 2) + 1]);
|
return reinterpret_cast<object>(t->stack[((-- t->sp) * 2) + 1]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -124,7 +124,7 @@ popInt(Thread* t)
|
|||||||
t->sp - 1);
|
t->sp - 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, t->stack[(t->sp - 1) * 2] == IntTag);
|
assertT(t, t->stack[(t->sp - 1) * 2] == IntTag);
|
||||||
return t->stack[((-- t->sp) * 2) + 1];
|
return t->stack[((-- t->sp) * 2) + 1];
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -165,8 +165,8 @@ peekObject(Thread* t, unsigned index)
|
|||||||
index);
|
index);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, index < stackSizeInWords(t) / 2);
|
assertT(t, index < stackSizeInWords(t) / 2);
|
||||||
assert(t, t->stack[index * 2] == ObjectTag);
|
assertT(t, t->stack[index * 2] == ObjectTag);
|
||||||
return reinterpret_cast<object>(t->stack[(index * 2) + 1]);
|
return reinterpret_cast<object>(t->stack[(index * 2) + 1]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -179,8 +179,8 @@ peekInt(Thread* t, unsigned index)
|
|||||||
index);
|
index);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, index < stackSizeInWords(t) / 2);
|
assertT(t, index < stackSizeInWords(t) / 2);
|
||||||
assert(t, t->stack[index * 2] == IntTag);
|
assertT(t, t->stack[index * 2] == IntTag);
|
||||||
return t->stack[(index * 2) + 1];
|
return t->stack[(index * 2) + 1];
|
||||||
}
|
}
|
||||||
|
|
||||||
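The interpreter stack hunks above (pushObject, pushInt, popObject, popInt, peekObject, peekInt) store two words per slot: a tag word identifying the slot as a reference or a raw integer, followed by the value, which is why the capacity checks divide the word count by two. A minimal sketch of that tagged-slot layout, assuming made-up tag values and a fixed capacity:

    #include <cassert>
    #include <cstdint>

    enum Tag : uintptr_t { IntTag = 0, ObjectTag = 1 };

    struct TaggedStack {
      static const unsigned Capacity = 64;   // slots, not words
      uintptr_t words[Capacity * 2];
      unsigned sp = 0;

      void pushInt(uint32_t v)
      {
        assert(sp + 1 < Capacity);
        words[sp * 2] = IntTag;       // tag word
        words[sp * 2 + 1] = v;        // value word
        ++sp;
      }

      uint32_t popInt()
      {
        assert(words[(sp - 1) * 2] == IntTag);
        return static_cast<uint32_t>(words[(--sp) * 2 + 1]);
      }
    };

    int main()
    {
      TaggedStack s;
      s.pushInt(42);
      assert(s.popInt() == 42);
    }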
@ -1465,7 +1465,7 @@ interpret3(Thread* t, const int base)
|
|||||||
|
|
||||||
object field = resolveField(t, frameMethod(t, frame), index - 1);
|
object field = resolveField(t, frameMethod(t, frame), index - 1);
|
||||||
|
|
||||||
assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
|
assertT(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
|
||||||
|
|
||||||
PROTECT(t, field);
|
PROTECT(t, field);
|
||||||
|
|
||||||
@ -1483,7 +1483,7 @@ interpret3(Thread* t, const int base)
|
|||||||
|
|
||||||
object field = resolveField(t, frameMethod(t, frame), index - 1);
|
object field = resolveField(t, frameMethod(t, frame), index - 1);
|
||||||
|
|
||||||
assert(t, fieldFlags(t, field) & ACC_STATIC);
|
assertT(t, fieldFlags(t, field) & ACC_STATIC);
|
||||||
|
|
||||||
PROTECT(t, field);
|
PROTECT(t, field);
|
||||||
|
|
||||||
@ -2436,7 +2436,7 @@ interpret3(Thread* t, const int base)
|
|||||||
|
|
||||||
object field = resolveField(t, frameMethod(t, frame), index - 1);
|
object field = resolveField(t, frameMethod(t, frame), index - 1);
|
||||||
|
|
||||||
assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
|
assertT(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
|
||||||
PROTECT(t, field);
|
PROTECT(t, field);
|
||||||
|
|
||||||
{ ACQUIRE_FIELD_FOR_WRITE(t, field);
|
{ ACQUIRE_FIELD_FOR_WRITE(t, field);
|
||||||
@ -2507,7 +2507,7 @@ interpret3(Thread* t, const int base)
|
|||||||
|
|
||||||
object field = resolveField(t, frameMethod(t, frame), index - 1);
|
object field = resolveField(t, frameMethod(t, frame), index - 1);
|
||||||
|
|
||||||
assert(t, fieldFlags(t, field) & ACC_STATIC);
|
assertT(t, fieldFlags(t, field) & ACC_STATIC);
|
||||||
|
|
||||||
PROTECT(t, field);
|
PROTECT(t, field);
|
||||||
|
|
||||||
@ -2657,10 +2657,10 @@ interpret3(Thread* t, const int base)
|
|||||||
// bootstrap class, so we need to load the real class to get the
|
// bootstrap class, so we need to load the real class to get the
|
||||||
// real method and call it.
|
// real method and call it.
|
||||||
|
|
||||||
assert(t, frameNext(t, frame) >= base);
|
assertT(t, frameNext(t, frame) >= base);
|
||||||
popFrame(t);
|
popFrame(t);
|
||||||
|
|
||||||
assert(t, codeBody(t, code, ip - 3) == invokevirtual);
|
assertT(t, codeBody(t, code, ip - 3) == invokevirtual);
|
||||||
ip -= 2;
|
ip -= 2;
|
||||||
|
|
||||||
uint16_t index = codeReadInt16(t, code, ip);
|
uint16_t index = codeReadInt16(t, code, ip);
|
||||||
@ -2668,7 +2668,7 @@ interpret3(Thread* t, const int base)
|
|||||||
|
|
||||||
unsigned parameterFootprint = method->parameterFootprint();
|
unsigned parameterFootprint = method->parameterFootprint();
|
||||||
GcClass* class_ = objectClass(t, peekObject(t, sp - parameterFootprint));
|
GcClass* class_ = objectClass(t, peekObject(t, sp - parameterFootprint));
|
||||||
assert(t, class_->vmFlags() & BootstrapFlag);
|
assertT(t, class_->vmFlags() & BootstrapFlag);
|
||||||
|
|
||||||
resolveClass(t, classLoader(t, frameMethod(t, frame)->class_()),
|
resolveClass(t, classLoader(t, frameMethod(t, frame)->class_()),
|
||||||
class_->name());
|
class_->name());
|
||||||
@ -3121,10 +3121,10 @@ class MyProcessor: public Processor {
|
|||||||
{
|
{
|
||||||
Thread* t = static_cast<Thread*>(vmt);
|
Thread* t = static_cast<Thread*>(vmt);
|
||||||
|
|
||||||
assert(t, t->state == Thread::ActiveState
|
assertT(t, t->state == Thread::ActiveState
|
||||||
or t->state == Thread::ExclusiveState);
|
or t->state == Thread::ExclusiveState);
|
||||||
|
|
||||||
assert(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
||||||
|
|
||||||
if (UNLIKELY(t->sp + method->parameterFootprint() + 1
|
if (UNLIKELY(t->sp + method->parameterFootprint() + 1
|
||||||
> stackSizeInWords(t) / 2))
|
> stackSizeInWords(t) / 2))
|
||||||
@ -3145,10 +3145,10 @@ class MyProcessor: public Processor {
|
|||||||
{
|
{
|
||||||
Thread* t = static_cast<Thread*>(vmt);
|
Thread* t = static_cast<Thread*>(vmt);
|
||||||
|
|
||||||
assert(t, t->state == Thread::ActiveState
|
assertT(t, t->state == Thread::ActiveState
|
||||||
or t->state == Thread::ExclusiveState);
|
or t->state == Thread::ExclusiveState);
|
||||||
|
|
||||||
assert(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
||||||
|
|
||||||
if (UNLIKELY(t->sp + method->parameterFootprint() + 1
|
if (UNLIKELY(t->sp + method->parameterFootprint() + 1
|
||||||
> stackSizeInWords(t) / 2))
|
> stackSizeInWords(t) / 2))
|
||||||
@ -3169,10 +3169,10 @@ class MyProcessor: public Processor {
|
|||||||
{
|
{
|
||||||
Thread* t = static_cast<Thread*>(vmt);
|
Thread* t = static_cast<Thread*>(vmt);
|
||||||
|
|
||||||
assert(t, t->state == Thread::ActiveState
|
assertT(t, t->state == Thread::ActiveState
|
||||||
or t->state == Thread::ExclusiveState);
|
or t->state == Thread::ExclusiveState);
|
||||||
|
|
||||||
assert(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
||||||
|
|
||||||
if (UNLIKELY(t->sp + method->parameterFootprint() + 1
|
if (UNLIKELY(t->sp + method->parameterFootprint() + 1
|
||||||
> stackSizeInWords(t) / 2))
|
> stackSizeInWords(t) / 2))
|
||||||
@ -3194,7 +3194,7 @@ class MyProcessor: public Processor {
|
|||||||
{
|
{
|
||||||
Thread* t = static_cast<Thread*>(vmt);
|
Thread* t = static_cast<Thread*>(vmt);
|
||||||
|
|
||||||
assert(t, t->state == Thread::ActiveState
|
assertT(t, t->state == Thread::ActiveState
|
||||||
or t->state == Thread::ExclusiveState);
|
or t->state == Thread::ExclusiveState);
|
||||||
|
|
||||||
if (UNLIKELY(t->sp + parameterFootprint(vmt, methodSpec, false)
|
if (UNLIKELY(t->sp + parameterFootprint(vmt, methodSpec, false)
|
||||||
@ -3208,7 +3208,7 @@ class MyProcessor: public Processor {
|
|||||||
GcMethod* method = resolveMethod
|
GcMethod* method = resolveMethod
|
||||||
(t, loader, className, methodName, methodSpec);
|
(t, loader, className, methodName, methodSpec);
|
||||||
|
|
||||||
assert(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
assertT(t, ((method->flags() & ACC_STATIC) == 0) xor (this_ == 0));
|
||||||
|
|
||||||
return local::invoke(t, method);
|
return local::invoke(t, method);
|
||||||
}
|
}
|
||||||
|
@ -564,7 +564,7 @@ getMethodID(Thread* t, uintptr_t* arguments)
|
|||||||
|
|
||||||
GcMethod* method = findMethod(t, c, name, spec);
|
GcMethod* method = findMethod(t, c, name, spec);
|
||||||
|
|
||||||
assert(t, (method->flags() & ACC_STATIC) == 0);
|
assertT(t, (method->flags() & ACC_STATIC) == 0);
|
||||||
|
|
||||||
return methodID(t, method);
|
return methodID(t, method);
|
||||||
}
|
}
|
||||||
@ -588,7 +588,7 @@ getStaticMethodID(Thread* t, uintptr_t* arguments)
|
|||||||
|
|
||||||
GcMethod* method = findMethod(t, c, name, spec);
|
GcMethod* method = findMethod(t, c, name, spec);
|
||||||
|
|
||||||
assert(t, method->flags() & ACC_STATIC);
|
assertT(t, method->flags() & ACC_STATIC);
|
||||||
|
|
||||||
return methodID(t, method);
|
return methodID(t, method);
|
||||||
}
|
}
|
||||||
@ -606,11 +606,11 @@ GetStaticMethodID(Thread* t, jclass c, const char* name, const char* spec)
|
|||||||
GcMethod*
|
GcMethod*
|
||||||
getMethod(Thread* t, jmethodID m)
|
getMethod(Thread* t, jmethodID m)
|
||||||
{
|
{
|
||||||
assert(t, m);
|
assertT(t, m);
|
||||||
|
|
||||||
GcMethod* method = cast<GcMethod>(t, vectorBody(t, root(t, Machine::JNIMethodTable), m - 1));
|
GcMethod* method = cast<GcMethod>(t, vectorBody(t, root(t, Machine::JNIMethodTable), m - 1));
|
||||||
|
|
||||||
assert(t, (method->flags() & ACC_STATIC) == 0);
|
assertT(t, (method->flags() & ACC_STATIC) == 0);
|
||||||
|
|
||||||
return method;
|
return method;
|
||||||
}
|
}
|
||||||
@ -1101,11 +1101,11 @@ CallVoidMethodA(Thread* t, jobject o, jmethodID m, const jvalue* a)
|
|||||||
GcMethod*
|
GcMethod*
|
||||||
getStaticMethod(Thread* t, jmethodID m)
|
getStaticMethod(Thread* t, jmethodID m)
|
||||||
{
|
{
|
||||||
assert(t, m);
|
assertT(t, m);
|
||||||
|
|
||||||
GcMethod* method = cast<GcMethod>(t, vectorBody(t, root(t, Machine::JNIMethodTable), m - 1));
|
GcMethod* method = cast<GcMethod>(t, vectorBody(t, root(t, Machine::JNIMethodTable), m - 1));
|
||||||
|
|
||||||
assert(t, method->flags() & ACC_STATIC);
|
assertT(t, method->flags() & ACC_STATIC);
|
||||||
|
|
||||||
return method;
|
return method;
|
||||||
}
|
}
|
||||||
@ -1540,11 +1540,11 @@ GetStaticFieldID(Thread* t, jclass c, const char* name, const char* spec)
|
|||||||
object
|
object
|
||||||
getField(Thread* t, jfieldID f)
|
getField(Thread* t, jfieldID f)
|
||||||
{
|
{
|
||||||
assert(t, f);
|
assertT(t, f);
|
||||||
|
|
||||||
object field = vectorBody(t, root(t, Machine::JNIFieldTable), f - 1);
|
object field = vectorBody(t, root(t, Machine::JNIFieldTable), f - 1);
|
||||||
|
|
||||||
assert(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
|
assertT(t, (fieldFlags(t, field) & ACC_STATIC) == 0);
|
||||||
|
|
||||||
return field;
|
return field;
|
||||||
}
|
}
|
||||||
@ -1969,11 +1969,11 @@ SetDoubleField(Thread* t, jobject o, jfieldID field, jdouble v)
|
|||||||
object
|
object
|
||||||
getStaticField(Thread* t, jfieldID f)
|
getStaticField(Thread* t, jfieldID f)
|
||||||
{
|
{
|
||||||
assert(t, f);
|
assertT(t, f);
|
||||||
|
|
||||||
object field = vectorBody(t, root(t, Machine::JNIFieldTable), f - 1);
|
object field = vectorBody(t, root(t, Machine::JNIFieldTable), f - 1);
|
||||||
|
|
||||||
assert(t, fieldFlags(t, field) & ACC_STATIC);
|
assertT(t, fieldFlags(t, field) & ACC_STATIC);
|
||||||
|
|
||||||
return field;
|
return field;
|
||||||
}
|
}
|
||||||
|
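The getMethod/getStaticMethod/getField/getStaticField hunks above resolve a jmethodID or jfieldID as a 1-based index into a per-VM table (vectorBody(..., id - 1)), so a zero handle can never alias a valid entry; that is what the assert on the raw id protects. A sketch of the same 1-based handle scheme with a plain vector standing in for the GC-managed table:

    #include <cassert>
    #include <string>
    #include <vector>

    struct IdTable {
      std::vector<std::string> entries;

      uintptr_t intern(const std::string& name)
      {
        entries.push_back(name);
        return entries.size();            // 1-based handle; 0 stays invalid
      }

      const std::string& resolve(uintptr_t id) const
      {
        assert(id != 0);
        return entries[id - 1];
      }
    };

    int main()
    {
      IdTable table;
      uintptr_t id = table.intern("toString");
      assert(table.resolve(id) == "toString");
    }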
@ -49,8 +49,8 @@ void
|
|||||||
join(Thread* t, Thread* o)
|
join(Thread* t, Thread* o)
|
||||||
{
|
{
|
||||||
if (t != o) {
|
if (t != o) {
|
||||||
assert(t, o->state != Thread::JoinedState);
|
assertT(t, o->state != Thread::JoinedState);
|
||||||
assert(t, (o->flags & Thread::SystemFlag) == 0);
|
assertT(t, (o->flags & Thread::SystemFlag) == 0);
|
||||||
if (o->flags & Thread::JoinFlag) {
|
if (o->flags & Thread::JoinFlag) {
|
||||||
o->systemThread->join();
|
o->systemThread->join();
|
||||||
}
|
}
|
||||||
@ -508,7 +508,7 @@ postVisit(Thread* t, Heap::Visitor* v)
|
|||||||
Machine* m = t->m;
|
Machine* m = t->m;
|
||||||
bool major = m->heap->collectionType() == Heap::MajorCollection;
|
bool major = m->heap->collectionType() == Heap::MajorCollection;
|
||||||
|
|
||||||
assert(t, m->finalizeQueue == 0);
|
assertT(t, m->finalizeQueue == 0);
|
||||||
|
|
||||||
m->heap->postVisit();
|
m->heap->postVisit();
|
||||||
|
|
||||||
@ -796,7 +796,7 @@ parseUtf8NonAscii(Thread* t, AbstractStream& s, object bytesSoFar,
|
|||||||
if (a & 0x20) {
|
if (a & 0x20) {
|
||||||
// 3 bytes
|
// 3 bytes
|
||||||
si += 2;
|
si += 2;
|
||||||
assert(t, si < length);
|
assertT(t, si < length);
|
||||||
unsigned b = readByte(s, &byteB);
|
unsigned b = readByte(s, &byteB);
|
||||||
unsigned c = s.read1();
|
unsigned c = s.read1();
|
||||||
charArrayBody(t, value, vi++)
|
charArrayBody(t, value, vi++)
|
||||||
@ -804,7 +804,7 @@ parseUtf8NonAscii(Thread* t, AbstractStream& s, object bytesSoFar,
|
|||||||
} else {
|
} else {
|
||||||
// 2 bytes
|
// 2 bytes
|
||||||
++ si;
|
++ si;
|
||||||
assert(t, si < length);
|
assertT(t, si < length);
|
||||||
unsigned b = readByte(s, &byteB);
|
unsigned b = readByte(s, &byteB);
|
||||||
|
|
||||||
if (a == 0xC0 and b == 0x80) {
|
if (a == 0xC0 and b == 0x80) {
|
||||||
@ -846,7 +846,7 @@ parseUtf8(Thread* t, AbstractStream& s, unsigned length)
|
|||||||
|
|
||||||
if (a == 0xC0 and b == 0x80) {
|
if (a == 0xC0 and b == 0x80) {
|
||||||
++ si;
|
++ si;
|
||||||
assert(t, si < length);
|
assertT(t, si < length);
|
||||||
byteArrayBody(t, value, vi++) = 0;
|
byteArrayBody(t, value, vi++) = 0;
|
||||||
} else {
|
} else {
|
||||||
return parseUtf8NonAscii(t, s, value, vi, si, a, b);
|
return parseUtf8NonAscii(t, s, value, vi, si, a, b);
|
||||||
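The parseUtf8/parseUtf8NonAscii hunks above decode the modified UTF-8 used in class files: one-byte forms cover 0x01-0x7F only, U+0000 is written as the two-byte pair 0xC0 0x80, and other code points up to U+FFFF use the usual two- and three-byte patterns. A buffer-based decoder sketch (the parser above reads from a stream instead; names here are hypothetical):

    #include <cassert>
    #include <cstdint>

    // Decode one code unit and report how many input bytes it consumed.
    uint16_t decodeOne(const uint8_t* p, unsigned* consumed)
    {
      uint8_t a = p[0];
      if ((a & 0x80) == 0) {                 // 0xxxxxxx
        *consumed = 1;
        return a;
      } else if ((a & 0xE0) == 0xC0) {       // 110xxxxx 10xxxxxx
        *consumed = 2;
        return static_cast<uint16_t>(((a & 0x1F) << 6) | (p[1] & 0x3F));
      } else {                               // 1110xxxx 10xxxxxx 10xxxxxx
        *consumed = 3;
        return static_cast<uint16_t>(((a & 0x0F) << 12) | ((p[1] & 0x3F) << 6)
                                     | (p[2] & 0x3F));
      }
    }

    int main()
    {
      const uint8_t nul[] = { 0xC0, 0x80 };        // modified-UTF-8 NUL
      const uint8_t euro[] = { 0xE2, 0x82, 0xAC }; // U+20AC
      unsigned n;
      assert(decodeOne(nul, &n) == 0 and n == 2);
      assert(decodeOne(euro, &n) == 0x20AC and n == 3);
    }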
@ -2281,7 +2281,7 @@ parseMethodTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
|
|||||||
|
|
||||||
for (HashMapIterator it(t, virtualMap); it.hasMore();) {
|
for (HashMapIterator it(t, virtualMap); it.hasMore();) {
|
||||||
object method = tripleFirst(t, it.next());
|
object method = tripleFirst(t, it.next());
|
||||||
assert(t, arrayBody(t, vtable, methodOffset(t, method)) == 0);
|
assertT(t, arrayBody(t, vtable, methodOffset(t, method)) == 0);
|
||||||
set(t, vtable, ArrayBody + (methodOffset(t, method) * BytesPerWord),
|
set(t, vtable, ArrayBody + (methodOffset(t, method) * BytesPerWord),
|
||||||
method);
|
method);
|
||||||
++ i;
|
++ i;
|
||||||
@ -2340,12 +2340,12 @@ parseMethodTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, arrayLength(t, newMethodTable) == mti);
|
assertT(t, arrayLength(t, newMethodTable) == mti);
|
||||||
|
|
||||||
set(t, reinterpret_cast<object>(class_), ClassMethodTable, newMethodTable);
|
set(t, reinterpret_cast<object>(class_), ClassMethodTable, newMethodTable);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, arrayLength(t, vtable) == i);
|
assertT(t, arrayLength(t, vtable) == i);
|
||||||
|
|
||||||
set(t, reinterpret_cast<object>(class_), ClassVirtualTable, vtable);
|
set(t, reinterpret_cast<object>(class_), ClassVirtualTable, vtable);
|
||||||
}
|
}
|
||||||
@ -2365,7 +2365,7 @@ parseMethodTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
|
|||||||
object method = arrayBody(t, ivtable, j);
|
object method = arrayBody(t, ivtable, j);
|
||||||
method = hashMapFind
|
method = hashMapFind
|
||||||
(t, virtualMap, method, methodHash, methodEqual);
|
(t, virtualMap, method, methodHash, methodEqual);
|
||||||
assert(t, method);
|
assertT(t, method);
|
||||||
|
|
||||||
set(t, vtable, ArrayBody + (j * BytesPerWord), method);
|
set(t, vtable, ArrayBody + (j * BytesPerWord), method);
|
||||||
}
|
}
|
||||||
@ -2980,7 +2980,7 @@ class HeapClient: public Heap::Client {
|
|||||||
Thread* t = m->rootThread;
|
Thread* t = m->rootThread;
|
||||||
|
|
||||||
object o = static_cast<object>(m->heap->follow(maskAlignedPointer(p)));
|
object o = static_cast<object>(m->heap->follow(maskAlignedPointer(p)));
|
||||||
assert(t, not objectFixed(t, o));
|
assertT(t, not objectFixed(t, o));
|
||||||
|
|
||||||
unsigned n = baseSize(t, o, cast<GcClass>(t, static_cast<object>
|
unsigned n = baseSize(t, o, cast<GcClass>(t, static_cast<object>
|
||||||
(m->heap->follow(objectClass(t, o)))));
|
(m->heap->follow(objectClass(t, o)))));
|
||||||
@ -2996,7 +2996,7 @@ class HeapClient: public Heap::Client {
|
|||||||
Thread* t = m->rootThread;
|
Thread* t = m->rootThread;
|
||||||
|
|
||||||
object src = static_cast<object>(m->heap->follow(maskAlignedPointer(srcp)));
|
object src = static_cast<object>(m->heap->follow(maskAlignedPointer(srcp)));
|
||||||
assert(t, not objectFixed(t, src));
|
assertT(t, not objectFixed(t, src));
|
||||||
|
|
||||||
GcClass* class_ = cast<GcClass>(t, static_cast<object>
|
GcClass* class_ = cast<GcClass>(t, static_cast<object>
|
||||||
(m->heap->follow(objectClass(t, src))));
|
(m->heap->follow(objectClass(t, src))));
|
||||||
@ -3379,8 +3379,8 @@ Thread::init()
|
|||||||
memset(backupHeap, 0, ThreadBackupHeapSizeInBytes);
|
memset(backupHeap, 0, ThreadBackupHeapSizeInBytes);
|
||||||
|
|
||||||
if (parent == 0) {
|
if (parent == 0) {
|
||||||
assert(this, m->rootThread == 0);
|
assertT(this, m->rootThread == 0);
|
||||||
assert(this, javaThread == 0);
|
assertT(this, javaThread == 0);
|
||||||
|
|
||||||
m->rootThread = this;
|
m->rootThread = this;
|
||||||
m->unsafe = true;
|
m->unsafe = true;
|
||||||
@ -3612,7 +3612,7 @@ enter(Thread* t, Thread::State s)
|
|||||||
case Thread::IdleState:
|
case Thread::IdleState:
|
||||||
if (LIKELY(t->state == Thread::ActiveState)) {
|
if (LIKELY(t->state == Thread::ActiveState)) {
|
||||||
// fast path
|
// fast path
|
||||||
assert(t, t->m->activeCount > 0);
|
assertT(t, t->m->activeCount > 0);
|
||||||
INCREMENT(&(t->m->activeCount), -1);
|
INCREMENT(&(t->m->activeCount), -1);
|
||||||
|
|
||||||
t->state = s;
|
t->state = s;
|
||||||
@ -3633,7 +3633,7 @@ enter(Thread* t, Thread::State s)
|
|||||||
|
|
||||||
switch (t->state) {
|
switch (t->state) {
|
||||||
case Thread::ExclusiveState: {
|
case Thread::ExclusiveState: {
|
||||||
assert(t, t->m->exclusive == t);
|
assertT(t, t->m->exclusive == t);
|
||||||
t->m->exclusive = 0;
|
t->m->exclusive = 0;
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
@ -3642,11 +3642,11 @@ enter(Thread* t, Thread::State s)
|
|||||||
default: abort(t);
|
default: abort(t);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(t, t->m->activeCount > 0);
|
assertT(t, t->m->activeCount > 0);
|
||||||
INCREMENT(&(t->m->activeCount), -1);
|
INCREMENT(&(t->m->activeCount), -1);
|
||||||
|
|
||||||
if (s == Thread::ZombieState) {
|
if (s == Thread::ZombieState) {
|
||||||
assert(t, t->m->liveCount > 0);
|
assertT(t, t->m->liveCount > 0);
|
||||||
-- t->m->liveCount;
|
-- t->m->liveCount;
|
||||||
|
|
||||||
if (t->flags & Thread::DaemonFlag) {
|
if (t->flags & Thread::DaemonFlag) {
|
||||||
@ -3679,7 +3679,7 @@ enter(Thread* t, Thread::State s)
|
|||||||
|
|
||||||
switch (t->state) {
|
switch (t->state) {
|
||||||
case Thread::ExclusiveState: {
|
case Thread::ExclusiveState: {
|
||||||
assert(t, t->m->exclusive == t);
|
assertT(t, t->m->exclusive == t);
|
||||||
|
|
||||||
t->state = s;
|
t->state = s;
|
||||||
t->m->exclusive = 0;
|
t->m->exclusive = 0;
|
||||||
@ -3710,7 +3710,7 @@ enter(Thread* t, Thread::State s)
|
|||||||
|
|
||||||
switch (t->state) {
|
switch (t->state) {
|
||||||
case Thread::ExclusiveState: {
|
case Thread::ExclusiveState: {
|
||||||
assert(t, t->m->exclusive == t);
|
assertT(t, t->m->exclusive == t);
|
||||||
       // exit state should also be exclusive, so don't set exclusive = 0

       t->m->stateLock->notifyAll(t->systemThread);

@@ -3721,7 +3721,7 @@ enter(Thread* t, Thread::State s)
     default: abort(t);
     }

-    assert(t, t->m->activeCount > 0);
+    assertT(t, t->m->activeCount > 0);

     INCREMENT(&(t->m->activeCount), -1);

     t->state = s;
@@ -3888,7 +3888,7 @@ collect(Thread* t, Heap::CollectionType type, int pendingAllocation)
 object
 makeNewGeneral(Thread* t, GcClass* class_)
 {
-  assert(t, t->state == Thread::ActiveState);
+  assertT(t, t->state == Thread::ActiveState);

   PROTECT(t, class_);

@@ -4039,7 +4039,7 @@ void
 stringUTFChars(Thread* t, object string, unsigned start, unsigned length,
                char* chars, unsigned charsLength UNUSED)
 {
-  assert(t, static_cast<unsigned>
+  assertT(t, static_cast<unsigned>
          (stringUTFLength(t, string, start, length)) == charsLength);

   object data = stringData(t, string);
@@ -4083,8 +4083,8 @@ resolveBootstrap(Thread* t, uintptr_t* arguments)
 bool
 isAssignableFrom(Thread* t, GcClass* a, GcClass* b)
 {
-  assert(t, a);
-  assert(t, b);
+  assertT(t, a);
+  assertT(t, b);

   if (a == b) return true;

@@ -4872,7 +4872,7 @@ addFinalizer(Thread* t, object target, void (*finalize)(Thread*, object))
 object
 objectMonitor(Thread* t, object o, bool createNew)
 {
-  assert(t, t->state == Thread::ActiveState);
+  assertT(t, t->state == Thread::ActiveState);

   object m = hashMapFind
     (t, cast<GcHashMap>(t, root(t, Machine::MonitorMap)), o, objectHash, objectEqual);
@@ -5106,11 +5106,11 @@ makeTrace(Thread* t, Processor::StackWalker* walker)
     virtual bool visit(Processor::StackWalker* walker) {
       if (trace == 0) {
         trace = makeObjectArray(t, walker->count());
-        assert(t, trace);
+        assertT(t, trace);
       }

       object e = reinterpret_cast<object>(makeTraceElement(t, reinterpret_cast<object>(walker->method()), walker->ip()));
-      assert(t, index < objectArrayLength(t, trace));
+      assertT(t, index < objectArrayLength(t, trace));
       set(t, trace, ArrayBody + (index * BytesPerWord), e);
       ++ index;
       return true;
@@ -230,7 +230,7 @@ resolveNative(Thread* t, GcMethod* method)
 {
   PROTECT(t, method);

-  assert(t, method->flags() & ACC_NATIVE);
+  assertT(t, method->flags() & ACC_NATIVE);

   initClass(t, cast<GcClass>(t, method->class_()));

@@ -686,7 +686,7 @@ class MySystem: public System {
   virtual Status visit(System::Thread* st UNUSED, System::Thread* sTarget,
                        ThreadVisitor* visitor)
   {
-    assert(this, st != sTarget);
+    assertT(this, st != sTarget);

     Thread* target = static_cast<Thread*>(sTarget);

@@ -103,12 +103,12 @@ class MutexResource {
  public:
   MutexResource(System* s, HANDLE m): s(s), m(m) {
     int r UNUSED = WaitForSingleObject(m, INFINITE);
-    assert(s, r == WAIT_OBJECT_0);
+    assertT(s, r == WAIT_OBJECT_0);
   }

   ~MutexResource() {
     bool success UNUSED = ReleaseMutex(m);
-    assert(s, success);
+    assertT(s, success);
   }

  private:
@@ -142,10 +142,10 @@ class MySystem: public System {
       flags(0)
     {
       mutex = CreateMutex(0, false, 0);
-      assert(s, mutex);
+      assertT(s, mutex);

       event = CreateEvent(0, true, false, 0);
-      assert(s, event);
+      assertT(s, event);
     }

     virtual void interrupt() {
@@ -155,7 +155,7 @@ class MySystem: public System {

       if (flags & Waiting) {
         int r UNUSED = SetEvent(event);
-        assert(s, r != 0);
+        assertT(s, r != 0);
       }
     }

@@ -171,7 +171,7 @@ class MySystem: public System {

     virtual void join() {
       int r UNUSED = WaitForSingleObject(thread, INFINITE);
-      assert(s, r == WAIT_OBJECT_0);
+      assertT(s, r == WAIT_OBJECT_0);
     }

     virtual void dispose() {
@@ -194,17 +194,17 @@ class MySystem: public System {
    public:
     Mutex(System* s): s(s) {
       mutex = CreateMutex(0, false, 0);
-      assert(s, mutex);
+      assertT(s, mutex);
     }

     virtual void acquire() {
       int r UNUSED = WaitForSingleObject(mutex, INFINITE);
-      assert(s, r == WAIT_OBJECT_0);
+      assertT(s, r == WAIT_OBJECT_0);
     }

     virtual void release() {
       bool success UNUSED = ReleaseMutex(mutex);
-      assert(s, success);
+      assertT(s, success);
     }

     virtual void dispose() {
@@ -220,12 +220,12 @@ class MySystem: public System {
    public:
     Monitor(System* s): s(s), owner_(0), first(0), last(0), depth(0) {
       mutex = CreateMutex(0, false, 0);
-      assert(s, mutex);
+      assertT(s, mutex);
     }

     virtual bool tryAcquire(System::Thread* context) {
       Thread* t = static_cast<Thread*>(context);
-      assert(s, t);
+      assertT(s, t);

       if (owner_ == t) {
         ++ depth;
@@ -248,11 +248,11 @@ class MySystem: public System {

     virtual void acquire(System::Thread* context) {
       Thread* t = static_cast<Thread*>(context);
-      assert(s, t);
+      assertT(s, t);

       if (owner_ != t) {
         int r UNUSED = WaitForSingleObject(mutex, INFINITE);
-        assert(s, r == WAIT_OBJECT_0);
+        assertT(s, r == WAIT_OBJECT_0);
         owner_ = t;
       }
       ++ depth;
@@ -260,13 +260,13 @@ class MySystem: public System {

     virtual void release(System::Thread* context) {
       Thread* t = static_cast<Thread*>(context);
-      assert(s, t);
+      assertT(s, t);

       if (owner_ == t) {
         if (-- depth == 0) {
           owner_ = 0;
           bool success UNUSED = ReleaseMutex(mutex);
-          assert(s, success);
+          assertT(s, success);
         }
       } else {
         sysAbort(s);
@@ -329,7 +329,7 @@ class MySystem: public System {

     bool wait(System::Thread* context, int64_t time, bool clearInterrupted) {
       Thread* t = static_cast<Thread*>(context);
-      assert(s, t);
+      assertT(s, t);

       if (owner_ == t) {
         // Initialized here to make gcc 4.2 a happy compiler
@@ -357,20 +357,20 @@ class MySystem: public System {
         owner_ = 0;

         bool success UNUSED = ReleaseMutex(mutex);
-        assert(s, success);
+        assertT(s, success);

         if (not interrupted) {
           success = ResetEvent(t->event);
-          assert(s, success);
+          assertT(s, success);

           success = ReleaseMutex(t->mutex);
-          assert(s, success);
+          assertT(s, success);

           r = WaitForSingleObject(t->event, (time ? time : INFINITE));
-          assert(s, r == WAIT_OBJECT_0 or r == WAIT_TIMEOUT);
+          assertT(s, r == WAIT_OBJECT_0 or r == WAIT_TIMEOUT);

           r = WaitForSingleObject(t->mutex, INFINITE);
-          assert(s, r == WAIT_OBJECT_0);
+          assertT(s, r == WAIT_OBJECT_0);

           interrupted = t->r->interrupted();
           if (interrupted and clearInterrupted) {
@@ -382,7 +382,7 @@ class MySystem: public System {
         }

         r = WaitForSingleObject(mutex, INFINITE);
-        assert(s, r == WAIT_OBJECT_0);
+        assertT(s, r == WAIT_OBJECT_0);

         { ACQUIRE(s, t->mutex);
           t->flags = 0;
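The wait hunks above all follow the same idiom: the result of each Win32 call is captured (often in a variable marked UNUSED so release builds stay warning-free) and immediately checked against the expected status. The sketch below condenses that sequence into a free function for illustration only; check() stands in for the assertT call sites shown in the diff, timedWait is not an Avian function, and the Monitor bookkeeping (owner_, the waiter list, interruption handling) is omitted.

// Illustrative sketch only (assumes a Windows build): the checked-call idiom
// used throughout Monitor::wait above, reduced to a free function.
#include <windows.h>
#include <cstdlib>

static void check(bool ok)
{
  if (!ok) {
    std::abort();  // the diff's debug builds would fail an assertT here instead
  }
}

// Release `mutex`, wait on `event` (optionally with a timeout in milliseconds),
// then reacquire `mutex`; returns true if the event was signaled.
static bool timedWait(HANDLE mutex, HANDLE event, DWORD millis)
{
  check(ReleaseMutex(mutex) != 0);

  DWORD r = WaitForSingleObject(event, millis ? millis : INFINITE);
  check(r == WAIT_OBJECT_0 || r == WAIT_TIMEOUT);

  check(WaitForSingleObject(mutex, INFINITE) == WAIT_OBJECT_0);
  return r == WAIT_OBJECT_0;
}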
@@ -415,12 +415,12 @@ class MySystem: public System {
       t->flags |= Notified;

       bool success UNUSED = SetEvent(t->event);
-      assert(s, success);
+      assertT(s, success);
     }

     virtual void notify(System::Thread* context) {
       Thread* t = static_cast<Thread*>(context);
-      assert(s, t);
+      assertT(s, t);

       if (owner_ == t) {
         if (first) {
@@ -440,7 +440,7 @@ class MySystem: public System {

     virtual void notifyAll(System::Thread* context) {
       Thread* t = static_cast<Thread*>(context);
-      assert(s, t);
+      assertT(s, t);

       if (owner_ == t) {
         for (Thread* t = first; t; t = t->next) {
@@ -457,7 +457,7 @@ class MySystem: public System {
     }

     virtual void dispose() {
-      assert(s, owner_ == 0);
+      assertT(s, owner_ == 0);
       CloseHandle(mutex);
       ::free(this);
     }
@@ -474,7 +474,7 @@ class MySystem: public System {
    public:
     Local(System* s): s(s) {
       key = TlsAlloc();
-      assert(s, key != TLS_OUT_OF_INDEXES);
+      assertT(s, key != TLS_OUT_OF_INDEXES);
     }

     virtual void* get() {
@@ -483,12 +483,12 @@ class MySystem: public System {

     virtual void set(void* p) {
       bool r UNUSED = TlsSetValue(key, p);
-      assert(s, r);
+      assertT(s, r);
     }

     virtual void dispose() {
       bool r UNUSED = TlsFree(key);
-      assert(s, r);
+      assertT(s, r);

       ::free(this);
     }
@@ -623,7 +623,7 @@ class MySystem: public System {
     system = this;

     mutex = CreateMutex(0, false, 0);
-    assert(this, mutex);
+    assertT(this, mutex);
   }

   virtual void* tryAllocate(unsigned sizeInBytes) {
@@ -642,7 +642,7 @@ class MySystem: public System {

   virtual void freeExecutable(const void* p, unsigned) {
     int r UNUSED = VirtualFree(const_cast<void*>(p), 0, MEM_RELEASE);
-    assert(this, r);
+    assertT(this, r);
   }
 #endif

@@ -655,7 +655,7 @@ class MySystem: public System {
     bool success UNUSED = DuplicateHandle
       (GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
        &(t->thread), 0, false, DUPLICATE_SAME_ACCESS);
-    assert(this, success);
+    assertT(this, success);
     r->attach(t);
     return 0;
   }
@@ -665,7 +665,7 @@ class MySystem: public System {
     r->attach(t);
     DWORD id;
     t->thread = CreateThread(0, 0, run, r, 0, &id);
-    assert(this, t->thread);
+    assertT(this, t->thread);
     return 0;
   }

@@ -688,7 +688,7 @@ class MySystem: public System {
                        ThreadVisitor* visitor)
   {
 #if !defined(WINAPI_FAMILY) || WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-    assert(this, st != sTarget);
+    assertT(this, st != sTarget);

     Thread* target = static_cast<Thread*>(sTarget);

@@ -873,7 +873,7 @@ class MySystem: public System {
     handle = GetModuleHandle(0);
 #else
     // Most of WinRT/WP8 applications can not host native object files inside main executable
-    assert(this, false);
+    assertT(this, false);
 #endif
   }

@@ -1219,7 +1219,7 @@ writeAccessor(Output* out, Object* member, Object* offset, bool unsafe = false)

   if (memberOwner(member)->type == Object::Type) {
     if (not unsafe) {
-      out->write(" assert(t, t->m->unsafe or ");
+      out->write(" assertT(t, t->m->unsafe or ");
       out->write("instanceOf(t, reinterpret_cast<GcClass*>(arrayBodyUnsafe");
       out->write("(t, t->m->types, Gc::");
       out->write(capitalize(local::typeName(memberOwner(member))));
@@ -1227,7 +1227,7 @@ writeAccessor(Output* out, Object* member, Object* offset, bool unsafe = false)
       out->write(", o));\n");

       if (member->type != Object::Scalar) {
-        out->write(" assert(t, i < ");
+        out->write(" assertT(t, i < ");
         out->write(local::typeName(memberOwner(member)));
         out->write("Length(t, o));\n");
       }
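The two generator hunks above change the strings that writeAccessor emits, so accessors produced by the type generator will contain assertT calls rather than assert calls. Below is a minimal sketch of that piecewise string-emission style for illustration only; StringOutput and writeLengthCheck are stand-ins, not the generator's actual Output class, and "foo" is a hypothetical type name.

// Illustrative sketch of the piecewise emission used by writeAccessor above.
// StringOutput is a stand-in for the generator's Output class.
#include <cstdio>
#include <string>

struct StringOutput {
  std::string text;
  void write(const char* s) { text += s; }
};

// Emit the bounds check the same way the second hunk above does.
static void writeLengthCheck(StringOutput* out, const char* typeName)
{
  out->write(" assertT(t, i < ");
  out->write(typeName);
  out->write("Length(t, o));\n");
}

int main()
{
  StringOutput out;
  writeLengthCheck(&out, "foo");
  std::fputs(out.text.c_str(), stdout);  // prints: " assertT(t, i < fooLength(t, o));"
}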