/* Copyright (c) 2008-2014, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software. See license.txt for
   details. */
#include "avian/jnienv.h"
#include "avian/machine.h"
#include "avian/util.h"
#include <avian/util/stream.h>
#include "avian/constants.h"
#include "avian/processor.h"
#include "avian/arch.h"
#include "avian/lzma.h"
#include <avian/util/runtime-array.h>
#include <avian/util/math.h>
#if defined(PLATFORM_WINDOWS)
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
#endif
using namespace vm;
using namespace avian::util;
namespace {
const bool DebugClassReader = false;
const unsigned NoByte = 0xFFFF;
#ifdef USE_ATOMIC_OPERATIONS
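// add v to *p atomically via a compare-and-swap retry loop, so
// concurrent increments are never lost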
void
atomicIncrement(uint32_t* p, int v)
{
for (uint32_t old = *p;
not atomicCompareAndSwap32(p, old, old + v);
old = *p)
{ }
}
#endif
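// wait for thread o's system thread to exit (if it was ever started)
// and mark it joined; a thread never joins itself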
void
join(Thread* t, Thread* o)
{
if (t != o) {
assertT(t, o->state != Thread::JoinedState);
assertT(t, (o->flags & Thread::SystemFlag) == 0);
if (o->flags & Thread::JoinFlag) {
o->systemThread->join();
}
o->state = Thread::JoinedState;
}
}
#ifndef NDEBUG
bool
find(Thread* t, Thread* o)
{
return (t == o)
or (t->peer and find(t->peer, o))
or (t->child and find(t->child, o));
}
unsigned
count(Thread* t, Thread* o)
{
unsigned c = 0;
if (t != o) ++ c;
if (t->peer) c += count(t->peer, o);
if (t->child) c += count(t->child, o);
return c;
}
Thread**
fill(Thread* t, Thread* o, Thread** array)
{
if (t != o) *(array++) = t;
if (t->peer) array = fill(t->peer, o, array);
if (t->child) array = fill(t->child, o, array);
return array;
}
#endif // not NDEBUG
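// unlink o from the thread tree, reparenting its children and splicing
// its peers back in, then release its resources; the NDEBUG checks
// verify that no other thread is lost in the process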
void
dispose(Thread* t, Thread* o, bool remove)
{
if (remove) {
#ifndef NDEBUG
expect(t, find(t->m->rootThread, o));
unsigned c = count(t->m->rootThread, o);
THREAD_RUNTIME_ARRAY(t, Thread*, threads, c);
fill(t->m->rootThread, o, RUNTIME_ARRAY_BODY(threads));
#endif
if (o->parent) {
Thread* previous = 0;
for (Thread* p = o->parent->child; p;) {
if (p == o) {
if (p == o->parent->child) {
o->parent->child = p->peer;
} else {
previous->peer = p->peer;
}
break;
} else {
previous = p;
p = p->peer;
}
}
for (Thread* p = o->child; p;) {
Thread* next = p->peer;
p->peer = o->parent->child;
o->parent->child = p;
p->parent = o->parent;
p = next;
}
} else if (o->child) {
t->m->rootThread = o->child;
for (Thread* p = o->peer; p;) {
Thread* next = p->peer;
p->peer = t->m->rootThread;
t->m->rootThread = p;
p = next;
}
} else if (o->peer) {
t->m->rootThread = o->peer;
} else {
abort(t);
}
#ifndef NDEBUG
expect(t, not find(t->m->rootThread, o));
for (unsigned i = 0; i < c; ++i) {
expect(t, find(t->m->rootThread, RUNTIME_ARRAY_BODY(threads)[i]));
}
#endif
}
o->dispose();
}
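// apply visit to o and every descendant thread; each peer pointer is
// read before recursing so visit may dispose the thread it receives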
void
visitAll(Thread* m, Thread* o, void (*visit)(Thread*, Thread*))
{
for (Thread* p = o->child; p;) {
Thread* child = p;
p = p->peer;
visitAll(m, child, visit);
}
visit(m, o);
}
void
disposeNoRemove(Thread* m, Thread* o)
{
dispose(m, o, false);
}
void
interruptDaemon(Thread* m, Thread* o)
{
if (o->flags & Thread::DaemonFlag) {
interrupt(m, o);
}
}
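// final VM shutdown: join all remaining threads, run any registered
// native finalizers, dispose virtual files and finders, then tear down
// the machine's subsystems in dependency order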
void
turnOffTheLights(Thread* t)
{
expect(t, t->m->liveCount == 1);
visitAll(t, t->m->rootThread, join);
enter(t, Thread::ExitState);
{ GcFinalizer* p = 0;
PROTECT(t, p);
for (p = t->m->finalizers; p;) {
GcFinalizer* f = p;
p = cast<GcFinalizer>(t, p->next());
void (*function)(Thread*, object);
memcpy(&function, &f->finalize(), BytesPerWord);
if (function) {
function(t, f->target());
}
}
for (p = t->m->tenuredFinalizers; p;) {
GcFinalizer* f = p;
p = cast<GcFinalizer>(t, p->next());
void (*function)(Thread*, object);
memcpy(&function, &f->finalize(), BytesPerWord);
if (function) {
function(t, f->target());
}
}
}
if (root(t, Machine::VirtualFiles)) {
for (unsigned i = 0; i < arrayLength(t, root(t, Machine::VirtualFiles));
++i)
{
object region = arrayBody(t, root(t, Machine::VirtualFiles), i);
if (region) {
static_cast<System::Region*>(regionRegion(t, region))->dispose();
}
}
}
for (object p = root(t, Machine::VirtualFileFinders);
p; p = finderNext(t, p))
{
static_cast<Finder*>(finderFinder(t, p))->dispose();
}
Machine* m = t->m;
visitAll(t, t->m->rootThread, disposeNoRemove);
System* s = m->system;
expect(s, m->threadCount == 0);
Heap* h = m->heap;
Processor* p = m->processor;
Classpath* c = m->classpath;
Finder* bf = m->bootFinder;
Finder* af = m->appFinder;
c->dispose();
h->disposeFixies();
m->dispose();
p->dispose();
bf->dispose();
af->dispose();
h->dispose();
s->dispose();
}
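// walk the thread tree, joining zombie threads and disposing of
// threads that have already been joined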
void
killZombies(Thread* t, Thread* o)
{
for (Thread* p = o->child; p;) {
Thread* child = p;
p = p->peer;
killZombies(t, child);
}
if ((o->flags & Thread::SystemFlag) == 0) {
switch (o->state) {
case Thread::ZombieState:
join(t, o);
// fall through
case Thread::JoinedState:
dispose(t, o, true);
default: break;
}
}
}
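// total heap words currently in use by t and its descendants,
// including the thread-local and backup allocation buffers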
unsigned
footprint(Thread* t)
{
expect(t, t->criticalLevel == 0);
unsigned n = t->heapOffset + t->heapIndex + t->backupHeapIndex;
for (Thread* c = t->child; c; c = c->peer) {
n += footprint(c);
}
return n;
}
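// visit the GC roots owned by each live thread: its java.lang.Thread
// object, pending exception, stack (via the processor), and any
// references registered with a Protector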
void
visitRoots(Thread* t, Heap::Visitor* v)
{
if (t->state != Thread::ZombieState) {
v->visit(&(t->javaThread));
v->visit(&(t->exception));
t->m->processor->visitObjects(t, v);
for (Thread::Protector* p = t->protector; p; p = p->next) {
p->visit(v);
}
}
for (Thread* c = t->child; c; c = c->peer) {
visitRoots(c, v);
}
}
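// generic heap object walker: visit the pointer fields of the fixed
// part according to the bitmask, then any pointer-bearing array
// elements; start allows resuming mid-object, and a false return from
// the visitor aborts the walk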
bool
walk(Thread*, Heap::Walker* w, uint32_t* mask, unsigned fixedSize,
unsigned arrayElementSize, unsigned arrayLength, unsigned start)
{
unsigned fixedSizeInWords = ceilingDivide(fixedSize, BytesPerWord);
unsigned arrayElementSizeInWords
= ceilingDivide(arrayElementSize, BytesPerWord);
for (unsigned i = start; i < fixedSizeInWords; ++i) {
if (mask[i / 32] & (static_cast<uint32_t>(1) << (i % 32))) {
if (not w->visit(i)) {
return false;
}
}
}
bool arrayObjectElements = false;
for (unsigned j = 0; j < arrayElementSizeInWords; ++j) {
unsigned k = fixedSizeInWords + j;
if (mask[k / 32] & (static_cast<uint32_t>(1) << (k % 32))) {
arrayObjectElements = true;
break;
}
}
if (arrayObjectElements) {
unsigned arrayStart;
unsigned elementStart;
if (start > fixedSizeInWords) {
unsigned s = start - fixedSizeInWords;
arrayStart = s / arrayElementSizeInWords;
elementStart = s % arrayElementSizeInWords;
} else {
arrayStart = 0;
elementStart = 0;
}
for (unsigned i = arrayStart; i < arrayLength; ++i) {
for (unsigned j = elementStart; j < arrayElementSizeInWords; ++j) {
unsigned k = fixedSizeInWords + j;
if (mask[k / 32] & (static_cast<uint32_t>(1) << (k % 32))) {
if (not w->visit
(fixedSizeInWords + (i * arrayElementSizeInWords) + j))
{
return false;
}
}
}
}
}
return true;
}
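// apply find to each interface in class_'s interface table; for
// ordinary classes each interface entry is followed by a vtable slot,
// hence the step of two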
object
findInInterfaces(Thread* t, GcClass* class_, object name, object spec,
object (*find)(Thread*, GcClass*, object, object))
{
object result = 0;
if (class_->interfaceTable()) {
for (unsigned i = 0;
i < arrayLength(t, class_->interfaceTable()) and result == 0;
i += 2)
{
result = find
(t, cast<GcClass>(t, arrayBody(t, class_->interfaceTable(), i)), name, spec);
}
}
return result;
}
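// the finalizer's target has died: queue the finalizer either on the
// native finalizeQueue (if it has a native finalize function) or on
// the ObjectsToFinalize root for Java-level finalization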
void
finalizerTargetUnreachable(Thread* t, Heap::Visitor* v, object* p)
{
v->visit(&finalizerTarget(t, *p));
object finalizer = *p;
*p = finalizerNext(t, finalizer);
void (*function)(Thread*, object);
memcpy(&function, &finalizerFinalize(t, finalizer), BytesPerWord);
if (function) {
finalizerNext(t, finalizer) = t->m->finalizeQueue;
t->m->finalizeQueue = finalizer;
} else {
set(t, finalizer, FinalizerQueueTarget, finalizerTarget(t, finalizer));
set(t, finalizer, FinalizerQueueNext, root(t, Machine::ObjectsToFinalize));
setRoot(t, Machine::ObjectsToFinalize, finalizer);
}
}
void
referenceTargetUnreachable(Thread* t, Heap::Visitor* v, object* p)
{
if (DebugReferences) {
fprintf(stderr, "target %p unreachable for reference %p\n",
jreferenceTarget(t, *p), *p);
}
v->visit(p);
jreferenceTarget(t, *p) = 0;
if (objectClass(t, *p) == type(t, GcCleaner::Type)) {
object reference = *p;
*p = jreferenceVmNext(t, reference);
set(t, reference, CleanerQueueNext, root(t, Machine::ObjectsToClean));
setRoot(t, Machine::ObjectsToClean, reference);
} else {
if (jreferenceQueue(t, *p)
and t->m->heap->status(jreferenceQueue(t, *p)) != Heap::Unreachable)
{
// queue is reachable - add the reference
v->visit(&jreferenceQueue(t, *p));
object q = jreferenceQueue(t, *p);
if (referenceQueueFront(t, q)) {
set(t, *p, JreferenceJNext, referenceQueueFront(t, q));
} else {
set(t, *p, JreferenceJNext, *p);
}
set(t, q, ReferenceQueueFront, *p);
jreferenceQueue(t, *p) = 0;
}
*p = jreferenceVmNext(t, *p);
}
}
void
referenceUnreachable(Thread* t, Heap::Visitor* v, object* p)
{
object r = static_cast<object>(t->m->heap->follow(*p));
if (DebugReferences) {
fprintf(stderr, "reference %p unreachable (target %p)\n",
*p, jreferenceTarget(t, r));
}
if (jreferenceQueue(t, r)
and t->m->heap->status(jreferenceQueue(t, r)) != Heap::Unreachable)
{
// queue is reachable - add the reference
referenceTargetUnreachable(t, v, p);
} else {
*p = jreferenceVmNext(t, *p);
}
}
void
referenceTargetReachable(Thread* t, Heap::Visitor* v, object* p)
{
if (DebugReferences) {
fprintf(stderr, "target %p reachable for reference %p\n",
jreferenceTarget(t, *p), *p);
}
v->visit(p);
v->visit(&jreferenceTarget(t, *p));
if (t->m->heap->status(jreferenceQueue(t, *p)) == Heap::Unreachable) {
jreferenceQueue(t, *p) = 0;
} else {
v->visit(&jreferenceQueue(t, *p));
}
}
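// an object needs finalization if it is unreachable and its class
// declares a finalizer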
bool
isFinalizable(Thread* t, object o)
{
return t->m->heap->status(o) == Heap::Unreachable
and (classVmFlags
(t, static_cast<object>(t->m->heap->follow(objectClass(t, o))))
& HasFinalizerFlag);
}
void
clearTargetIfFinalizable(Thread* t, object r)
{
if (isFinalizable
(t, static_cast<object>(t->m->heap->follow(jreferenceTarget(t, r)))))
{
jreferenceTarget(t, r) = 0;
}
}
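// called after the heap visit of a collection: clears weak references
// to objects awaiting finalization, enqueues dead references on their
// reference queues, and repartitions finalizers and weak references
// between the nursery and tenured lists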
void
postVisit(Thread* t, Heap::Visitor* v)
{
Machine* m = t->m;
bool major = m->heap->collectionType() == Heap::MajorCollection;
assertT(t, m->finalizeQueue == 0);
m->heap->postVisit();
for (object p = m->weakReferences; p;) {
object r = static_cast<object>(m->heap->follow(p));
p = jreferenceVmNext(t, r);
clearTargetIfFinalizable(t, r);
}
if (major) {
for (object p = m->tenuredWeakReferences; p;) {
object r = static_cast<object>(m->heap->follow(p));
p = jreferenceVmNext(t, r);
clearTargetIfFinalizable(t, r);
}
}
for (Reference* r = m->jniReferences; r; r = r->next) {
if (r->weak and isFinalizable
(t, static_cast<object>(t->m->heap->follow(r->target))))
{
r->target = 0;
}
}
object firstNewTenuredFinalizer = 0;
object lastNewTenuredFinalizer = 0;
{ object unreachable = 0;
for (GcFinalizer** p = &(m->finalizers); *p;) {
v->visit(p);
if (m->heap->status((*p)->target()) == Heap::Unreachable) {
GcFinalizer* finalizer = *p;
*p = cast<GcFinalizer>(t, finalizer->next());
finalizer->next() = unreachable;
unreachable = reinterpret_cast<object>(finalizer);
} else {
p = reinterpret_cast<GcFinalizer**>(&(*p)->next());
}
}
for (GcFinalizer** p = &(m->finalizers); *p;) {
// target is reachable
v->visit(&(*p)->target());
if (m->heap->status(*p) == Heap::Tenured) {
// the finalizer is tenured, so we remove it from
// m->finalizers and later add it to m->tenuredFinalizers
if (lastNewTenuredFinalizer == 0) {
lastNewTenuredFinalizer = reinterpret_cast<object>(*p);
}
GcFinalizer* finalizer = *p;
*p = cast<GcFinalizer>(t, finalizer->next());
finalizer->next() = firstNewTenuredFinalizer;
firstNewTenuredFinalizer = reinterpret_cast<object>(finalizer);
} else {
p = reinterpret_cast<GcFinalizer**>(&(*p)->next());
}
}
for (object* p = &unreachable; *p;) {
// target is unreachable - queue it up for finalization
finalizerTargetUnreachable(t, v, p);
}
}
object firstNewTenuredWeakReference = 0;
object lastNewTenuredWeakReference = 0;
for (object* p = &(m->weakReferences); *p;) {
if (m->heap->status(*p) == Heap::Unreachable) {
// reference is unreachable
referenceUnreachable(t, v, p);
} else if (m->heap->status
(jreferenceTarget
(t, static_cast<object>(m->heap->follow(*p))))
== Heap::Unreachable)
{
// target is unreachable
referenceTargetUnreachable(t, v, p);
} else {
// both reference and target are reachable
referenceTargetReachable(t, v, p);
if (m->heap->status(*p) == Heap::Tenured) {
// the reference is tenured, so we remove it from
// m->weakReferences and later add it to
// m->tenuredWeakReferences
if (lastNewTenuredWeakReference == 0) {
lastNewTenuredWeakReference = *p;
}
object reference = *p;
*p = jreferenceVmNext(t, reference);
jreferenceVmNext(t, reference) = firstNewTenuredWeakReference;
firstNewTenuredWeakReference = reference;
} else {
p = &jreferenceVmNext(t, *p);
}
}
}
if (major) {
{ object unreachable = 0;
for (GcFinalizer** p = &(m->tenuredFinalizers); *p;) {
v->visit(p);
if (m->heap->status((*p)->target()) == Heap::Unreachable) {
GcFinalizer* finalizer = *p;
*p = cast<GcFinalizer>(t, finalizer->next());
finalizer->next() = unreachable;
unreachable = reinterpret_cast<object>(finalizer);
} else {
p = reinterpret_cast<GcFinalizer**>(&(*p)->next());
}
}
for (GcFinalizer** p = &(m->tenuredFinalizers); *p;) {
// target is reachable
v->visit(&(*p)->target());
p = reinterpret_cast<GcFinalizer**>(&(*p)->next());
}
for (object* p = &unreachable; *p;) {
// target is unreachable - queue it up for finalization
finalizerTargetUnreachable(t, v, p);
}
}
for (object* p = &(m->tenuredWeakReferences); *p;) {
if (m->heap->status(*p) == Heap::Unreachable) {
// reference is unreachable
referenceUnreachable(t, v, p);
} else if (m->heap->status
(jreferenceTarget
(t, static_cast<object>(m->heap->follow(*p))))
== Heap::Unreachable)
{
// target is unreachable
referenceTargetUnreachable(t, v, p);
} else {
// both reference and target are reachable
referenceTargetReachable(t, v, p);
p = &jreferenceVmNext(t, *p);
}
}
}
if (lastNewTenuredFinalizer) {
finalizerNext(t, lastNewTenuredFinalizer) = reinterpret_cast<object>(m->tenuredFinalizers);
m->tenuredFinalizers = cast<GcFinalizer>(t, firstNewTenuredFinalizer);
}
if (lastNewTenuredWeakReference) {
jreferenceVmNext(t, lastNewTenuredWeakReference)
= m->tenuredWeakReferences;
m->tenuredWeakReferences = firstNewTenuredWeakReference;
}
for (Reference* r = m->jniReferences; r; r = r->next) {
if (r->weak) {
if (m->heap->status(r->target) == Heap::Unreachable) {
r->target = 0;
} else {
v->visit(&(r->target));
}
}
}
}
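// reset each thread's local allocation area after a collection; under
// VM_STRESS the heap is freed and reallocated so stale pointers into
// it fault early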
void
postCollect(Thread* t)
{
#ifdef VM_STRESS
t->m->heap->free(t->defaultHeap, ThreadHeapSizeInBytes);
t->defaultHeap = static_cast<uintptr_t*>
(t->m->heap->allocate(ThreadHeapSizeInBytes));
memset(t->defaultHeap, 0, ThreadHeapSizeInBytes);
#endif
if (t->heap == t->defaultHeap) {
memset(t->defaultHeap, 0, t->heapIndex * BytesPerWord);
} else {
memset(t->defaultHeap, 0, ThreadHeapSizeInBytes);
t->heap = t->defaultHeap;
}
t->heapOffset = 0;
if (t->m->heap->limitExceeded()) {
// if we're out of memory, pretend the thread-local heap is
// already full so we don't make things worse:
t->heapIndex = ThreadHeapSizeInWords;
} else {
t->heapIndex = 0;
}
2007-07-07 23:47:35 +00:00
if (t->flags & Thread::UseBackupHeapFlag) {
memset(t->backupHeap, 0, ThreadBackupHeapSizeInBytes);
t->flags &= ~Thread::UseBackupHeapFlag;
t->backupHeapIndex = 0;
}
for (Thread* c = t->child; c; c = c->peer) {
postCollect(c);
}
}
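// trampoline passed to run(): unpack the method and receiver from the
// argument array and invoke through the processor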
uint64_t
invoke(Thread* t, uintptr_t* arguments)
{
GcMethod* m = cast<GcMethod>(t, *reinterpret_cast<object*>(arguments[0]));
object o = *reinterpret_cast<object*>(arguments[1]);
t->m->processor->invoke(t, m, o);
return 1;
}
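// find a no-argument void method with the given name along o's class
// chain and invoke it, discarding any exception it throws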
void
finalizeObject(Thread* t, object o, const char* name)
{
for (GcClass* c = objectClass(t, o); c; c = c->super()) {
for (unsigned i = 0; i < arrayLength(t, c->methodTable()); ++i) {
object m = arrayBody(t, c->methodTable(), i);
if (vm::strcmp(reinterpret_cast<const int8_t*>(name),
&byteArrayBody(t, methodName(t, m), 0)) == 0
and vm::strcmp(reinterpret_cast<const int8_t*>("()V"),
&byteArrayBody(t, methodSpec(t, m), 0)) == 0)
{
PROTECT(t, m);
PROTECT(t, o);
uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(&m),
reinterpret_cast<uintptr_t>(&o) };
run(t, invoke, arguments);
t->exception = 0;
return;
}
}
}
abort(t);
}
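// consume the buffered byte in *value if present (resetting it to
// NoByte), otherwise read the next byte from the stream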
unsigned
readByte(AbstractStream& s, unsigned* value)
{
if (*value == NoByte) {
return s.read1();
} else {
unsigned r = *value;
*value = NoByte;
return r;
}
}
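// continue decoding a modified-UTF-8 constant into a char array once
// the first multi-byte sequence is seen, copying the ASCII prefix
// already decoded; handles 2- and 3-byte sequences and the 0xC0 0x80
// encoding of NUL, trimming the result to the decoded length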
object
parseUtf8NonAscii(Thread* t, AbstractStream& s, object bytesSoFar,
unsigned byteCount, unsigned sourceIndex, unsigned byteA,
unsigned byteB)
{
PROTECT(t, bytesSoFar);
unsigned length = byteArrayLength(t, bytesSoFar) - 1;
object value = reinterpret_cast<object>(makeCharArray(t, length + 1));
unsigned vi = 0;
for (; vi < byteCount; ++vi) {
charArrayBody(t, value, vi) = byteArrayBody(t, bytesSoFar, vi);
}
for (unsigned si = sourceIndex; si < length; ++si) {
unsigned a = readByte(s, &byteA);
if (a & 0x80) {
if (a & 0x20) {
// 3 bytes
si += 2;
assertT(t, si < length);
unsigned b = readByte(s, &byteA);
unsigned c = s.read1();
charArrayBody(t, value, vi++)
= ((a & 0xf) << 12) | ((b & 0x3f) << 6) | (c & 0x3f);
} else {
// 2 bytes
++ si;
assertT(t, si < length);
unsigned b = readByte(s, &byteB);
if (a == 0xC0 and b == 0x80) {
charArrayBody(t, value, vi++) = 0;
} else {
charArrayBody(t, value, vi++) = ((a & 0x1f) << 6) | (b & 0x3f);
}
}
} else {
charArrayBody(t, value, vi++) = a;
}
}
if (vi < length) {
PROTECT(t, value);
object v = reinterpret_cast<object>(makeCharArray(t, vi + 1));
memcpy(&charArrayBody(t, v, 0), &charArrayBody(t, value, 0), vi * 2);
value = v;
}
return value;
}
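// decode a modified-UTF-8 constant, staying in a compact byte array
// for pure ASCII and switching to parseUtf8NonAscii on the first
// multi-byte sequence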
object
parseUtf8(Thread* t, AbstractStream& s, unsigned length)
{
object value = reinterpret_cast<object>(makeByteArray(t, length + 1));
unsigned vi = 0;
for (unsigned si = 0; si < length; ++si) {
unsigned a = s.read1();
if (a & 0x80) {
if (a & 0x20) {
// 3 bytes
return parseUtf8NonAscii(t, s, value, vi, si, a, NoByte);
} else {
// 2 bytes
unsigned b = s.read1();
if (a == 0xC0 and b == 0x80) {
++ si;
assertT(t, si < length);
byteArrayBody(t, value, vi++) = 0;
} else {
return parseUtf8NonAscii(t, s, value, vi, si, a, b);
}
}
} else {
byteArrayBody(t, value, vi++) = a;
}
}
if (vi < length) {
PROTECT(t, value);
object v = reinterpret_cast<object>(makeByteArray(t, vi + 1));
memcpy(&byteArrayBody(t, v, 0), &byteArrayBody(t, value, 0), vi);
value = v;
}
return value;
}
object
makeByteArray(Thread* t, Stream& s, unsigned length)
{
object value = reinterpret_cast<object>(makeByteArray(t, length + 1));
s.read(reinterpret_cast<uint8_t*>(&byteArrayBody(t, value, 0)), length);
return value;
}
void
removeByteArray(Thread* t, object o)
{
hashMapRemove(t,
cast<GcHashMap>(t, root(t, Machine::ByteArrayMap)),
o,
byteArrayHash,
objectEqual);
}
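// canonicalize a byte array via the ByteArrayMap weak hash map,
// registering a finalizer to drop the entry when the array dies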
object
internByteArray(Thread* t, object array)
{
PROTECT(t, array);
ACQUIRE(t, t->m->referenceLock);
GcTriple* n = hashMapFindNode
(t, cast<GcHashMap>(t, root(t, Machine::ByteArrayMap)), array, byteArrayHash, byteArrayEqual);
if (n) {
return jreferenceTarget(t, n->first());
} else {
hashMapInsert(t, cast<GcHashMap>(t, root(t, Machine::ByteArrayMap)), array, 0, byteArrayHash);
addFinalizer(t, array, removeByteArray);
return array;
}
}
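// parse constant pool entry i, recursing into entries it references
// first; returns the number of pool slots consumed (two for
// long/double, one otherwise)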
unsigned
parsePoolEntry(Thread* t, Stream& s, uint32_t* index, GcSingleton* pool, unsigned i)
{
PROTECT(t, pool);
s.setPosition(index[i]);
switch (s.read1()) {
case CONSTANT_Integer:
case CONSTANT_Float: {
uint32_t v = s.read4();
singletonValue(t, pool, i) = v;
if(DebugClassReader) {
fprintf(stderr, " consts[%d] = int/float 0x%x\n", i, v);
}
} return 1;
case CONSTANT_Long:
case CONSTANT_Double: {
uint64_t v = s.read8();
memcpy(&singletonValue(t, pool, i), &v, 8);
if(DebugClassReader) {
fprintf(stderr, " consts[%d] = long/double <todo>\n", i);
}
} return 2;
case CONSTANT_Utf8: {
if (singletonObject(t, pool, i) == 0) {
object value = internByteArray(t, makeByteArray(t, s, s.read2()));
set(t, reinterpret_cast<object>(pool), SingletonBody + (i * BytesPerWord), value);
if(DebugClassReader) {
fprintf(stderr, " consts[%d] = utf8 %s\n", i, &byteArrayBody(t, value, 0));
}
}
} return 1;
case CONSTANT_Class: {
if (singletonObject(t, pool, i) == 0) {
unsigned si = s.read2() - 1;
parsePoolEntry(t, s, index, pool, si);
object value = reinterpret_cast<object>(makeReference(t, 0, 0, singletonObject(t, pool, si), 0));
set(t, reinterpret_cast<object>(pool), SingletonBody + (i * BytesPerWord), value);
if(DebugClassReader) {
fprintf(stderr, " consts[%d] = class <todo>\n", i);
}
}
} return 1;
case CONSTANT_String: {
if (singletonObject(t, pool, i) == 0) {
unsigned si = s.read2() - 1;
parsePoolEntry(t, s, index, pool, si);
object value = parseUtf8(t, singletonObject(t, pool, si));
value = t->m->classpath->makeString
2013-02-11 00:38:51 +00:00
(t, value, 0, fieldAtOffset<uintptr_t>(value, BytesPerWord) - 1);
value = intern(t, value);
set(t, reinterpret_cast<object>(pool), SingletonBody + (i * BytesPerWord), value);
if(DebugClassReader) {
fprintf(stderr, " consts[%d] = string <todo>\n", i);
}
}
} return 1;
case CONSTANT_NameAndType: {
if (singletonObject(t, pool, i) == 0) {
unsigned ni = s.read2() - 1;
unsigned ti = s.read2() - 1;
parsePoolEntry(t, s, index, pool, ni);
parsePoolEntry(t, s, index, pool, ti);
object name = singletonObject(t, pool, ni);
object type = singletonObject(t, pool, ti);
object value = reinterpret_cast<object>(makePair(t, name, type));
set(t, reinterpret_cast<object>(pool), SingletonBody + (i * BytesPerWord), value);
if(DebugClassReader) {
fprintf(stderr, " consts[%d] = nameAndType %s%s\n", i, &byteArrayBody(t, name, 0), &byteArrayBody(t, type, 0));
}
}
} return 1;
case CONSTANT_Fieldref:
case CONSTANT_Methodref:
case CONSTANT_InterfaceMethodref: {
if (singletonObject(t, pool, i) == 0) {
unsigned ci = s.read2() - 1;
unsigned nti = s.read2() - 1;
parsePoolEntry(t, s, index, pool, ci);
parsePoolEntry(t, s, index, pool, nti);
object className = referenceName(t, singletonObject(t, pool, ci));
object nameAndType = singletonObject(t, pool, nti);
object value = reinterpret_cast<object>(makeReference
(t, 0, className, pairFirst(t, nameAndType), pairSecond(t, nameAndType)));
set(t, reinterpret_cast<object>(pool), SingletonBody + (i * BytesPerWord), value);
if(DebugClassReader) {
fprintf(stderr, " consts[%d] = method %s.%s%s\n", i, &byteArrayBody(t, className, 0), &byteArrayBody(t, pairFirst(t, nameAndType), 0), &byteArrayBody(t, pairSecond(t, nameAndType), 0));
}
}
} return 1;
case CONSTANT_MethodHandle:
if (singletonObject(t, pool, i) == 0) {
unsigned kind = s.read1();
unsigned ri = s.read2() - 1;
parsePoolEntry(t, s, index, pool, ri);
object value = singletonObject(t, pool, ri);
if (DebugClassReader) {
fprintf(stderr, " consts[%d] = method handle %d %s.%s%s\n", i, kind,
&byteArrayBody(t, referenceClass(t, value), 0),
&byteArrayBody(t, referenceName(t, value), 0),
&byteArrayBody(t, referenceSpec(t, value), 0));
}
value = reinterpret_cast<object>(makeReference
(t, kind, referenceClass(t, value), referenceName(t, value),
referenceSpec(t, value)));
set(t, reinterpret_cast<object>(pool), SingletonBody + (i * BytesPerWord), value);
} return 1;
case CONSTANT_MethodType:
if (singletonObject(t, pool, i) == 0) {
unsigned ni = s.read2() - 1;
parsePoolEntry(t, s, index, pool, ni);
set(t, reinterpret_cast<object>(pool), SingletonBody + (i * BytesPerWord),
singletonObject(t, pool, ni));
} return 1;
case CONSTANT_InvokeDynamic:
if (singletonObject(t, pool, i) == 0) {
unsigned bootstrap = s.read2();
unsigned nti = s.read2() - 1;
parsePoolEntry(t, s, index, pool, nti);
object nameAndType = singletonObject(t, pool, nti);
const char* specString = reinterpret_cast<const char*>
(&byteArrayBody(t, pairSecond(t, nameAndType), 0));
unsigned parameterCount;
unsigned parameterFootprint;
unsigned returnCode;
scanMethodSpec
(t, specString, true, &parameterCount, &parameterFootprint,
&returnCode);
GcMethod* template_ = makeMethod
(t, 0, returnCode, parameterCount, parameterFootprint, 0, 0, 0, 0,
cast<GcByteArray>(t, pairFirst(t, nameAndType)), cast<GcByteArray>(t, pairSecond(t, nameAndType)), 0, 0, 0);
object value = reinterpret_cast
<object>(makeInvocation(t,
bootstrap,
-1,
0,
reinterpret_cast<object>(pool),
reinterpret_cast<object>(template_),
0));
set(t, reinterpret_cast<object>(pool), SingletonBody + (i * BytesPerWord), value);
} return 1;
default: abort(t);
}
}
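// parse the constant pool in two passes: the first records each
// entry's byte offset and marks which singleton slots hold objects
// (or raw float/double bits), the second parses the entries via
// parsePoolEntry in index order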
GcSingleton*
parsePool(Thread* t, Stream& s)
{
unsigned count = s.read2() - 1;
GcSingleton* pool = makeSingletonOfSize(t, count + poolMaskSize(count));
PROTECT(t, pool);
if(DebugClassReader) {
fprintf(stderr, " const pool entries %d\n", count);
}
if (count) {
uint32_t* index = static_cast<uint32_t*>(t->m->heap->allocate(count * 4));
THREAD_RESOURCE2(t, uint32_t*, index, unsigned, count,
t->m->heap->free(index, count * 4));
for (unsigned i = 0; i < count; ++i) {
index[i] = s.position();
switch (s.read1()) {
case CONSTANT_Class:
case CONSTANT_String:
singletonMarkObject(t, pool, i);
s.skip(2);
break;
case CONSTANT_Integer:
s.skip(4);
break;
case CONSTANT_Float:
singletonSetBit(t, pool, count, i);
s.skip(4);
break;
case CONSTANT_NameAndType:
case CONSTANT_Fieldref:
case CONSTANT_Methodref:
case CONSTANT_InterfaceMethodref:
singletonMarkObject(t, pool, i);
s.skip(4);
break;
case CONSTANT_Long:
s.skip(8);
++ i;
break;
case CONSTANT_Double:
singletonSetBit(t, pool, count, i);
singletonSetBit(t, pool, count, i + 1);
s.skip(8);
++ i;
break;
case CONSTANT_Utf8:
singletonMarkObject(t, pool, i);
s.skip(s.read2());
break;
case CONSTANT_MethodHandle:
singletonMarkObject(t, pool, i);
s.skip(3);
break;
case CONSTANT_MethodType:
singletonMarkObject(t, pool, i);
s.skip(2);
break;
case CONSTANT_InvokeDynamic:
singletonMarkObject(t, pool, i);
s.skip(4);
break;
default: abort(t);
}
}
unsigned end = s.position();
for (unsigned i = 0; i < count;) {
i += parsePoolEntry(t, s, index, pool, i);
}
s.setPosition(end);
}
return pool;
}
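// add every interface implemented by class_ (directly or inherited)
// to map, keyed by name; interface classes store interfaces at every
// table index, ordinary classes at every other index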
void
addInterfaces(Thread* t, GcClass* class_, GcHashMap* map)
{
object table = class_->interfaceTable();
if (table) {
unsigned increment = 2;
if (class_->flags() & ACC_INTERFACE) {
increment = 1;
}
PROTECT(t, map);
PROTECT(t, table);
for (unsigned i = 0; i < arrayLength(t, table); i += increment) {
GcClass* interface = cast<GcClass>(t, arrayBody(t, table, i));
GcByteArray* name = interface->name();
hashMapInsertMaybe(t,
map,
reinterpret_cast<object>(name),
reinterpret_cast<object>(interface),
byteArrayHash,
byteArrayEqual);
}
}
}
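// return the class's addendum, creating and installing an empty one
// on demand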
GcClassAddendum*
getClassAddendum(Thread* t, GcClass* class_, GcSingleton* pool)
{
GcClassAddendum* addendum = class_->addendum();
if (addendum == 0) {
PROTECT(t, class_);
addendum = makeClassAddendum(t, pool, 0, 0, 0, 0, -1, 0, 0);
set(t,
reinterpret_cast<object>(class_),
ClassAddendum,
reinterpret_cast<object>(addendum));
}
return addendum;
}
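// build the class's interface table from the class file: resolve each
// named interface, merge in the superclass's interfaces, and for
// non-interface classes reserve a vtable slot per interface to be
// filled in later by parseMethodTable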
void
parseInterfaceTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool,
Gc::Type throwType)
{
PROTECT(t, class_);
PROTECT(t, pool);
GcHashMap* map = makeHashMap(t, 0, 0);
PROTECT(t, map);
if (class_->super()) {
addInterfaces(t, class_->super(), map);
}
unsigned count = s.read2();
object table = 0;
PROTECT(t, table);
if (count) {
table = reinterpret_cast<object>(makeArray(t, count));
object addendum = reinterpret_cast
<object>(getClassAddendum(t, class_, pool));
set(t, addendum, ClassAddendumInterfaceTable, table);
}
for (unsigned i = 0; i < count; ++i) {
object name = referenceName(t, singletonObject(t, pool, s.read2() - 1));
PROTECT(t, name);
GcClass* interface = resolveClass
(t, class_->loader(), name, true, throwType);
PROTECT(t, interface);
set(t, table, ArrayBody + (i * BytesPerWord), reinterpret_cast<object>(interface));
hashMapInsertMaybe(t, map, name, reinterpret_cast<object>(interface), byteArrayHash, byteArrayEqual);
addInterfaces(t, interface, map);
}
object interfaceTable = 0;
if (map->size()) {
unsigned length = map->size();
if ((class_->flags() & ACC_INTERFACE) == 0) {
length *= 2;
}
interfaceTable = reinterpret_cast<object>(makeArray(t, length));
PROTECT(t, interfaceTable);
unsigned i = 0;
for (HashMapIterator it(t, map); it.hasMore();) {
GcClass* interface = cast<GcClass>(t, it.next()->second());
set(t, interfaceTable, ArrayBody + (i * BytesPerWord), reinterpret_cast<object>(interface));
++ i;
if ((class_->flags() & ACC_INTERFACE) == 0) {
if (interface->virtualTable()) {
// we'll fill in this table in parseMethodTable():
object vtable = reinterpret_cast<object>(makeArray
(t, arrayLength(t, interface->virtualTable())));
set(t, interfaceTable, ArrayBody + (i * BytesPerWord), vtable);
}
++i;
}
}
}
set(t, reinterpret_cast<object>(class_), ClassInterfaceTable, interfaceTable);
}
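// parse the field section: assign member and static offsets with
// alignment padding, apply ConstantValue initializers to the static
// table, attach Signature/annotation addenda, and compute the object
// mask that tells the GC which words hold references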
void
parseFieldTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
{
PROTECT(t, class_);
PROTECT(t, pool);
unsigned memberOffset = BytesPerWord;
if (class_->super()) {
memberOffset = class_->super()->fixedSize();
}
unsigned count = s.read2();
if (count) {
unsigned staticOffset = BytesPerWord * 3;
unsigned staticCount = 0;
2014-05-29 04:17:25 +00:00
object fieldTable = reinterpret_cast<object>(makeArray(t, count));
PROTECT(t, fieldTable);
object staticValueTable = reinterpret_cast<object>(makeIntArray(t, count));
PROTECT(t, staticValueTable);
object addendum = 0;
PROTECT(t, addendum);
THREAD_RUNTIME_ARRAY(t, uint8_t, staticTypes, count);
for (unsigned i = 0; i < count; ++i) {
unsigned flags = s.read2();
unsigned name = s.read2();
unsigned spec = s.read2();
unsigned value = 0;
addendum = 0;
unsigned code = fieldCode
(t, byteArrayBody(t, singletonObject(t, pool, spec - 1), 0));
unsigned attributeCount = s.read2();
for (unsigned j = 0; j < attributeCount; ++j) {
object name = singletonObject(t, pool, s.read2() - 1);
unsigned length = s.read4();
if (vm::strcmp(reinterpret_cast<const int8_t*>("ConstantValue"),
&byteArrayBody(t, name, 0)) == 0)
{
value = s.read2();
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("Signature"),
&byteArrayBody(t, name, 0)) == 0)
{
if (addendum == 0) {
addendum = reinterpret_cast<object>(
makeFieldAddendum(t, pool, 0, 0));
}
set(t, addendum, AddendumSignature,
singletonObject(t, pool, s.read2() - 1));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("RuntimeVisibleAnnotations"),
&byteArrayBody(t, name, 0)) == 0)
{
if (addendum == 0) {
addendum = reinterpret_cast<object>(
makeFieldAddendum(t, pool, 0, 0));
}
object body = reinterpret_cast<object>(makeByteArray(t, length));
s.read(reinterpret_cast<uint8_t*>(&byteArrayBody(t, body, 0)),
length);
set(t, addendum, AddendumAnnotationTable, body);
} else {
s.skip(length);
}
}
GcField* field = makeField
(t,
0, // vm flags
code,
flags,
0, // offset
0, // native ID
cast<GcByteArray>(t, singletonObject(t, pool, name - 1)),
cast<GcByteArray>(t, singletonObject(t, pool, spec - 1)),
cast<GcFieldAddendum>(t, addendum),
class_);
unsigned size = fieldSize(t, code);
if (flags & ACC_STATIC) {
staticOffset = pad(staticOffset, size);
field->offset() = staticOffset;
staticOffset += size;
intArrayBody(t, staticValueTable, staticCount) = value;
RUNTIME_ARRAY_BODY(staticTypes)[staticCount++] = code;
} else {
if (flags & ACC_FINAL) {
class_->vmFlags() |= HasFinalMemberFlag;
}
memberOffset = pad(memberOffset, size);
field->offset() = memberOffset;
memberOffset += size;
}
set(t, fieldTable, ArrayBody + (i * BytesPerWord), reinterpret_cast<object>(field));
}
set(t, reinterpret_cast<object>(class_), ClassFieldTable, fieldTable);
if (staticCount) {
unsigned footprint = ceilingDivide(staticOffset - (BytesPerWord * 2),
BytesPerWord);
GcSingleton* staticTable = makeSingletonOfSize(t, footprint);
uint8_t* body = reinterpret_cast<uint8_t*>
(&singletonBody(t, reinterpret_cast<object>(staticTable), 0));
memcpy(body, &class_, BytesPerWord);
singletonMarkObject(t, staticTable, 0);
for (unsigned i = 0, offset = BytesPerWord; i < staticCount; ++i) {
unsigned size = fieldSize(t, RUNTIME_ARRAY_BODY(staticTypes)[i]);
offset = pad(offset, size);
unsigned value = intArrayBody(t, staticValueTable, i);
if (value) {
switch (RUNTIME_ARRAY_BODY(staticTypes)[i]) {
case ByteField:
case BooleanField:
body[offset] = singletonValue(t, pool, value - 1);
break;
case CharField:
case ShortField:
*reinterpret_cast<uint16_t*>(body + offset)
= singletonValue(t, pool, value - 1);
break;
case IntField:
case FloatField:
*reinterpret_cast<uint32_t*>(body + offset)
= singletonValue(t, pool, value - 1);
break;
case LongField:
case DoubleField:
memcpy(body + offset, &singletonValue(t, pool, value - 1), 8);
break;
case ObjectField:
memcpy(body + offset,
&singletonObject(t, pool, value - 1),
BytesPerWord);
break;
default: abort(t);
}
}
if (RUNTIME_ARRAY_BODY(staticTypes)[i] == ObjectField) {
singletonMarkObject(t, staticTable, offset / BytesPerWord);
}
offset += size;
}
set(t,
reinterpret_cast<object>(class_),
ClassStaticTable,
reinterpret_cast<object>(staticTable));
}
}
class_->fixedSize() = memberOffset;
if (class_->super()
and memberOffset == class_->super()->fixedSize())
{
set(t, reinterpret_cast<object>(class_), ClassObjectMask,
reinterpret_cast<object>(class_->super()->objectMask()));
} else {
object mask = reinterpret_cast<object>(makeIntArray
(t, ceilingDivide(class_->fixedSize(), 32 * BytesPerWord)));
intArrayBody(t, mask, 0) = 1;
object superMask = 0;
if (class_->super()) {
superMask = reinterpret_cast<object>(class_->super()->objectMask());
if (superMask) {
memcpy(&intArrayBody(t, mask, 0),
&intArrayBody(t, superMask, 0),
ceilingDivide(class_->super()->fixedSize(),
32 * BytesPerWord)
* 4);
}
}
bool sawReferenceField = false;
object fieldTable = class_->fieldTable();
if (fieldTable) {
for (int i = arrayLength(t, fieldTable) - 1; i >= 0; --i) {
object field = arrayBody(t, fieldTable, i);
if ((fieldFlags(t, field) & ACC_STATIC) == 0
and fieldCode(t, field) == ObjectField)
{
unsigned index = fieldOffset(t, field) / BytesPerWord;
intArrayBody(t, mask, (index / 32)) |= 1 << (index % 32);
sawReferenceField = true;
}
}
}
if (superMask or sawReferenceField) {
set(t, reinterpret_cast<object>(class_), ClassObjectMask, mask);
}
}
}
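// big-endian operand readers and a bytecode disassembler used for
// debugging; ip is advanced past the bytes consumed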
uint16_t read16(uint8_t* code, unsigned& ip) {
uint16_t a = code[ip++];
uint16_t b = code[ip++];
return (a << 8) | b;
}
uint32_t read32(uint8_t* code, unsigned& ip) {
// read the four operand bytes in order, most significant first
uint32_t a = code[ip++];
uint32_t b = code[ip++];
uint32_t c = code[ip++];
uint32_t d = code[ip++];
return (a << 24) | (b << 16) | (c << 8) | d;
}
void
disassembleCode(const char* prefix, uint8_t* code, unsigned length)
{
unsigned ip = 0;
while(ip < length) {
unsigned instr;
fprintf(stderr, "%s%x:\t", prefix, ip);
switch (instr = code[ip++]) {
case aaload: fprintf(stderr, "aaload\n"); break;
case aastore: fprintf(stderr, "aastore\n"); break;
case aconst_null: fprintf(stderr, "aconst_null\n"); break;
case aload: fprintf(stderr, "aload %02x\n", code[ip++]); break;
case aload_0: fprintf(stderr, "aload_0\n"); break;
case aload_1: fprintf(stderr, "aload_1\n"); break;
case aload_2: fprintf(stderr, "aload_2\n"); break;
case aload_3: fprintf(stderr, "aload_3\n"); break;
case anewarray: fprintf(stderr, "anewarray %04x\n", read16(code, ip)); break;
case areturn: fprintf(stderr, "areturn\n"); break;
case arraylength: fprintf(stderr, "arraylength\n"); break;
case astore: fprintf(stderr, "astore %02x\n", code[ip++]); break;
case astore_0: fprintf(stderr, "astore_0\n"); break;
case astore_1: fprintf(stderr, "astore_1\n"); break;
case astore_2: fprintf(stderr, "astore_2\n"); break;
case astore_3: fprintf(stderr, "astore_3\n"); break;
case athrow: fprintf(stderr, "athrow\n"); break;
case baload: fprintf(stderr, "baload\n"); break;
case bastore: fprintf(stderr, "bastore\n"); break;
case bipush: fprintf(stderr, "bipush %02x\n", code[ip++]); break;
case caload: fprintf(stderr, "caload\n"); break;
case castore: fprintf(stderr, "castore\n"); break;
case checkcast: fprintf(stderr, "checkcast %04x\n", read16(code, ip)); break;
case d2f: fprintf(stderr, "d2f\n"); break;
case d2i: fprintf(stderr, "d2i\n"); break;
case d2l: fprintf(stderr, "d2l\n"); break;
case dadd: fprintf(stderr, "dadd\n"); break;
case daload: fprintf(stderr, "daload\n"); break;
case dastore: fprintf(stderr, "dastore\n"); break;
case dcmpg: fprintf(stderr, "dcmpg\n"); break;
case dcmpl: fprintf(stderr, "dcmpl\n"); break;
case dconst_0: fprintf(stderr, "dconst_0\n"); break;
case dconst_1: fprintf(stderr, "dconst_1\n"); break;
case ddiv: fprintf(stderr, "ddiv\n"); break;
case dmul: fprintf(stderr, "dmul\n"); break;
case dneg: fprintf(stderr, "dneg\n"); break;
case vm::drem: fprintf(stderr, "drem\n"); break;
case dsub: fprintf(stderr, "dsub\n"); break;
case dup: fprintf(stderr, "dup\n"); break;
case dup_x1: fprintf(stderr, "dup_x1\n"); break;
case dup_x2: fprintf(stderr, "dup_x2\n"); break;
case dup2: fprintf(stderr, "dup2\n"); break;
case dup2_x1: fprintf(stderr, "dup2_x1\n"); break;
case dup2_x2: fprintf(stderr, "dup2_x2\n"); break;
case f2d: fprintf(stderr, "f2d\n"); break;
case f2i: fprintf(stderr, "f2i\n"); break;
case f2l: fprintf(stderr, "f2l\n"); break;
case fadd: fprintf(stderr, "fadd\n"); break;
case faload: fprintf(stderr, "faload\n"); break;
case fastore: fprintf(stderr, "fastore\n"); break;
case fcmpg: fprintf(stderr, "fcmpg\n"); break;
case fcmpl: fprintf(stderr, "fcmpl\n"); break;
case fconst_0: fprintf(stderr, "fconst_0\n"); break;
case fconst_1: fprintf(stderr, "fconst_1\n"); break;
case fconst_2: fprintf(stderr, "fconst_2\n"); break;
case fdiv: fprintf(stderr, "fdiv\n"); break;
case fmul: fprintf(stderr, "fmul\n"); break;
case fneg: fprintf(stderr, "fneg\n"); break;
case frem: fprintf(stderr, "frem\n"); break;
case fsub: fprintf(stderr, "fsub\n"); break;
case getfield: fprintf(stderr, "getfield %04x\n", read16(code, ip)); break;
case getstatic: fprintf(stderr, "getstatic %04x\n", read16(code, ip)); break;
case goto_: {
int16_t offset = read16(code, ip);
fprintf(stderr, "goto %04x\n", offset + ip - 3);
} break;
case goto_w: {
int32_t offset = read32(code, ip);
fprintf(stderr, "goto_w %08x\n", offset + ip - 5);
} break;
case i2b: fprintf(stderr, "i2b\n"); break;
case i2c: fprintf(stderr, "i2c\n"); break;
case i2d: fprintf(stderr, "i2d\n"); break;
case i2f: fprintf(stderr, "i2f\n"); break;
case i2l: fprintf(stderr, "i2l\n"); break;
case i2s: fprintf(stderr, "i2s\n"); break;
case iadd: fprintf(stderr, "iadd\n"); break;
case iaload: fprintf(stderr, "iaload\n"); break;
case iand: fprintf(stderr, "iand\n"); break;
case iastore: fprintf(stderr, "iastore\n"); break;
case iconst_m1: fprintf(stderr, "iconst_m1\n"); break;
case iconst_0: fprintf(stderr, "iconst_0\n"); break;
case iconst_1: fprintf(stderr, "iconst_1\n"); break;
case iconst_2: fprintf(stderr, "iconst_2\n"); break;
case iconst_3: fprintf(stderr, "iconst_3\n"); break;
case iconst_4: fprintf(stderr, "iconst_4\n"); break;
case iconst_5: fprintf(stderr, "iconst_5\n"); break;
case idiv: fprintf(stderr, "idiv\n"); break;
case if_acmpeq: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_acmpeq %04x\n", offset + ip - 3);
} break;
case if_acmpne: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_acmpne %04x\n", offset + ip - 3);
} break;
case if_icmpeq: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmpeq %04x\n", offset + ip - 3);
} break;
case if_icmpne: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmpne %04x\n", offset + ip - 3);
} break;
case if_icmpgt: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmpgt %04x\n", offset + ip - 3);
} break;
case if_icmpge: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmpge %04x\n", offset + ip - 3);
} break;
case if_icmplt: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmplt %04x\n", offset + ip - 3);
} break;
case if_icmple: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmple %04x\n", offset + ip - 3);
} break;
case ifeq: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifeq %04x\n", offset + ip - 3);
} break;
case ifne: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifne %04x\n", offset + ip - 3);
} break;
case ifgt: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifgt %04x\n", offset + ip - 3);
} break;
case ifge: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifge %04x\n", offset + ip - 3);
} break;
case iflt: {
int16_t offset = read16(code, ip);
fprintf(stderr, "iflt %04x\n", offset + ip - 3);
} break;
case ifle: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifle %04x\n", offset + ip - 3);
} break;
case ifnonnull: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifnonnull %04x\n", offset + ip - 3);
} break;
case ifnull: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifnull %04x\n", offset + ip - 3);
} break;
case iinc: {
uint8_t a = code[ip++];
uint8_t b = code[ip++];
fprintf(stderr, "iinc %02x %02x\n", a, b);
} break;
case iload: fprintf(stderr, "iload %02x\n", code[ip++]); break;
case fload: fprintf(stderr, "fload %02x\n", code[ip++]); break;
case iload_0: fprintf(stderr, "iload_0\n"); break;
case fload_0: fprintf(stderr, "fload_0\n"); break;
case iload_1: fprintf(stderr, "iload_1\n"); break;
case fload_1: fprintf(stderr, "fload_1\n"); break;
case iload_2: fprintf(stderr, "iload_2\n"); break;
case fload_2: fprintf(stderr, "fload_2\n"); break;
case iload_3: fprintf(stderr, "iload_3\n"); break;
case fload_3: fprintf(stderr, "fload_3\n"); break;
case imul: fprintf(stderr, "imul\n"); break;
case ineg: fprintf(stderr, "ineg\n"); break;
case instanceof: fprintf(stderr, "instanceof %04x\n", read16(code, ip)); break;
case invokeinterface: fprintf(stderr, "invokeinterface %04x\n", read16(code, ip)); break;
case invokespecial: fprintf(stderr, "invokespecial %04x\n", read16(code, ip)); break;
case invokestatic: fprintf(stderr, "invokestatic %04x\n", read16(code, ip)); break;
case invokevirtual: fprintf(stderr, "invokevirtual %04x\n", read16(code, ip)); break;
case ior: fprintf(stderr, "ior\n"); break;
case irem: fprintf(stderr, "irem\n"); break;
case ireturn: fprintf(stderr, "ireturn\n"); break;
case freturn: fprintf(stderr, "freturn\n"); break;
case ishl: fprintf(stderr, "ishl\n"); break;
case ishr: fprintf(stderr, "ishr\n"); break;
case istore: fprintf(stderr, "istore %02x\n", code[ip++]); break;
case fstore: fprintf(stderr, "fstore %02x\n", code[ip++]); break;
case istore_0: fprintf(stderr, "istore_0\n"); break;
case fstore_0: fprintf(stderr, "fstore_0\n"); break;
case istore_1: fprintf(stderr, "istore_1\n"); break;
case fstore_1: fprintf(stderr, "fstore_1\n"); break;
case istore_2: fprintf(stderr, "istore_2\n"); break;
case fstore_2: fprintf(stderr, "fstore_2\n"); break;
case istore_3: fprintf(stderr, "istore_3\n"); break;
case fstore_3: fprintf(stderr, "fstore_3\n"); break;
case isub: fprintf(stderr, "isub\n"); break;
case iushr: fprintf(stderr, "iushr\n"); break;
case ixor: fprintf(stderr, "ixor\n"); break;
case jsr: fprintf(stderr, "jsr %04x\n", read16(code, ip)); break;
case jsr_w: fprintf(stderr, "jsr_w %08x\n", read32(code, ip)); break;
case l2d: fprintf(stderr, "l2d\n"); break;
case l2f: fprintf(stderr, "l2f\n"); break;
case l2i: fprintf(stderr, "l2i\n"); break;
case ladd: fprintf(stderr, "ladd\n"); break;
case laload: fprintf(stderr, "laload\n"); break;
case land: fprintf(stderr, "land\n"); break;
case lastore: fprintf(stderr, "lastore\n"); break;
case lcmp: fprintf(stderr, "lcmp\n"); break;
case lconst_0: fprintf(stderr, "lconst_0\n"); break;
case lconst_1: fprintf(stderr, "lconst_1\n"); break;
  case ldc: fprintf(stderr, "ldc %02x\n", code[ip++]); break;
  case ldc_w: fprintf(stderr, "ldc_w %04x\n", read16(code, ip)); break;
case ldc2_w: fprintf(stderr, "ldc2_w %04x\n", read16(code, ip)); break;
case ldiv_: fprintf(stderr, "ldiv_\n"); break;
case lload: fprintf(stderr, "lload %02x\n", code[ip++]); break;
case dload: fprintf(stderr, "dload %02x\n", code[ip++]); break;
case lload_0: fprintf(stderr, "lload_0\n"); break;
case dload_0: fprintf(stderr, "dload_0\n"); break;
case lload_1: fprintf(stderr, "lload_1\n"); break;
case dload_1: fprintf(stderr, "dload_1\n"); break;
case lload_2: fprintf(stderr, "lload_2\n"); break;
case dload_2: fprintf(stderr, "dload_2\n"); break;
case lload_3: fprintf(stderr, "lload_3\n"); break;
case dload_3: fprintf(stderr, "dload_3\n"); break;
case lmul: fprintf(stderr, "lmul\n"); break;
case lneg: fprintf(stderr, "lneg\n"); break;
  case lookupswitch: {
    // per the JVM spec, the opcode is followed by padding to the next
    // four-byte boundary relative to the start of the method
    while (ip % 4) ++ip;
    int32_t default_ = read32(code, ip);
    int32_t pairCount = read32(code, ip);
    fprintf(stderr, "lookupswitch default: %d pairCount: %d\n", default_, pairCount);
    for (int i = 0; i < pairCount; i++) {
      int32_t k = read32(code, ip);
      int32_t d = read32(code, ip);
      fprintf(stderr, "%s key: %02x dest: %2x\n", prefix, k, d);
    }
  } break;
case lor: fprintf(stderr, "lor\n"); break;
case lrem: fprintf(stderr, "lrem\n"); break;
case lreturn: fprintf(stderr, "lreturn\n"); break;
case dreturn: fprintf(stderr, "dreturn\n"); break;
case lshl: fprintf(stderr, "lshl\n"); break;
case lshr: fprintf(stderr, "lshr\n"); break;
case lstore: fprintf(stderr, "lstore %02x\n", code[ip++]); break;
case dstore: fprintf(stderr, "dstore %02x\n", code[ip++]); break;
case lstore_0: fprintf(stderr, "lstore_0\n"); break;
case dstore_0: fprintf(stderr, "dstore_0\n"); break;
case lstore_1: fprintf(stderr, "lstore_1\n"); break;
case dstore_1: fprintf(stderr, "dstore_1\n"); break;
case lstore_2: fprintf(stderr, "lstore_2\n"); break;
case dstore_2: fprintf(stderr, "dstore_2\n"); break;
case lstore_3: fprintf(stderr, "lstore_3\n"); break;
case dstore_3: fprintf(stderr, "dstore_3\n"); break;
case lsub: fprintf(stderr, "lsub\n"); break;
case lushr: fprintf(stderr, "lushr\n"); break;
case lxor: fprintf(stderr, "lxor\n"); break;
case monitorenter: fprintf(stderr, "monitorenter\n"); break;
case monitorexit: fprintf(stderr, "monitorexit\n"); break;
case multianewarray: {
unsigned type = read16(code, ip);
fprintf(stderr, "multianewarray %04x %02x\n", type, code[ip++]);
} break;
case new_: fprintf(stderr, "new %04x\n", read16(code, ip)); break;
case newarray: fprintf(stderr, "newarray %02x\n", code[ip++]); break;
case nop: fprintf(stderr, "nop\n"); break;
case pop_: fprintf(stderr, "pop\n"); break;
case pop2: fprintf(stderr, "pop2\n"); break;
case putfield: fprintf(stderr, "putfield %04x\n", read16(code, ip)); break;
case putstatic: fprintf(stderr, "putstatic %04x\n", read16(code, ip)); break;
case ret: fprintf(stderr, "ret %02x\n", code[ip++]); break;
case return_: fprintf(stderr, "return_\n"); break;
case saload: fprintf(stderr, "saload\n"); break;
case sastore: fprintf(stderr, "sastore\n"); break;
case sipush: fprintf(stderr, "sipush %04x\n", read16(code, ip)); break;
case swap: fprintf(stderr, "swap\n"); break;
  case tableswitch: {
    // skip the same four-byte alignment padding as lookupswitch
    while (ip % 4) ++ip;
    int32_t default_ = read32(code, ip);
    int32_t bottom = read32(code, ip);
    int32_t top = read32(code, ip);
    fprintf(stderr, "tableswitch default: %d bottom: %d top: %d\n", default_, bottom, top);
    for (int i = 0; i < top - bottom + 1; i++) {
      int32_t d = read32(code, ip);
      fprintf(stderr, "%s key: %d dest: %2x\n", prefix, i + bottom, d);
    }
  } break;
case wide: {
switch (code[ip++]) {
case aload: fprintf(stderr, "wide aload %04x\n", read16(code, ip)); break;
case astore: fprintf(stderr, "wide astore %04x\n", read16(code, ip)); break;
case iinc: fprintf(stderr, "wide iinc %04x %04x\n", read16(code, ip), read16(code, ip)); break;
case iload: fprintf(stderr, "wide iload %04x\n", read16(code, ip)); break;
case istore: fprintf(stderr, "wide istore %04x\n", read16(code, ip)); break;
case lload: fprintf(stderr, "wide lload %04x\n", read16(code, ip)); break;
case lstore: fprintf(stderr, "wide lstore %04x\n", read16(code, ip)); break;
case ret: fprintf(stderr, "wide ret %04x\n", read16(code, ip)); break;
default: {
fprintf(stderr, "unknown wide instruction %02x %04x\n", instr, read16(code, ip));
}
}
} break;
default: {
fprintf(stderr, "unknown instruction %02x\n", instr);
}
}
}
}
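// parses a "Code" attribute from the class file: u2 max_stack,
// u2 max_locals, u4 code_length, the bytecode itself, the exception
// handler table, and nested attributes, of which only LineNumberTable
// is retained; everything else is skipped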
object
parseCode(Thread* t, Stream& s, GcSingleton* pool)
{
PROTECT(t, pool);
unsigned maxStack = s.read2();
unsigned maxLocals = s.read2();
unsigned length = s.read4();
if(DebugClassReader) {
fprintf(stderr, " code: maxStack %d maxLocals %d length %d\n", maxStack, maxLocals, length);
}
object code = reinterpret_cast<object>(makeCode(t, pool, 0, 0, 0, 0, 0, maxStack, maxLocals, length));
s.read(&codeBody(t, code, 0), length);
PROTECT(t, code);
if(DebugClassReader) {
disassembleCode(" ", &codeBody(t, code, 0), length);
}
unsigned ehtLength = s.read2();
if (ehtLength) {
object eht = reinterpret_cast<object>(makeExceptionHandlerTable(t, ehtLength));
for (unsigned i = 0; i < ehtLength; ++i) {
unsigned start = s.read2();
unsigned end = s.read2();
unsigned ip = s.read2();
unsigned catchType = s.read2();
exceptionHandlerTableBody(t, eht, i) = exceptionHandler
(start, end, ip, catchType);
}
set(t, code, CodeExceptionHandlerTable, eht);
}
unsigned attributeCount = s.read2();
for (unsigned j = 0; j < attributeCount; ++j) {
object name = singletonObject(t, pool, s.read2() - 1);
unsigned length = s.read4();
if (vm::strcmp(reinterpret_cast<const int8_t*>("LineNumberTable"),
&byteArrayBody(t, name, 0)) == 0)
{
unsigned lntLength = s.read2();
object lnt = reinterpret_cast<object>(makeLineNumberTable(t, lntLength));
for (unsigned i = 0; i < lntLength; ++i) {
unsigned ip = s.read2();
unsigned line = s.read2();
lineNumberTableBody(t, lnt, i) = lineNumber(ip, line);
}
set(t, code, CodeLineNumberTable, lnt);
} else {
s.skip(length);
}
}
return code;
}
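// ensures every method declared by the interfaces of class_ has an
// entry in virtualMap, synthesizing a placeholder method (with a fresh
// vtable offset) for each interface method the class does not
// otherwise define. When makeList is true, the synthesized methods are
// returned as a list so the caller can append them to the method
// table.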
object
addInterfaceMethods(Thread* t, GcClass* class_, GcHashMap* virtualMap,
unsigned* virtualCount, bool makeList)
{
object itable = class_->interfaceTable();
if (itable) {
PROTECT(t, class_);
PROTECT(t, virtualMap);
PROTECT(t, itable);
object list = 0;
PROTECT(t, list);
GcMethod* method = 0;
PROTECT(t, method);
object vtable = 0;
PROTECT(t, vtable);
unsigned stride = (class_->flags() & ACC_INTERFACE) ? 1 : 2;
for (unsigned i = 0; i < arrayLength(t, itable); i += stride) {
vtable = classVirtualTable(t, arrayBody(t, itable, i));
if (vtable) {
for (unsigned j = 0; j < arrayLength(t, vtable); ++j) {
method = cast<GcMethod>(t, arrayBody(t, vtable, j));
GcTriple* n = hashMapFindNode
(t, virtualMap, reinterpret_cast<object>(method), methodHash, methodEqual);
if (n == 0) {
method = makeMethod
(t,
method->vmFlags(),
method->returnCode(),
method->parameterCount(),
method->parameterFootprint(),
method->flags(),
(*virtualCount)++,
0,
0,
method->name(),
method->spec(),
0,
class_,
0);
hashMapInsert(t,
virtualMap,
reinterpret_cast<object>(method),
reinterpret_cast<object>(method),
methodHash);
if (makeList) {
if (list == 0) {
list = reinterpret_cast<object>(vm::makeList(t, 0, 0, 0));
}
listAppend(t, cast<GcList>(t, list), reinterpret_cast<object>(method));
}
}
}
}
}
return list;
}
return 0;
}
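// reads the class file's method table: for each method, parses its
// flags, name, spec, and attributes (Code, Exceptions,
// AnnotationDefault, Signature, and the annotation tables), then
// builds or inherits the class's vtable and the per-interface vtables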
void
parseMethodTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
{
PROTECT(t, class_);
PROTECT(t, pool);
GcHashMap* virtualMap = makeHashMap(t, 0, 0);
PROTECT(t, virtualMap);
unsigned virtualCount = 0;
unsigned declaredVirtualCount = 0;
object superVirtualTable = 0;
PROTECT(t, superVirtualTable);
if ((class_->flags() & ACC_INTERFACE) == 0) {
if (class_->super()) {
superVirtualTable = class_->super()->virtualTable();
}
if (superVirtualTable) {
virtualCount = arrayLength(t, superVirtualTable);
for (unsigned i = 0; i < virtualCount; ++i) {
object method = arrayBody(t, superVirtualTable, i);
hashMapInsert(t, virtualMap, method, method, methodHash);
}
}
}
object newVirtuals = reinterpret_cast<object>(makeList(t, 0, 0, 0));
PROTECT(t, newVirtuals);
unsigned count = s.read2();
if(DebugClassReader) {
fprintf(stderr, " method count %d\n", count);
}
if (count) {
object methodTable = reinterpret_cast<object>(makeArray(t, count));
PROTECT(t, methodTable);
object addendum = 0;
PROTECT(t, addendum);
object code = 0;
PROTECT(t, code);
for (unsigned i = 0; i < count; ++i) {
unsigned flags = s.read2();
unsigned name = s.read2();
unsigned spec = s.read2();
if(DebugClassReader) {
fprintf(stderr, " method flags %d name %d spec %d '%s%s'\n", flags, name, spec,
&byteArrayBody(t, singletonObject(t, pool, name - 1), 0),
&byteArrayBody(t, singletonObject(t, pool, spec - 1), 0));
}
addendum = 0;
code = 0;
unsigned attributeCount = s.read2();
for (unsigned j = 0; j < attributeCount; ++j) {
object attributeName = singletonObject(t, pool, s.read2() - 1);
unsigned length = s.read4();
if (vm::strcmp(reinterpret_cast<const int8_t*>("Code"),
&byteArrayBody(t, attributeName, 0)) == 0)
{
code = parseCode(t, s, pool);
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("Exceptions"),
&byteArrayBody(t, attributeName, 0)) == 0)
{
if (addendum == 0) {
addendum = reinterpret_cast<object>(makeMethodAddendum(t, pool, 0, 0, 0, 0, 0));
}
unsigned exceptionCount = s.read2();
object body = reinterpret_cast<object>(makeShortArray(t, exceptionCount));
for (unsigned i = 0; i < exceptionCount; ++i) {
shortArrayBody(t, body, i) = s.read2();
}
set(t, addendum, MethodAddendumExceptionTable, body);
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("AnnotationDefault"),
&byteArrayBody(t, attributeName, 0)) == 0)
{
if (addendum == 0) {
addendum = reinterpret_cast<object>(makeMethodAddendum(t, pool, 0, 0, 0, 0, 0));
}
object body = reinterpret_cast<object>(makeByteArray(t, length));
s.read(reinterpret_cast<uint8_t*>(&byteArrayBody(t, body, 0)),
length);
set(t, addendum, MethodAddendumAnnotationDefault, body);
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("Signature"),
&byteArrayBody(t, attributeName, 0)) == 0)
{
if (addendum == 0) {
addendum = reinterpret_cast<object>(makeMethodAddendum(t, pool, 0, 0, 0, 0, 0));
}
set(t, addendum, AddendumSignature,
singletonObject(t, pool, s.read2() - 1));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("RuntimeVisibleAnnotations"),
&byteArrayBody(t, attributeName, 0)) == 0)
{
if (addendum == 0) {
addendum = reinterpret_cast<object>(makeMethodAddendum(t, pool, 0, 0, 0, 0, 0));
}
object body = reinterpret_cast<object>(makeByteArray(t, length));
s.read(reinterpret_cast<uint8_t*>(&byteArrayBody(t, body, 0)),
length);
set(t, addendum, AddendumAnnotationTable, body);
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("RuntimeVisibleParameterAnnotations"),
&byteArrayBody(t, attributeName, 0)) == 0)
{
if (addendum == 0) {
addendum = reinterpret_cast<object>(makeMethodAddendum(t, pool, 0, 0, 0, 0, 0));
}
object body = reinterpret_cast<object>(makeByteArray(t, length));
s.read(reinterpret_cast<uint8_t*>(&byteArrayBody(t, body, 0)),
length);
set(t, addendum, MethodAddendumParameterAnnotationTable, body);
} else {
s.skip(length);
}
}
const char* specString = reinterpret_cast<const char*>
(&byteArrayBody(t, singletonObject(t, pool, spec - 1), 0));
unsigned parameterCount;
unsigned parameterFootprint;
unsigned returnCode;
scanMethodSpec(t, specString, flags & ACC_STATIC, &parameterCount,
&parameterFootprint, &returnCode);
GcMethod* method = t->m->processor->makeMethod
(t,
0, // vm flags
returnCode,
parameterCount,
parameterFootprint,
flags,
0, // offset
cast<GcByteArray>(t, singletonObject(t, pool, name - 1)),
cast<GcByteArray>(t, singletonObject(t, pool, spec - 1)),
cast<GcMethodAddendum>(t, addendum),
class_,
cast<GcCode>(t, code));
PROTECT(t, method);
if (methodVirtual(t, method)) {
++ declaredVirtualCount;
GcTriple* p = hashMapFindNode
(t, virtualMap, reinterpret_cast<object>(method), methodHash, methodEqual);
if (p) {
method->offset() = methodOffset(t, p->first());
set(t, reinterpret_cast<object>(p), TripleSecond, reinterpret_cast<object>(method));
} else {
method->offset() = virtualCount++;
listAppend(t, cast<GcList>(t, newVirtuals), reinterpret_cast<object>(method));
hashMapInsert(t, virtualMap, reinterpret_cast<object>(method), reinterpret_cast<object>(method), methodHash);
}
if (UNLIKELY((class_->flags() & ACC_INTERFACE) == 0
and vm::strcmp
(reinterpret_cast<const int8_t*>("finalize"),
method->name()->body().begin()) == 0
and vm::strcmp
(reinterpret_cast<const int8_t*>("()V"),
method->spec()->body().begin()) == 0
and (not emptyMethod(t, method))))
{
class_->vmFlags() |= HasFinalizerFlag;
}
} else {
method->offset() = i;
if (vm::strcmp(reinterpret_cast<const int8_t*>("<clinit>"),
method->name()->body().begin()) == 0)
{
method->vmFlags() |= ClassInitFlag;
class_->vmFlags() |= NeedInitFlag;
} else if (vm::strcmp
(reinterpret_cast<const int8_t*>("<init>"),
method->name()->body().begin()) == 0)
{
method->vmFlags() |= ConstructorFlag;
}
}
set(t, methodTable, ArrayBody + (i * BytesPerWord), reinterpret_cast<object>(method));
}
set(t, reinterpret_cast<object>(class_), ClassMethodTable, methodTable);
}
object abstractVirtuals = addInterfaceMethods
(t, class_, virtualMap, &virtualCount, true);
PROTECT(t, abstractVirtuals);
bool populateInterfaceVtables = false;
if (declaredVirtualCount == 0
and abstractVirtuals == 0
and (class_->flags() & ACC_INTERFACE) == 0)
{
if (class_->super()) {
// inherit virtual table from superclass
set(t, reinterpret_cast<object>(class_), ClassVirtualTable, superVirtualTable);
if (class_->super()->interfaceTable()
and arrayLength(t, class_->interfaceTable())
== arrayLength
(t, class_->super()->interfaceTable()))
{
// inherit interface table from superclass
set(t, reinterpret_cast<object>(class_), ClassInterfaceTable,
class_->super()->interfaceTable());
} else {
populateInterfaceVtables = true;
}
} else {
// apparently, Object does not have any virtual methods. We
// give it a vtable anyway so code doesn't break elsewhere.
object vtable = reinterpret_cast<object>(makeArray(t, 0));
set(t, reinterpret_cast<object>(class_), ClassVirtualTable, vtable);
}
} else if (virtualCount) {
// generate class vtable
object vtable = reinterpret_cast<object>(makeArray(t, virtualCount));
unsigned i = 0;
if (class_->flags() & ACC_INTERFACE) {
PROTECT(t, vtable);
for (HashMapIterator it(t, virtualMap); it.hasMore();) {
object method = it.next()->first();
assertT(t, arrayBody(t, vtable, methodOffset(t, method)) == 0);
set(t, vtable, ArrayBody + (methodOffset(t, method) * BytesPerWord),
method);
++ i;
}
} else {
populateInterfaceVtables = true;
if (superVirtualTable) {
for (; i < arrayLength(t, superVirtualTable); ++i) {
object method = arrayBody(t, superVirtualTable, i);
method = hashMapFind(t, virtualMap, method, methodHash, methodEqual);
set(t, vtable, ArrayBody + (i * BytesPerWord), method);
}
}
for (object p = listFront(t, newVirtuals); p; p = pairSecond(t, p)) {
set(t, vtable, ArrayBody + (i * BytesPerWord), pairFirst(t, p));
++ i;
}
}
if (abstractVirtuals) {
PROTECT(t, vtable);
object originalMethodTable = class_->methodTable();
PROTECT(t, originalMethodTable);
unsigned oldLength = class_->methodTable() ?
arrayLength(t, class_->methodTable()) : 0;
object addendum = reinterpret_cast<object>(getClassAddendum(t, class_, pool));
classAddendumDeclaredMethodCount(t, addendum) = oldLength;
object newMethodTable = reinterpret_cast<object>(makeArray
(t, oldLength + listSize(t, abstractVirtuals)));
if (oldLength) {
memcpy(&arrayBody(t, newMethodTable, 0),
&arrayBody(t, class_->methodTable(), 0),
oldLength * sizeof(object));
}
mark(t, newMethodTable, ArrayBody, oldLength);
unsigned mti = oldLength;
for (object p = listFront(t, abstractVirtuals);
p; p = pairSecond(t, p))
{
set(t, newMethodTable,
ArrayBody + ((mti++) * BytesPerWord), pairFirst(t, p));
if ((class_->flags() & ACC_INTERFACE) == 0) {
set(t, vtable,
ArrayBody + ((i++) * BytesPerWord), pairFirst(t, p));
}
}
assertT(t, arrayLength(t, newMethodTable) == mti);
set(t, reinterpret_cast<object>(class_), ClassMethodTable, newMethodTable);
}
assertT(t, arrayLength(t, vtable) == i);
set(t, reinterpret_cast<object>(class_), ClassVirtualTable, vtable);
}
if (populateInterfaceVtables) {
// generate interface vtables
object itable = class_->interfaceTable();
if (itable) {
PROTECT(t, itable);
for (unsigned i = 0; i < arrayLength(t, itable); i += 2) {
object ivtable = classVirtualTable(t, arrayBody(t, itable, i));
if (ivtable) {
object vtable = arrayBody(t, itable, i + 1);
for (unsigned j = 0; j < arrayLength(t, ivtable); ++j) {
object method = arrayBody(t, ivtable, j);
method = hashMapFind
(t, virtualMap, method, methodHash, methodEqual);
assertT(t, method);
set(t, vtable, ArrayBody + (j * BytesPerWord), method);
}
}
}
}
}
}
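// parses class-level attributes; SourceFile, Signature, InnerClasses,
// RuntimeVisibleAnnotations, and EnclosingMethod are retained on the
// class or its addendum, and anything else is skipped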
void
parseAttributeTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
{
PROTECT(t, class_);
PROTECT(t, pool);
unsigned attributeCount = s.read2();
for (unsigned j = 0; j < attributeCount; ++j) {
object name = singletonObject(t, pool, s.read2() - 1);
unsigned length = s.read4();
if (vm::strcmp(reinterpret_cast<const int8_t*>("SourceFile"),
&byteArrayBody(t, name, 0)) == 0)
{
set(t, reinterpret_cast<object>(class_), ClassSourceFile, singletonObject(t, pool, s.read2() - 1));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("Signature"),
&byteArrayBody(t, name, 0)) == 0)
{
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
set(t, reinterpret_cast<object>(addendum), AddendumSignature,
singletonObject(t, pool, s.read2() - 1));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("InnerClasses"),
&byteArrayBody(t, name, 0)) == 0)
{
unsigned innerClassCount = s.read2();
object table = reinterpret_cast<object>(makeArray(t, innerClassCount));
PROTECT(t, table);
for (unsigned i = 0; i < innerClassCount; ++i) {
int16_t inner = s.read2();
int16_t outer = s.read2();
int16_t name = s.read2();
int16_t flags = s.read2();
object reference = reinterpret_cast<object>(makeInnerClassReference
(t,
cast<GcByteArray>(t, inner ? referenceName(t, singletonObject(t, pool, inner - 1)) : 0),
cast<GcByteArray>(t, outer ? referenceName(t, singletonObject(t, pool, outer - 1)) : 0),
cast<GcByteArray>(t, name ? singletonObject(t, pool, name - 1) : 0),
flags));
set(t, table, ArrayBody + (i * BytesPerWord), reference);
}
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
set(t, reinterpret_cast<object>(addendum), ClassAddendumInnerClassTable, table);
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("RuntimeVisibleAnnotations"),
&byteArrayBody(t, name, 0)) == 0)
{
object body = reinterpret_cast<object>(makeByteArray(t, length));
PROTECT(t, body);
s.read(reinterpret_cast<uint8_t*>(&byteArrayBody(t, body, 0)), length);
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
set(t, reinterpret_cast<object>(addendum), AddendumAnnotationTable, body);
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("EnclosingMethod"),
&byteArrayBody(t, name, 0)) == 0)
{
int16_t enclosingClass = s.read2();
int16_t enclosingMethod = s.read2();
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
set(t, reinterpret_cast<object>(addendum), ClassAddendumEnclosingClass,
referenceName(t, singletonObject(t, pool, enclosingClass - 1)));
set(t, reinterpret_cast<object>(addendum), ClassAddendumEnclosingMethod, enclosingMethod
? singletonObject(t, pool, enclosingMethod - 1) : 0);
} else {
s.skip(length);
}
}
}
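// repoints back references in newClass's field, static, and method
// tables (and, for interfaces, its virtual table) from oldClass to
// newClass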
void
updateClassTables(Thread* t, GcClass* newClass, GcClass* oldClass)
{
object fieldTable = newClass->fieldTable();
if (fieldTable) {
for (unsigned i = 0; i < arrayLength(t, fieldTable); ++i) {
set(t, arrayBody(t, fieldTable, i), FieldClass, reinterpret_cast<object>(newClass));
}
}
object staticTable = reinterpret_cast<object>(newClass->staticTable());
if (staticTable) {
set(t, staticTable, SingletonBody, reinterpret_cast<object>(newClass));
}
if (newClass->flags() & ACC_INTERFACE) {
object virtualTable = newClass->virtualTable();
if (virtualTable) {
for (unsigned i = 0; i < arrayLength(t, virtualTable); ++i) {
if (methodClass(t, arrayBody(t, virtualTable, i)) == reinterpret_cast<object>(oldClass)) {
set(t, arrayBody(t, virtualTable, i), MethodClass, reinterpret_cast<object>(newClass));
}
}
}
}
object methodTable = newClass->methodTable();
if (methodTable) {
for (unsigned i = 0; i < arrayLength(t, methodTable); ++i) {
set(t, arrayBody(t, methodTable, i), MethodClass, reinterpret_cast<object>(newClass));
}
}
}
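// replaces the contents of a bootstrap placeholder class with those of
// the fully parsed class, after checking that the two layouts agree;
// this runs in ExclusiveState since it mutates a class other threads
// may already reference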
void
updateBootstrapClass(Thread* t, GcClass* bootstrapClass, GcClass* class_)
{
expect(t, bootstrapClass != class_);
// verify that the classes have the same layout
expect(t, bootstrapClass->super() == class_->super());
expect(t, bootstrapClass->fixedSize() >= class_->fixedSize());
expect(t, (class_->vmFlags() & HasFinalizerFlag) == 0);
PROTECT(t, bootstrapClass);
PROTECT(t, class_);
ENTER(t, Thread::ExclusiveState);
bootstrapClass->vmFlags() &= ~BootstrapFlag;
bootstrapClass->vmFlags() |= class_->vmFlags();
bootstrapClass->flags() |= class_->flags();
set(t, reinterpret_cast<object>(bootstrapClass), ClassArrayElementClass, reinterpret_cast<object>(class_->arrayElementClass()));
set(t, reinterpret_cast<object>(bootstrapClass), ClassSuper, reinterpret_cast<object>(class_->super()));
set(t, reinterpret_cast<object>(bootstrapClass), ClassInterfaceTable, reinterpret_cast<object>(class_->interfaceTable()));
set(t, reinterpret_cast<object>(bootstrapClass), ClassVirtualTable, reinterpret_cast<object>(class_->virtualTable()));
set(t, reinterpret_cast<object>(bootstrapClass), ClassFieldTable, reinterpret_cast<object>(class_->fieldTable()));
set(t, reinterpret_cast<object>(bootstrapClass), ClassMethodTable, reinterpret_cast<object>(class_->methodTable()));
set(t, reinterpret_cast<object>(bootstrapClass), ClassStaticTable, reinterpret_cast<object>(class_->staticTable()));
set(t, reinterpret_cast<object>(bootstrapClass), ClassAddendum, reinterpret_cast<object>(class_->addendum()));
updateClassTables(t, bootstrapClass, class_);
}
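// creates the class object for an array type: a two-word fixed size,
// java.lang.Object's vtable, and the shared array interface table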
GcClass*
makeArrayClass(Thread* t, GcClassLoader* loader, unsigned dimensions, object spec,
object elementClass)
{
if (type(t, GcJobject::Type)->vmFlags() & BootstrapFlag) {
PROTECT(t, loader);
PROTECT(t, spec);
PROTECT(t, elementClass);
// Load java.lang.Object if present so we can use its vtable, but
// don't throw an exception if we can't find it. This way, we
// avoid infinite recursion due to trying to create an array to
// make a stack trace for a ClassNotFoundException.
resolveSystemClass
(t, cast<GcClassLoader>(t, root(t, Machine::BootLoader)),
reinterpret_cast<object>(type(t, GcJobject::Type)->name()), false);
}
object vtable = type(t, GcJobject::Type)->virtualTable();
GcClass* c = t->m->processor->makeClass
(t,
0,
0,
2 * BytesPerWord,
BytesPerWord,
dimensions,
cast<GcClass>(t, elementClass),
type(t, GcArray::Type)->objectMask(),
cast<GcByteArray>(t, spec),
0,
type(t, GcJobject::Type),
root(t, Machine::ArrayInterfaceTable),
vtable,
0,
0,
0,
0,
loader,
arrayLength(t, vtable));
PROTECT(t, c);
t->m->processor->initVtable(t, c);
return c;
}
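// records a newly created class in its loader's class map, creating
// the map lazily; classLock serializes concurrent definitions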
void
saveLoadedClass(Thread* t, GcClassLoader* loader, GcClass* c)
{
PROTECT(t, loader);
PROTECT(t, c);
ACQUIRE(t, t->m->classLock);
if (loader->map() == 0) {
GcHashMap* map = makeHashMap(t, 0, 0);
set(t, reinterpret_cast<object>(loader), ClassLoaderMap, reinterpret_cast<object>(map));
}
hashMapInsert(t,
cast<GcHashMap>(t, loader->map()),
reinterpret_cast<object>(c->name()),
reinterpret_cast<object>(c),
byteArrayHash);
}
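// builds an array class from an array spec. For example, "[[I" has
// dimensions == 2 and element spec "[I", while "[Ljava/lang/String;"
// has element spec "java/lang/String"; the element class is resolved
// first and the array class is created in the element class's loader.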
GcClass*
makeArrayClass(Thread* t, GcClassLoader* loader, object spec, bool throw_,
Gc::Type throwType)
{
PROTECT(t, loader);
PROTECT(t, spec);
const char* s = reinterpret_cast<const char*>(&byteArrayBody(t, spec, 0));
const char* start = s;
unsigned dimensions = 0;
for (; *s == '['; ++s) ++ dimensions;
object elementSpec;
switch (*s) {
case 'L': {
++ s;
const char* elementSpecStart = s;
while (*s and *s != ';') ++ s;
if (dimensions > 1) {
elementSpecStart -= dimensions;
++ s;
}
elementSpec = reinterpret_cast<object>(makeByteArray(t, s - elementSpecStart + 1));
memcpy(&byteArrayBody(t, elementSpec, 0),
&byteArrayBody(t, spec, elementSpecStart - start),
s - elementSpecStart);
byteArrayBody(t, elementSpec, s - elementSpecStart) = 0;
} break;
default:
if (dimensions > 1) {
char c = *s;
elementSpec = reinterpret_cast<object>(makeByteArray(t, dimensions + 1));
unsigned i;
for (i = 0; i < dimensions - 1; ++i) {
byteArrayBody(t, elementSpec, i) = '[';
}
byteArrayBody(t, elementSpec, i++) = c;
byteArrayBody(t, elementSpec, i) = 0;
-- dimensions;
} else {
abort(t);
}
}
GcClass* elementClass = cast<GcClass>(t, hashMapFind
(t, cast<GcHashMap>(t, root(t, Machine::BootstrapClassMap)), elementSpec, byteArrayHash,
byteArrayEqual));
if (elementClass == 0) {
elementClass = resolveClass(t, loader, elementSpec, throw_, throwType);
if (elementClass == 0) return 0;
}
PROTECT(t, elementClass);
ACQUIRE(t, t->m->classLock);
GcClass* class_ = findLoadedClass(t, elementClass->loader(), spec);
if (class_) {
return class_;
}
class_ = makeArrayClass
(t, elementClass->loader(), dimensions, spec, reinterpret_cast<object>(elementClass));
PROTECT(t, class_);
saveLoadedClass(t, elementClass->loader(), class_);
return class_;
}
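// resolves an array class, preferring the bootstrap class map (with
// Object's vtable patched in), then classes already loaded by the boot
// loader, and finally creating the class on demand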
GcClass*
resolveArrayClass(Thread* t, GcClassLoader* loader, object spec, bool throw_,
Gc::Type throwType)
{
GcClass* c = cast<GcClass>(t,
hashMapFind(t,
cast<GcHashMap>(t, root(t, Machine::BootstrapClassMap)),
spec,
byteArrayHash,
byteArrayEqual));
if (c) {
set(t, reinterpret_cast<object>(c), ClassVirtualTable,
type(t, GcJobject::Type)->virtualTable());
return c;
} else {
PROTECT(t, loader);
PROTECT(t, spec);
c = findLoadedClass(t, cast<GcClassLoader>(t, root(t, Machine::BootLoader)), spec);
if (c) {
return c;
} else {
return makeArrayClass(t, loader, spec, throw_, throwType);
}
}
}
void
removeMonitor(Thread* t, object o)
{
unsigned hash;
if (DebugMonitors) {
hash = objectHash(t, o);
}
object m = hashMapRemove
(t, cast<GcHashMap>(t, root(t, Machine::MonitorMap)), o, objectHash, objectEqual);
if (DebugMonitors) {
fprintf(stderr, "dispose monitor %p for object %x\n", m, hash);
}
}
void
removeString(Thread* t, object o)
{
hashMapRemove(t, cast<GcHashMap>(t, root(t, Machine::StringMap)), o, stringHash, objectEqual);
}
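// creates the class for a VM-internal type during bootstrap, reusing
// the superclass's object mask when the reference layout is the same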
void
bootClass(Thread* t, Gc::Type type, int superType, uint32_t objectMask,
unsigned fixedSize, unsigned arrayElementSize, unsigned vtableLength)
{
GcClass* super = (superType >= 0
? vm::type(t, static_cast<Gc::Type>(superType)) : 0);
object mask;
if (objectMask) {
if (super
and super->objectMask()
and super->objectMask()->body()[0]
== static_cast<int32_t>(objectMask))
{
mask = reinterpret_cast<object>(vm::type(t, static_cast<Gc::Type>(superType))->objectMask());
} else {
mask = reinterpret_cast<object>(makeIntArray(t, 1));
intArrayBody(t, mask, 0) = objectMask;
}
} else {
mask = 0;
}
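// re-fetch super: the allocation above may have triggered a
// collection, and super is not PROTECTed against a moving GC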
super = (superType >= 0
? vm::type(t, static_cast<Gc::Type>(superType)) : 0);
GcClass* class_ = t->m->processor->makeClass
(t, 0, BootstrapFlag, fixedSize, arrayElementSize,
arrayElementSize ? 1 : 0, 0, cast<GcIntArray>(t, mask), 0, 0, super, 0, 0, 0, 0, 0, 0,
cast<GcClassLoader>(t, root(t, Machine::BootLoader)), vtableLength);
setType(t, type, class_);
}
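// gives a bootstrap type its Java-visible name and a vtable (either a
// fresh table filled with bootMethod stubs or the superclass's), then
// registers it in the bootstrap class map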
void
bootJavaClass(Thread* t, Gc::Type type, int superType, const char* name,
int vtableLength, object bootMethod)
{
PROTECT(t, bootMethod);
object n = reinterpret_cast<object>(makeByteArray(t, name));
PROTECT(t, n);
GcClass* class_ = vm::type(t, type);
PROTECT(t, class_);
set(t, reinterpret_cast<object>(class_), ClassName, n);
object vtable;
if (vtableLength >= 0) {
vtable = reinterpret_cast<object>(makeArray(t, vtableLength));
for (int i = 0; i < vtableLength; ++ i) {
arrayBody(t, vtable, i) = bootMethod;
}
} else {
vtable = vm::type(t, static_cast<Gc::Type>(superType))->virtualTable();
}
set(t, reinterpret_cast<object>(class_), ClassVirtualTable, vtable);
t->m->processor->initVtable(t, class_);
hashMapInsert
(t, cast<GcHashMap>(t, root(t, Machine::BootstrapClassMap)), n, reinterpret_cast<object>(class_), byteArrayHash);
}
void
nameClass(Thread* t, Gc::Type type, const char* name)
{
object n = reinterpret_cast<object>(makeByteArray(t, name));
set(t, arrayBody(t, t->m->types, type), ClassName, n);
}
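// arrays implement java.io.Serializable and java.lang.Cloneable; the
// table uses the two-slots-per-interface (interface, vtable) layout
// assumed elsewhere, which is why the entries land at indices 0 and 2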
void
makeArrayInterfaceTable(Thread* t)
{
object interfaceTable = reinterpret_cast<object>(makeArray(t, 4));
set(t, interfaceTable, ArrayBody, reinterpret_cast<object>(type
(t, GcSerializable::Type)));
set(t, interfaceTable, ArrayBody + (2 * BytesPerWord),
reinterpret_cast<object>(type(t, GcCloneable::Type)));
setRoot(t, Machine::ArrayInterfaceTable, interfaceTable);
}
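// bootstraps the VM: allocates the root and type tables, wires up the
// boot and app class loaders, flags the primitive, reference, and
// singleton types, and installs the array element and interface tables
// before handing off to the processor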
void
boot(Thread* t)
{
Machine* m = t->m;
m->unsafe = true;
m->roots = allocate(t, pad((Machine::RootCount + 2) * BytesPerWord), true);
arrayLength(t, m->roots) = Machine::RootCount;
setRoot(t, Machine::BootLoader,
allocate(t, GcSystemClassLoader::FixedSize, true));
setRoot(t, Machine::AppLoader,
allocate(t, GcSystemClassLoader::FixedSize, true));
m->types = allocate(t, pad((TypeCount + 2) * BytesPerWord), true);
arrayLength(t, m->types) = TypeCount;
#include "type-initializations.cpp"
GcClass* arrayClass = type(t, GcArray::Type);
set(t, m->types, 0, reinterpret_cast<object>(arrayClass));
set(t, m->roots, 0, reinterpret_cast<object>(arrayClass));
GcClass* loaderClass = type(t, GcSystemClassLoader::Type);
set(t, root(t, Machine::BootLoader), 0, reinterpret_cast<object>(loaderClass));
set(t, root(t, Machine::AppLoader), 0, reinterpret_cast<object>(loaderClass));
GcClass* objectClass = type(t, GcJobject::Type);
GcClass* classClass = type(t, GcClass::Type);
set(t, reinterpret_cast<object>(classClass), 0, reinterpret_cast<object>(classClass));
set(t, reinterpret_cast<object>(classClass), ClassSuper, reinterpret_cast<object>(objectClass));
GcClass* intArrayClass = type(t, GcIntArray::Type);
set(t, reinterpret_cast<object>(intArrayClass), 0, reinterpret_cast<object>(classClass));
set(t, reinterpret_cast<object>(intArrayClass), ClassSuper, reinterpret_cast<object>(objectClass));
m->unsafe = false;
type(t, GcSingleton::Type)->vmFlags()
|= SingletonFlag;
type(t, GcContinuation::Type)->vmFlags()
|= ContinuationFlag;
type(t, GcJreference::Type)->vmFlags()
|= ReferenceFlag;
type(t, GcWeakReference::Type)->vmFlags()
|= ReferenceFlag | WeakReferenceFlag;
type(t, GcSoftReference::Type)->vmFlags()
|= ReferenceFlag | WeakReferenceFlag;
type(t, GcPhantomReference::Type)->vmFlags()
|= ReferenceFlag | WeakReferenceFlag;
type(t, GcJboolean::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJbyte::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJchar::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJshort::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJint::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJlong::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJfloat::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJdouble::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJvoid::Type)->vmFlags()
|= PrimitiveFlag;
set(t, reinterpret_cast<object>(type(t, GcBooleanArray::Type)), ClassArrayElementClass,
reinterpret_cast<object>(type(t, GcJboolean::Type)));
set(t, reinterpret_cast<object>(type(t, GcByteArray::Type)), ClassArrayElementClass,
reinterpret_cast<object>(type(t, GcJbyte::Type)));
set(t, reinterpret_cast<object>(type(t, GcCharArray::Type)), ClassArrayElementClass,
reinterpret_cast<object>(type(t, GcJchar::Type)));
set(t, reinterpret_cast<object>(type(t, GcShortArray::Type)), ClassArrayElementClass,
reinterpret_cast<object>(type(t, GcJshort::Type)));
set(t, reinterpret_cast<object>(type(t, GcIntArray::Type)), ClassArrayElementClass,
reinterpret_cast<object>(type(t, GcJint::Type)));
set(t, reinterpret_cast<object>(type(t, GcLongArray::Type)), ClassArrayElementClass,
reinterpret_cast<object>(type(t, GcJlong::Type)));
set(t, reinterpret_cast<object>(type(t, GcFloatArray::Type)), ClassArrayElementClass,
reinterpret_cast<object>(type(t, GcJfloat::Type)));
set(t, reinterpret_cast<object>(type(t, GcDoubleArray::Type)), ClassArrayElementClass,
reinterpret_cast<object>(type(t, GcJdouble::Type)));
{ GcHashMap* map = makeHashMap(t, 0, 0);
set(t, root(t, Machine::BootLoader), ClassLoaderMap, reinterpret_cast<object>(map));
}
cast<GcSystemClassLoader>(t, root(t, Machine::BootLoader))->finder() = m->bootFinder;
{ GcHashMap* map = makeHashMap(t, 0, 0);
set(t, root(t, Machine::AppLoader), ClassLoaderMap, reinterpret_cast<object>(map));
}
cast<GcSystemClassLoader>(t, root(t, Machine::AppLoader))->finder() = m->appFinder;
set(t, root(t, Machine::AppLoader), ClassLoaderParent,
root(t, Machine::BootLoader));
setRoot(t, Machine::BootstrapClassMap, reinterpret_cast<object>(makeHashMap(t, 0, 0)));
setRoot(t, Machine::StringMap, reinterpret_cast<object>(makeWeakHashMap(t, 0, 0)));
makeArrayInterfaceTable(t);
set(t, reinterpret_cast<object>(type(t, GcBooleanArray::Type)), ClassInterfaceTable,
root(t, Machine::ArrayInterfaceTable));
set(t, reinterpret_cast<object>(type(t, GcByteArray::Type)), ClassInterfaceTable,
root(t, Machine::ArrayInterfaceTable));
set(t, reinterpret_cast<object>(type(t, GcCharArray::Type)), ClassInterfaceTable,
root(t, Machine::ArrayInterfaceTable));
set(t, reinterpret_cast<object>(type(t, GcShortArray::Type)), ClassInterfaceTable,
root(t, Machine::ArrayInterfaceTable));
set(t, reinterpret_cast<object>(type(t, GcIntArray::Type)), ClassInterfaceTable,
root(t, Machine::ArrayInterfaceTable));
set(t, reinterpret_cast<object>(type(t, GcLongArray::Type)), ClassInterfaceTable,
root(t, Machine::ArrayInterfaceTable));
set(t, reinterpret_cast<object>(type(t, GcFloatArray::Type)), ClassInterfaceTable,
root(t, Machine::ArrayInterfaceTable));
set(t, reinterpret_cast<object>(type(t, GcDoubleArray::Type)), ClassInterfaceTable,
root(t, Machine::ArrayInterfaceTable));
m->processor->boot(t, 0, 0);
{ object bootCode = reinterpret_cast<object>(makeCode(t, 0, 0, 0, 0, 0, 0, 0, 0, 1));
codeBody(t, bootCode, 0) = impdep1;
object bootMethod = reinterpret_cast<object>(makeMethod
(t, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, cast<GcCode>(t, bootCode)));
PROTECT(t, bootMethod);
#include "type-java-initializations.cpp"
//#ifdef AVIAN_HEAPDUMP
# include "type-name-initializations.cpp"
//#endif
}
}
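// the interface through which the collector inspects and moves VM
// objects: root visiting, sizing (including the extra word used to
// preserve an identity hash across a copy), copying, and reference
// walking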
class HeapClient: public Heap::Client {
public:
HeapClient(Machine* m): m(m) { }
virtual void visitRoots(Heap::Visitor* v) {
::visitRoots(m, v);
postVisit(m->rootThread, v);
}
virtual void collect(void* context, Heap::CollectionType type) {
collect(static_cast<Thread*>(context), type);
}
virtual bool isFixed(void* p) {
return objectFixed(m->rootThread, static_cast<object>(p));
}
virtual unsigned sizeInWords(void* p) {
Thread* t = m->rootThread;
object o = static_cast<object>(m->heap->follow(maskAlignedPointer(p)));
unsigned n = baseSize(t, o, cast<GcClass>(t, static_cast<object>
(m->heap->follow(objectClass(t, o)))));
if (objectExtended(t, o)) {
++ n;
}
return n;
}
virtual unsigned copiedSizeInWords(void* p) {
Thread* t = m->rootThread;
object o = static_cast<object>(m->heap->follow(maskAlignedPointer(p)));
assertT(t, not objectFixed(t, o));
unsigned n = baseSize(t, o, cast<GcClass>(t, static_cast<object>
(m->heap->follow(objectClass(t, o)))));
if (objectExtended(t, o) or hashTaken(t, o)) {
++ n;
}
return n;
}
virtual void copy(void* srcp, void* dstp) {
Thread* t = m->rootThread;
object src = static_cast<object>(m->heap->follow(maskAlignedPointer(srcp)));
assertT(t, not objectFixed(t, src));
GcClass* class_ = cast<GcClass>(t, static_cast<object>
(m->heap->follow(objectClass(t, src))));
unsigned base = baseSize(t, src, class_);
unsigned n = extendedSize(t, src, base);
object dst = static_cast<object>(dstp);
memcpy(dst, src, n * BytesPerWord);
if (hashTaken(t, src)) {
alias(dst, 0) &= PointerMask;
alias(dst, 0) |= ExtendedMark;
extendedWord(t, dst, base) = takeHash(t, src);
}
}
virtual void walk(void* p, Heap::Walker* w) {
object o = static_cast<object>(m->heap->follow(maskAlignedPointer(p)));
::walk(m->rootThread, w, o, 0);
}
void dispose() {
m->heap->free(this, sizeof(*this));
}
private:
Machine* m;
};
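// runs one collection cycle; the collecting flag guards against
// reentrant collection, and pendingAllocation tells the heap how much
// space the allocation that triggered this collection still needs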
void
doCollect(Thread* t, Heap::CollectionType type, int pendingAllocation)
{
expect(t, not t->m->collecting);
t->m->collecting = true;
THREAD_RESOURCE0(t, t->m->collecting = false);
#ifdef VM_STRESS
bool stress = (t->flags & Thread::StressFlag) != 0;
if (not stress) atomicOr(&(t->flags), Thread::StressFlag);
#endif
Machine* m = t->m;
m->unsafe = true;
m->heap->collect(type, footprint(m->rootThread), pendingAllocation
- (t->m->heapPoolIndex * ThreadHeapSizeInWords));
m->unsafe = false;
postCollect(m->rootThread);
killZombies(t, m->rootThread);
for (unsigned i = 0; i < m->heapPoolIndex; ++i) {
m->heap->free(m->heapPool[i], ThreadHeapSizeInBytes);
}
m->heapPoolIndex = 0;
if (m->heap->limitExceeded()) {
// if we're out of memory, disallow further allocations of fixed
// objects:
m->fixedFootprint = FixedFootprintThresholdInBytes;
} else {
m->fixedFootprint = 0;
}
#ifdef VM_STRESS
if (not stress) atomicAnd(&(t->flags), ~Thread::StressFlag);
#endif
object finalizeQueue = t->m->finalizeQueue;
t->m->finalizeQueue = 0;
for (; finalizeQueue; finalizeQueue = finalizerNext(t, finalizeQueue)) {
void (*function)(Thread*, object);
memcpy(&function, &finalizerFinalize(t, finalizeQueue), BytesPerWord);
function(t, finalizerTarget(t, finalizeQueue));
}
if ((root(t, Machine::ObjectsToFinalize) or root(t, Machine::ObjectsToClean))
and m->finalizeThread == 0
and t->state != Thread::ExitState)
{
m->finalizeThread = m->processor->makeThread
(m, cast<GcThread>(t, root(t, Machine::FinalizerThread)), m->rootThread);
addThread(t, m->finalizeThread);
if (not startThread(t, m->finalizeThread)) {
removeThread(t, m->finalizeThread);
m->finalizeThread = 0;
}
}
}
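// Trampoline using the VM's flat-argument calling convention: the
// caller packs (method, loader, class-name spec) into a uintptr_t
// array and the result comes back as a uintptr_t so it can cross the
// continuation-based unwind boundary. A hypothetical call site might
// look like:
//
//   uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(method),
//                             reinterpret_cast<uintptr_t>(loader),
//                             reinterpret_cast<uintptr_t>(spec) };
//   object result = reinterpret_cast<object>
//     (run(t, invokeLoadClass, arguments));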
uint64_t
invokeLoadClass(Thread* t, uintptr_t* arguments)
{
GcMethod* method = cast<GcMethod>(t, reinterpret_cast<object>(arguments[0]));
object loader = reinterpret_cast<object>(arguments[1]);
object specString = reinterpret_cast<object>(arguments[2]);
return reinterpret_cast<uintptr_t>
(t->m->processor->invoke(t, method, loader, specString));
}
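// Walk the stack of classes whose static initializers this thread is
// currently running, so initialization can be treated as a no-op when
// a <clinit> re-enters its own class (recursive initialization, JLS
// 12.4.2).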
bool
isInitializing(Thread* t, GcClass* c)
{
for (Thread::ClassInitStack* s = t->classInitStack; s; s = s->next) {
if (s->class_ == c) {
return true;
}
}
return false;
}
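// Linear scan of a class member table (fields or methods), matching
// both the UTF-8 name and the type descriptor through the supplied
// accessor functions.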
object
findInTable(Thread* t, object table, object name, object spec,
object& (*getName)(Thread*, object),
object& (*getSpec)(Thread*, object))
{
if (table) {
for (unsigned i = 0; i < arrayLength(t, table); ++i) {
object o = arrayBody(t, table, i);
if (vm::strcmp(&byteArrayBody(t, getName(t, o), 0),
&byteArrayBody(t, name, 0)) == 0 and
vm::strcmp(&byteArrayBody(t, getSpec(t, o), 0),
&byteArrayBody(t, spec, 0)) == 0)
{
return o;
}
}
// fprintf(stderr, "%s %s not in\n",
// &byteArrayBody(t, name, 0),
// &byteArrayBody(t, spec, 0));
// for (unsigned i = 0; i < arrayLength(t, table); ++i) {
// object o = arrayBody(t, table, i);
// fprintf(stderr, "\t%s %s\n",
// &byteArrayBody(t, getName(t, o), 0),
// &byteArrayBody(t, getSpec(t, o), 0));
// }
}
return 0;
}
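// Track, per package, the origin of the most recently defined class;
// the OpenJDK classpath consults this map (via
// Package.defineSystemPackage) when answering package queries for
// classes on the boot classpath.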
void
updatePackageMap(Thread* t, GcClass* class_)
{
PROTECT(t, class_);
if (root(t, Machine::PackageMap) == 0) {
setRoot(t, Machine::PackageMap, reinterpret_cast<object>(makeHashMap(t, 0, 0)));
}
object className = reinterpret_cast<object>(class_->name());
if ('[' != byteArrayBody(t, className, 0)) {
THREAD_RUNTIME_ARRAY
(t, char, packageName, byteArrayLength(t, className));
char* s = reinterpret_cast<char*>(&byteArrayBody(t, className, 0));
char* p = strrchr(s, '/');
if (p) {
int length = (p - s) + 1;
memcpy(RUNTIME_ARRAY_BODY(packageName),
&byteArrayBody(t, className, 0),
length);
RUNTIME_ARRAY_BODY(packageName)[length] = 0;
object key = vm::makeByteArray
(t, "%s", RUNTIME_ARRAY_BODY(packageName));
PROTECT(t, key);
hashMapRemove
(t, cast<GcHashMap>(t, root(t, Machine::PackageMap)), key, byteArrayHash,
byteArrayEqual);
object source = reinterpret_cast<object>(class_->source());
if (source) {
// note that we strip the "file:" prefix, since OpenJDK's
// Package.defineSystemPackage expects an unadorned filename:
const unsigned PrefixLength = 5;
unsigned sourceNameLength = byteArrayLength(t, source)
- PrefixLength;
THREAD_RUNTIME_ARRAY(t, char, sourceName, sourceNameLength);
memcpy(RUNTIME_ARRAY_BODY(sourceName),
&byteArrayBody(t, source, PrefixLength),
sourceNameLength);
source = vm::makeByteArray(t, "%s", RUNTIME_ARRAY_BODY(sourceName));
} else {
source = vm::makeByteArray(t, "avian-dummy-package-source");
}
hashMapInsert
(t, cast<GcHashMap>(t, root(t, Machine::PackageMap)), key, source, byteArrayHash);
}
}
}
} // namespace
namespace vm {
Machine::Machine(System* system, Heap* heap, Finder* bootFinder,
Finder* appFinder, Processor* processor, Classpath* classpath,
const char** properties, unsigned propertyCount,
const char** arguments, unsigned argumentCount,
unsigned stackSizeInBytes):
vtable(&javaVMVTable),
system(system),
heapClient(new (heap->allocate(sizeof(HeapClient)))
HeapClient(this)),
heap(heap),
bootFinder(bootFinder),
appFinder(appFinder),
processor(processor),
classpath(classpath),
rootThread(0),
exclusive(0),
finalizeThread(0),
jniReferences(0),
propertyCount(propertyCount),
arguments(arguments),
argumentCount(argumentCount),
threadCount(0),
activeCount(0),
liveCount(0),
daemonCount(0),
fixedFootprint(0),
stackSizeInBytes(stackSizeInBytes),
localThread(0),
stateLock(0),
heapLock(0),
classLock(0),
referenceLock(0),
shutdownLock(0),
libraries(0),
errorLog(0),
bootimage(0),
types(0),
roots(0),
finalizers(0),
tenuredFinalizers(0),
finalizeQueue(0),
weakReferences(0),
tenuredWeakReferences(0),
unsafe(false),
collecting(false),
triedBuiltinOnLoad(false),
dumpedHeapOnOOM(false),
alive(true),
heapPoolIndex(0)
{
heap->setClient(heapClient);
populateJNITables(&javaVMVTable, &jniEnvVTable);
  // copy the property strings so this Machine owns their memory
this->properties = (char**)heap->allocate(sizeof(char*) * propertyCount);
for (unsigned int i = 0; i < propertyCount; i++)
{
size_t length = strlen(properties[i]) + 1; // +1 for null-terminating char
this->properties[i] = (char*)heap->allocate(sizeof(char) * length);
memcpy(this->properties[i], properties[i], length);
}
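  // the bootstrap property may name several libraries separated by
  // the platform's path separator; truncate at the first separator,
  // load that library here, and chain any remaining ones below: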
const char* bootstrapProperty = findProperty(this, BOOTSTRAP_PROPERTY);
const char* bootstrapPropertyDup = bootstrapProperty ? strdup(bootstrapProperty) : 0;
const char* bootstrapPropertyEnd = bootstrapPropertyDup + (bootstrapPropertyDup ? strlen(bootstrapPropertyDup) : 0);
char* codeLibraryName = (char*)bootstrapPropertyDup;
char* codeLibraryNameEnd = 0;
if (codeLibraryName && (codeLibraryNameEnd = strchr(codeLibraryName, system->pathSeparator())))
*codeLibraryNameEnd = 0;
if (not system->success(system->make(&localThread)) or
not system->success(system->make(&stateLock)) or
not system->success(system->make(&heapLock)) or
not system->success(system->make(&classLock)) or
not system->success(system->make(&referenceLock)) or
not system->success(system->make(&shutdownLock)) or
not system->success
(system->load(&libraries, bootstrapPropertyDup)))
{
system->abort();
}
System::Library* additionalLibrary = 0;
while (codeLibraryNameEnd && codeLibraryNameEnd + 1 < bootstrapPropertyEnd) {
codeLibraryName = codeLibraryNameEnd + 1;
codeLibraryNameEnd = strchr(codeLibraryName, system->pathSeparator());
if (codeLibraryNameEnd)
*codeLibraryNameEnd = 0;
if (!system->success(system->load(&additionalLibrary, codeLibraryName)))
system->abort();
libraries->setNext(additionalLibrary);
}
  if (bootstrapPropertyDup) {
    free((void*)bootstrapPropertyDup);
  }
}
void
Machine::dispose()
{
localThread->dispose();
stateLock->dispose();
heapLock->dispose();
classLock->dispose();
referenceLock->dispose();
shutdownLock->dispose();
if (libraries) {
libraries->disposeAll();
}
for (Reference* r = jniReferences; r;) {
Reference* tmp = r;
r = r->next;
heap->free(tmp, sizeof(*tmp));
}
for (unsigned i = 0; i < heapPoolIndex; ++i) {
heap->free(heapPool[i], ThreadHeapSizeInBytes);
}
if (bootimage) {
heap->free(bootimage, bootimageSize);
}
heap->free(arguments, sizeof(const char*) * argumentCount);
for (unsigned int i = 0; i < propertyCount; i++)
{
heap->free(properties[i], sizeof(char) * (strlen(properties[i]) + 1));
}
heap->free(properties, sizeof(const char*) * propertyCount);
static_cast<HeapClient*>(heapClient)->dispose();
heap->free(this, sizeof(*this));
}
Thread::Thread(Machine* m, object javaThread, Thread* parent):
vtable(&(m->jniEnvVTable)),
m(m),
parent(parent),
peer(0),
child(0),
waitNext(0),
state(NoState),
criticalLevel(0),
systemThread(0),
lock(0),
javaThread(javaThread),
exception(0),
heapIndex(0),
heapOffset(0),
protector(0),
classInitStack(0),
libraryLoadStack(0),
runnable(this),
defaultHeap(static_cast<uintptr_t*>
(m->heap->allocate(ThreadHeapSizeInBytes))),
heap(defaultHeap),
backupHeapIndex(0),
flags(ActiveFlag)
{ }
void
Thread::init()
{
memset(defaultHeap, 0, ThreadHeapSizeInBytes);
memset(backupHeap, 0, ThreadBackupHeapSizeInBytes);
if (parent == 0) {
assertT(this, m->rootThread == 0);
assertT(this, javaThread == 0);
m->rootThread = this;
m->unsafe = true;
if (not m->system->success(m->system->attach(&runnable))) {
abort(this);
}
BootImage* image = 0;
uint8_t* code = 0;
const char* imageFunctionName = findProperty(m, "avian.bootimage");
if (imageFunctionName) {
bool lzma = strncmp("lzma:", imageFunctionName, 5) == 0;
const char* symbolName
= lzma ? imageFunctionName + 5 : imageFunctionName;
void* imagep = m->libraries->resolve(symbolName);
if (imagep) {
uint8_t* (*imageFunction)(unsigned*);
memcpy(&imageFunction, &imagep, BytesPerWord);
unsigned size;
uint8_t* imageBytes = imageFunction(&size);
if (lzma) {
#ifdef AVIAN_USE_LZMA
m->bootimage = image = reinterpret_cast<BootImage*>
(decodeLZMA
(m->system, m->heap, imageBytes, size, &(m->bootimageSize)));
#else
abort(this);
#endif
} else {
image = reinterpret_cast<BootImage*>(imageBytes);
}
const char* codeFunctionName = findProperty(m, "avian.codeimage");
if (codeFunctionName) {
void* codep = m->libraries->resolve(codeFunctionName);
if (codep) {
uint8_t* (*codeFunction)(unsigned*);
memcpy(&codeFunction, &codep, BytesPerWord);
code = codeFunction(&size);
}
}
}
}
m->unsafe = false;
enter(this, ActiveState);
if (image and code) {
m->processor->boot(this, image, code);
makeArrayInterfaceTable(this);
} else {
boot(this);
}
setRoot(this, Machine::ByteArrayMap, reinterpret_cast<object>(makeWeakHashMap(this, 0, 0)));
setRoot(this, Machine::MonitorMap, reinterpret_cast<object>(makeWeakHashMap(this, 0, 0)));
setRoot(this, Machine::ClassRuntimeDataTable, reinterpret_cast<object>(makeVector(this, 0, 0)));
setRoot(this, Machine::MethodRuntimeDataTable, reinterpret_cast<object>(makeVector(this, 0, 0)));
setRoot(this, Machine::JNIMethodTable, reinterpret_cast<object>(makeVector(this, 0, 0)));
setRoot(this, Machine::JNIFieldTable, reinterpret_cast<object>(makeVector(this, 0, 0)));
m->localThread->set(this);
}
expect(this, m->system->success(m->system->make(&lock)));
}
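// The last live thread to exit turns off the lights; any other thread
// detaches from its java.lang.Thread peer and lingers as a zombie
// until joined.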
void
Thread::exit()
{
if (state != Thread::ExitState and
state != Thread::ZombieState)
{
enter(this, Thread::ExclusiveState);
if (m->liveCount == 1) {
turnOffTheLights(this);
} else {
threadPeer(this, javaThread) = 0;
enter(this, Thread::ZombieState);
}
}
}
void
Thread::dispose()
{
if (lock) {
lock->dispose();
}
if (systemThread) {
systemThread->dispose();
2007-07-07 18:09:16 +00:00
}
-- m->threadCount;
m->heap->free(defaultHeap, ThreadHeapSizeInBytes);
m->processor->dispose(this);
}
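// Start the registered shutdown hooks and wait for them to finish,
// tell the finalizer thread to exit, then mark the machine dead and
// interrupt any daemon threads still blocked in waits or sleeps.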
void
shutDown(Thread* t)
{
ACQUIRE(t, t->m->shutdownLock);
object hooks = root(t, Machine::ShutdownHooks);
PROTECT(t, hooks);
setRoot(t, Machine::ShutdownHooks, 0);
object h = hooks;
PROTECT(t, h);
for (; h; h = pairSecond(t, h)) {
startThread(t, cast<GcThread>(t, pairFirst(t, h)));
}
// wait for hooks to exit
h = hooks;
for (; h; h = pairSecond(t, h)) {
while (true) {
Thread* ht = reinterpret_cast<Thread*>(threadPeer(t, pairFirst(t, h)));
{ ACQUIRE(t, t->m->stateLock);
if (ht == 0
or ht->state == Thread::ZombieState
or ht->state == Thread::JoinedState)
{
break;
} else {
ENTER(t, Thread::IdleState);
t->m->stateLock->wait(t->systemThread, 0);
}
}
}
}
// tell finalize thread to exit and wait for it to do so
{ ACQUIRE(t, t->m->stateLock);
Thread* finalizeThread = t->m->finalizeThread;
if (finalizeThread) {
t->m->finalizeThread = 0;
t->m->stateLock->notifyAll(t->systemThread);
while (finalizeThread->state != Thread::ZombieState
and finalizeThread->state != Thread::JoinedState)
{
ENTER(t, Thread::IdleState);
t->m->stateLock->wait(t->systemThread, 0);
}
}
}
// interrupt daemon threads and tell them to die
// todo: be more aggressive about killing daemon threads, e.g. at
// any GC point, not just at waits/sleeps
{ ACQUIRE(t, t->m->stateLock);
t->m->alive = false;
visitAll(t, t->m->rootThread, interruptDaemon);
}
}
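// Transition a thread between the NoState, Idle, Active, Exclusive,
// Zombie, and Exit states, maintaining the machine-wide active, live,
// and daemon counts. Entering ExclusiveState blocks until every other
// thread is idle, which is how stop-the-world operations such as
// collection are serialized; the Idle and Active transitions take a
// fast path that avoids the state lock when atomic operations are
// available.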
void
enter(Thread* t, Thread::State s)
{
stress(t);
if (s == t->state) return;
if (t->state == Thread::ExitState) {
// once in exit state, we stay that way
return;
}
#ifdef USE_ATOMIC_OPERATIONS
# define INCREMENT atomicIncrement
# define ACQUIRE_LOCK ACQUIRE_RAW(t, t->m->stateLock)
# define STORE_LOAD_MEMORY_BARRIER storeLoadMemoryBarrier()
#else
# define INCREMENT(pointer, value) (*(pointer) += (value))
# define ACQUIRE_LOCK
# define STORE_LOAD_MEMORY_BARRIER
ACQUIRE_RAW(t, t->m->stateLock);
#endif // not USE_ATOMIC_OPERATIONS
switch (s) {
case Thread::ExclusiveState: {
ACQUIRE_LOCK;
while (t->m->exclusive) {
2007-07-06 23:50:26 +00:00
// another thread got here first.
ENTER(t, Thread::IdleState);
t->m->stateLock->wait(t->systemThread, 0);
}
switch (t->state) {
case Thread::ActiveState: break;
case Thread::IdleState: {
INCREMENT(&(t->m->activeCount), 1);
} break;
default: abort(t);
}
t->state = Thread::ExclusiveState;
t->m->exclusive = t;
STORE_LOAD_MEMORY_BARRIER;
while (t->m->activeCount > 1) {
t->m->stateLock->wait(t->systemThread, 0);
}
} break;
case Thread::IdleState:
if (LIKELY(t->state == Thread::ActiveState)) {
// fast path
assertT(t, t->m->activeCount > 0);
INCREMENT(&(t->m->activeCount), -1);
t->state = s;
if (t->m->exclusive) {
ACQUIRE_LOCK;
t->m->stateLock->notifyAll(t->systemThread);
}
break;
} else {
// fall through to slow path
}
case Thread::ZombieState: {
ACQUIRE_LOCK;
switch (t->state) {
case Thread::ExclusiveState: {
assertT(t, t->m->exclusive == t);
t->m->exclusive = 0;
} break;
case Thread::ActiveState: break;
default: abort(t);
}
assertT(t, t->m->activeCount > 0);
INCREMENT(&(t->m->activeCount), -1);
if (s == Thread::ZombieState) {
assertT(t, t->m->liveCount > 0);
-- t->m->liveCount;
if (t->flags & Thread::DaemonFlag) {
-- t->m->daemonCount;
}
2007-07-06 23:50:26 +00:00
}
2007-07-06 23:50:26 +00:00
t->state = s;
t->m->stateLock->notifyAll(t->systemThread);
} break;
case Thread::ActiveState:
if (LIKELY(t->state == Thread::IdleState and t->m->exclusive == 0)) {
// fast path
INCREMENT(&(t->m->activeCount), 1);
t->state = s;
if (t->m->exclusive) {
// another thread has entered the exclusive state, so we
// return to idle and use the slow path to become active
enter(t, Thread::IdleState);
} else {
break;
}
}
{ ACQUIRE_LOCK;
switch (t->state) {
case Thread::ExclusiveState: {
assertT(t, t->m->exclusive == t);
2007-07-06 23:50:26 +00:00
t->state = s;
t->m->exclusive = 0;
t->m->stateLock->notifyAll(t->systemThread);
} break;
case Thread::NoState:
case Thread::IdleState: {
while (t->m->exclusive) {
t->m->stateLock->wait(t->systemThread, 0);
}
INCREMENT(&(t->m->activeCount), 1);
if (t->state == Thread::NoState) {
++ t->m->liveCount;
++ t->m->threadCount;
}
t->state = s;
} break;
default: abort(t);
}
} break;
case Thread::ExitState: {
ACQUIRE_LOCK;
switch (t->state) {
case Thread::ExclusiveState: {
assertT(t, t->m->exclusive == t);
// exit state should also be exclusive, so don't set exclusive = 0
t->m->stateLock->notifyAll(t->systemThread);
} break;
case Thread::ActiveState: break;
default: abort(t);
}
assertT(t, t->m->activeCount > 0);
INCREMENT(&(t->m->activeCount), -1);
t->state = s;
while (t->m->liveCount - t->m->daemonCount > 1) {
t->m->stateLock->wait(t->systemThread, 0);
2007-07-06 23:50:26 +00:00
}
} break;
default: abort(t);
}
}
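// Allocation slow path: requests too large to ever fit in a
// thread-local heap segment become fixed (non-moving) allocations,
// while everything else is movable.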
object
allocate2(Thread* t, unsigned sizeInBytes, bool objectMask)
{
return allocate3
(t, t->m->heap,
ceilingDivide(sizeInBytes, BytesPerWord) > ThreadHeapSizeInWords ?
Machine::FixedAllocation : Machine::MovableAllocation,
sizeInBytes, objectMask);
}
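// Core allocator. The backup-heap and tracing modes service the
// request from reserved per-thread space (e.g. while constructing an
// OutOfMemoryError or walking the heap); otherwise the thread first
// yields to any pending exclusive thread, then grows into a fresh
// heap segment or collects until the request fits, throwing
// OutOfMemoryError once the hard heap limit is reached.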
object
allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
2008-04-13 18:15:04 +00:00
unsigned sizeInBytes, bool objectMask)
{
expect(t, t->criticalLevel == 0);
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
expect(t, t->backupHeapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
<= ThreadBackupHeapSizeInWords);
object o = reinterpret_cast<object>(t->backupHeap + t->backupHeapIndex);
t->backupHeapIndex += ceilingDivide(sizeInBytes, BytesPerWord);
fieldAtOffset<object>(o, 0) = 0;
return o;
} else if (UNLIKELY(t->flags & Thread::TracingFlag)) {
expect(t, t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
<= ThreadHeapSizeInWords);
return allocateSmall(t, sizeInBytes);
2008-04-09 19:08:13 +00:00
}
ACQUIRE_RAW(t, t->m->stateLock);
while (t->m->exclusive and t->m->exclusive != t) {
// another thread wants to enter the exclusive state, either for a
// collection or some other reason. We give it a chance here.
2007-07-07 18:09:16 +00:00
ENTER(t, Thread::IdleState);
while (t->m->exclusive) {
t->m->stateLock->wait(t->systemThread, 0);
}
}
do {
switch (type) {
case Machine::MovableAllocation:
if (t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
> ThreadHeapSizeInWords)
{
t->heap = 0;
if ((not t->m->heap->limitExceeded())
and t->m->heapPoolIndex < ThreadHeapPoolSize)
{
t->heap = static_cast<uintptr_t*>
(t->m->heap->tryAllocate(ThreadHeapSizeInBytes));
if (t->heap) {
memset(t->heap, 0, ThreadHeapSizeInBytes);
t->m->heapPool[t->m->heapPoolIndex++] = t->heap;
t->heapOffset += t->heapIndex;
t->heapIndex = 0;
}
}
}
break;
case Machine::FixedAllocation:
if (t->m->fixedFootprint + sizeInBytes > FixedFootprintThresholdInBytes)
{
t->heap = 0;
}
break;
case Machine::ImmortalAllocation:
break;
}
int pendingAllocation = t->m->heap->fixedFootprint
(ceilingDivide(sizeInBytes, BytesPerWord), objectMask);
if (t->heap == 0 or t->m->heap->limitExceeded(pendingAllocation)) {
// fprintf(stderr, "gc");
// vmPrintTrace(t);
collect(t, Heap::MinorCollection, pendingAllocation);
}
if (t->m->heap->limitExceeded(pendingAllocation)) {
throw_(t, root(t, Machine::OutOfMemoryError));
}
} while (type == Machine::MovableAllocation
and t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
> ThreadHeapSizeInWords);
switch (type) {
case Machine::MovableAllocation: {
return allocateSmall(t, sizeInBytes);
}
case Machine::FixedAllocation: {
object o = static_cast<object>
(t->m->heap->allocateFixed
(allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask));
memset(o, 0, sizeInBytes);
alias(o, 0) = FixedMark;
t->m->fixedFootprint += t->m->heap->fixedFootprint
(ceilingDivide(sizeInBytes, BytesPerWord), objectMask);
return o;
}
case Machine::ImmortalAllocation: {
object o = static_cast<object>
(t->m->heap->allocateImmortalFixed
(allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask));
memset(o, 0, sizeInBytes);
alias(o, 0) = FixedMark;
return o;
}
default: abort(t);
2007-07-06 23:50:26 +00:00
}
}
void
collect(Thread* t, Heap::CollectionType type, int pendingAllocation)
{
ENTER(t, Thread::ExclusiveState);
unsigned pending = pendingAllocation
- (t->m->heapPoolIndex * ThreadHeapSizeInWords);
if (t->m->heap->limitExceeded(pending)) {
type = Heap::MajorCollection;
}
doCollect(t, type, pendingAllocation);
if (t->m->heap->limitExceeded(pending)) {
// try once more, giving the heap a chance to squeeze everything
// into the smallest possible space:
doCollect(t, Heap::MajorCollection, pendingAllocation);
}
}
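// Allocate an instance and do the bookkeeping plain makeNew skips:
// weak references are linked onto the machine's weak reference list,
// and instances of classes with finalizers are registered for
// finalization.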
object
makeNewGeneral(Thread* t, GcClass* class_)
{
assertT(t, t->state == Thread::ActiveState);
PROTECT(t, class_);
object instance = makeNew(t, class_);
PROTECT(t, instance);
if (class_->vmFlags() & WeakReferenceFlag) {
ACQUIRE(t, t->m->referenceLock);
jreferenceVmNext(t, instance) = t->m->weakReferences;
t->m->weakReferences = instance;
}
if (class_->vmFlags() & HasFinalizerFlag) {
addFinalizer(t, instance, 0);
}
return instance;
}
void
popResources(Thread* t)
{
while (t->resource != t->checkpoint->resource) {
Thread::Resource* r = t->resource;
t->resource = r->next;
r->release();
}
t->protector = t->checkpoint->protector;
}
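// vsnprintf into a stack buffer of the caller-supplied size,
// returning 0 if the output was truncated so the caller can retry
// with a larger buffer (see makeByteArray below).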
object
makeByteArrayV(Thread* t, const char* format, va_list a, int size)
{
THREAD_RUNTIME_ARRAY(t, char, buffer, size);
int r = vm::vsnprintf(RUNTIME_ARRAY_BODY(buffer), size - 1, format, a);
if (r >= 0 and r < size - 1) {
object s = reinterpret_cast<object>(makeByteArray(t, strlen(RUNTIME_ARRAY_BODY(buffer)) + 1));
memcpy(&byteArrayBody(t, s, 0), RUNTIME_ARRAY_BODY(buffer),
byteArrayLength(t, s));
return s;
} else {
return 0;
}
}
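// Format into a byte array, starting with a 256-byte guess and
// doubling until makeByteArrayV reports that the output fit.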
object
makeByteArray(Thread* t, const char* format, ...)
{
int size = 256;
while (true) {
va_list a;
va_start(a, format);
object s = makeByteArrayV(t, format, a, size);
va_end(a);
2007-07-06 23:50:26 +00:00
if (s) {
return s;
} else {
size *= 2;
}
}
}
object
makeString(Thread* t, const char* format, ...)
{
int size = 256;
while (true) {
va_list a;
va_start(a, format);
object s = makeByteArrayV(t, format, a, size);
va_end(a);
if (s) {
return t->m->classpath->makeString(t, s, 0, byteArrayLength(t, s) - 1);
} else {
size *= 2;
}
}
}
int
stringUTFLength(Thread* t, object string, unsigned start, unsigned length)
{
unsigned result = 0;
if (length) {
object data = stringData(t, string);
if (objectClass(t, data) == type(t, GcByteArray::Type)) {
result = length;
} else {
for (unsigned i = 0; i < length; ++i) {
uint16_t c = charArrayBody
(t, data, stringOffset(t, string) + start + i);
        if (c == 0) result += 1; // null char (Java's modified UTF-8 would use 2 bytes)
else if (c < 0x80) result += 1; // ASCII char
else if (c < 0x800) result += 2; // two-byte char
else result += 3; // three-byte char
}
}
}
return result;
}
void
stringChars(Thread* t, object string, unsigned start, unsigned length,
char* chars)
{
if (length) {
object data = stringData(t, string);
if (objectClass(t, data) == type(t, GcByteArray::Type)) {
memcpy(chars,
&byteArrayBody(t, data, stringOffset(t, string) + start),
length);
} else {
for (unsigned i = 0; i < length; ++i) {
chars[i] = charArrayBody(t, data, stringOffset(t, string) + start + i);
}
}
}
chars[length] = 0;
}
void
stringChars(Thread* t, object string, unsigned start, unsigned length,
uint16_t* chars)
{
if (length) {
object data = stringData(t, string);
if (objectClass(t, data) == type(t, GcByteArray::Type)) {
for (unsigned i = 0; i < length; ++i) {
chars[i] = byteArrayBody(t, data, stringOffset(t, string) + start + i);
}
} else {
memcpy(chars,
&charArrayBody(t, data, stringOffset(t, string) + start),
length * sizeof(uint16_t));
}
}
chars[length] = 0;
}
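// Encode the string as null-terminated UTF-8; chars must have room
// for stringUTFLength(t, string, start, length) bytes plus the
// terminator, which the assertion below checks.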
void
stringUTFChars(Thread* t, object string, unsigned start, unsigned length,
char* chars, unsigned charsLength UNUSED)
{
assertT(t, static_cast<unsigned>
(stringUTFLength(t, string, start, length)) == charsLength);
object data = stringData(t, string);
if (objectClass(t, data) == type(t, GcByteArray::Type)) {
memcpy(chars,
&byteArrayBody(t, data, stringOffset(t, string) + start),
length);
chars[length] = 0;
} else {
int j = 0;
for (unsigned i = 0; i < length; ++i) {
uint16_t c = charArrayBody
(t, data, stringOffset(t, string) + start + i);
      if (c == 0) { // null char
chars[j++] = 0;
} else if (c < 0x80) { // ASCII char
chars[j++] = static_cast<char>(c);
} else if (c < 0x800) { // two-byte char
chars[j++] = static_cast<char>(0x0c0 | (c >> 6));
chars[j++] = static_cast<char>(0x080 | (c & 0x03f));
} else { // three-byte char
chars[j++] = static_cast<char>(0x0e0 | ((c >> 12) & 0x0f));
chars[j++] = static_cast<char>(0x080 | ((c >> 6) & 0x03f));
chars[j++] = static_cast<char>(0x080 | (c & 0x03f));
}
}
chars[j] = 0;
}
}
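// Raw trampoline (same flat uintptr_t-argument convention as
// invokeLoadClass above); isAssignableFrom below appears to use it to
// force classes that are still bootstrap placeholders to be fully
// loaded before their hierarchies are inspected.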
uint64_t
resolveBootstrap(Thread* t, uintptr_t* arguments)
{
object name = reinterpret_cast<object>(arguments[0]);
resolveSystemClass(t, cast<GcClassLoader>(t, root(t, Machine::BootLoader)), name);
return 1;
}
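// Answers whether a value of class b may be assigned to a variable of
// class a (i.e. b is a subtype of a). The cases below cover: identity,
// interfaces (scanning b's interface table, whose stride is 2 for
// classes because entries are paired with vtables), covariant arrays,
// and the superclass chain. For example, java/lang/String is assignable
// to java/lang/Object but not vice versa.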
bool
isAssignableFrom(Thread* t, GcClass* a, GcClass* b)
{
assertT(t, a);
assertT(t, b);
if (a == b) return true;
if (a->flags() & ACC_INTERFACE) {
if (b->vmFlags() & BootstrapFlag) {
uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(b->name()) };
if (run(t, resolveBootstrap, arguments) == 0) {
t->exception = 0;
return false;
}
}
object itable = b->interfaceTable();
if (itable) {
unsigned stride = (b->flags() & ACC_INTERFACE) ? 1 : 2;
for (unsigned i = 0; i < arrayLength(t, itable); i += stride) {
if (arrayBody(t, itable, i) == reinterpret_cast<object>(a)) {
return true;
}
}
}
} else if (a->arrayDimensions()) {
if (b->arrayDimensions()) {
return isAssignableFrom
(t, a->arrayElementClass(), b->arrayElementClass());
}
} else if ((a->vmFlags() & PrimitiveFlag)
== (b->vmFlags() & PrimitiveFlag))
{
for (; b; b = b->super()) {
2007-07-24 01:44:20 +00:00
if (b == a) {
return true;
}
}
}
return false;
}
bool
instanceOf(Thread* t, GcClass* class_, object o)
{
if (o == 0) {
return false;
} else {
return isAssignableFrom(t, class_, objectClass(t, o));
}
}
object
classInitializer(Thread* t, GcClass* class_)
{
if (class_->methodTable()) {
for (unsigned i = 0; i < arrayLength(t, class_->methodTable()); ++i)
{
object o = arrayBody(t, class_->methodTable(), i);
if (methodVmFlags(t, o) & ClassInitFlag) {
return o;
}
}
}
return 0;
}
unsigned
fieldCode(Thread* t, unsigned javaCode)
{
switch (javaCode) {
case 'B':
return ByteField;
case 'C':
return CharField;
case 'D':
return DoubleField;
case 'F':
return FloatField;
case 'I':
return IntField;
case 'J':
return LongField;
case 'S':
return ShortField;
case 'V':
return VoidField;
case 'Z':
return BooleanField;
case 'L':
case '[':
return ObjectField;
default: abort(t);
}
}
unsigned
fieldType(Thread* t, unsigned code)
{
switch (code) {
case VoidField:
return VOID_TYPE;
case ByteField:
case BooleanField:
return INT8_TYPE;
case CharField:
case ShortField:
return INT16_TYPE;
case DoubleField:
return DOUBLE_TYPE;
case FloatField:
return FLOAT_TYPE;
case IntField:
return INT32_TYPE;
case LongField:
return INT64_TYPE;
case ObjectField:
return POINTER_TYPE;
default: abort(t);
}
}
unsigned
primitiveSize(Thread* t, unsigned code)
{
switch (code) {
case VoidField:
return 0;
case ByteField:
case BooleanField:
return 1;
case CharField:
case ShortField:
return 2;
case FloatField:
case IntField:
return 4;
case DoubleField:
case LongField:
return 8;
default: abort(t);
}
}
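// Parses a class file image laid out per the JVM spec: magic
// (0xCAFEBABE), minor/major version, constant pool, access flags,
// this/super class indices, then interface, field, method, and
// attribute tables. A placeholder GcClass is populated first; the
// processor then builds the "real" class once the vtable length is
// known, and the pool is remembered for bootstrap classes.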
GcClass*
parseClass(Thread* t, GcClassLoader* loader, const uint8_t* data, unsigned size,
Gc::Type throwType)
{
PROTECT(t, loader);
class Client: public Stream::Client {
public:
Client(Thread* t): t(t) { }
virtual void NO_RETURN handleError() {
abort(t);
}
private:
Thread* t;
} client(t);
Stream s(&client, data, size);
uint32_t magic = s.read4();
expect(t, magic == 0xCAFEBABE);
unsigned minorVer = s.read2(); // minor version
unsigned majorVer = s.read2(); // major version
if (DebugClassReader) {
fprintf(stderr, "read class (minor %d major %d)\n", minorVer, majorVer);
}
GcSingleton* pool = parsePool(t, s);
PROTECT(t, pool);
unsigned flags = s.read2();
unsigned name = s.read2();
GcClass* class_ = (GcClass*)makeClass(t,
flags,
0, // VM flags
0, // fixed size
0, // array size
0, // array dimensions
0, // array element class
0, // runtime data index
0, // object mask
cast<GcByteArray>(t, referenceName(t, singletonObject(t, pool, name - 1))),
0, // source file
0, // super
0, // interfaces
0, // vtable
0, // fields
0, // methods
0, // addendum
0, // static table
loader,
0, // source
0); // vtable length
PROTECT(t, class_);
unsigned super = s.read2();
if (super) {
GcClass* sc = resolveClass
(t, loader, referenceName(t, singletonObject(t, pool, super - 1)),
true, throwType);
set(t, reinterpret_cast<object>(class_), ClassSuper, reinterpret_cast<object>(sc));
class_->vmFlags()
|= (sc->vmFlags()
& (ReferenceFlag | WeakReferenceFlag | HasFinalizerFlag
| NeedInitFlag));
}
if (DebugClassReader) {
fprintf(stderr, " flags %d name %d super %d\n", flags, name, super);
}
parseInterfaceTable(t, s, class_, pool, throwType);
parseFieldTable(t, s, class_, pool);
parseMethodTable(t, s, class_, pool);
parseAttributeTable(t, s, class_, pool);
object vtable = class_->virtualTable();
unsigned vtableLength = (vtable ? arrayLength(t, vtable) : 0);
GcClass* real = t->m->processor->makeClass
(t,
class_->flags(),
class_->vmFlags(),
class_->fixedSize(),
class_->arrayElementSize(),
class_->arrayDimensions(),
class_->arrayElementClass(),
class_->objectMask(),
class_->name(),
class_->sourceFile(),
class_->super(),
class_->interfaceTable(),
class_->virtualTable(),
class_->fieldTable(),
class_->methodTable(),
class_->addendum(),
class_->staticTable(),
class_->loader(),
vtableLength);
PROTECT(t, real);
t->m->processor->initVtable(t, real);
updateClassTables(t, real, class_);
if (root(t, Machine::PoolMap)) {
object bootstrapClass = hashMapFind
(t, cast<GcHashMap>(t, root(t, Machine::BootstrapClassMap)), reinterpret_cast<object>(class_->name()),
byteArrayHash, byteArrayEqual);
hashMapInsert(
t,
cast<GcHashMap>(t, root(t, Machine::PoolMap)),
bootstrapClass ? bootstrapClass : reinterpret_cast<object>(real),
reinterpret_cast<object>(pool),
objectHash);
}
return real;
}
uint64_t
runParseClass(Thread* t, uintptr_t* arguments)
{
GcClassLoader* loader = cast<GcClassLoader>(t, reinterpret_cast<object>(arguments[0]));
System::Region* region = reinterpret_cast<System::Region*>(arguments[1]);
Gc::Type throwType = static_cast<Gc::Type>(arguments[2]);
return reinterpret_cast<uintptr_t>
(parseClass(t, loader, region->start(), region->length(), throwType));
}
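// Resolves a class against a system (boot-style) loader: delegate to the
// parent first, then search this loader's Finder for the class file,
// mapping the spec to a file name by appending ".class" (e.g.
// "java/lang/Object" is looked up as "java/lang/Object.class").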
GcClass*
resolveSystemClass(Thread* t, GcClassLoader* loader, object spec, bool throw_,
Gc::Type throwType)
{
PROTECT(t, loader);
PROTECT(t, spec);
ACQUIRE(t, t->m->classLock);
GcClass* class_ = cast<GcClass>(t, hashMapFind
(t, cast<GcHashMap>(t, loader->map()), spec, byteArrayHash, byteArrayEqual));
if (class_ == 0) {
PROTECT(t, class_);
if (loader->parent()) {
class_ = resolveSystemClass
(t, loader->parent(), spec, false);
if (class_) {
return class_;
}
}
if (byteArrayBody(t, spec, 0) == '[') {
class_ = resolveArrayClass(t, loader, spec, throw_, throwType);
} else {
THREAD_RUNTIME_ARRAY(t, char, file, byteArrayLength(t, spec) + 6);
memcpy(RUNTIME_ARRAY_BODY(file),
&byteArrayBody(t, spec, 0),
byteArrayLength(t, spec) - 1);
memcpy(RUNTIME_ARRAY_BODY(file) + byteArrayLength(t, spec) - 1,
".class",
7);
System::Region* region = static_cast<Finder*>
(loader->as<GcSystemClassLoader>(t)->finder())->find
(RUNTIME_ARRAY_BODY(file));
if (region) {
if (Verbose) {
fprintf(stderr, "parsing %s\n", &byteArrayBody(t, spec, 0));
}
{ THREAD_RESOURCE(t, System::Region*, region, region->dispose());
uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(loader),
reinterpret_cast<uintptr_t>(region),
static_cast<uintptr_t>(throwType) };
// parse class file
class_ = cast<GcClass>
(t, reinterpret_cast<object>(runRaw(t, runParseClass, arguments)));
if (UNLIKELY(t->exception)) {
if (throw_) {
object e = t->exception;
t->exception = 0;
vm::throw_(t, e);
} else {
t->exception = 0;
return 0;
}
}
}
if (Verbose) {
fprintf(stderr, "done parsing %s: %p\n",
&byteArrayBody(t, spec, 0),
class_);
}
{ const char* source = static_cast<Finder*>
(loader->as<GcSystemClassLoader>(t)->finder())->sourceUrl
(RUNTIME_ARRAY_BODY(file));
if (source) {
unsigned length = strlen(source);
object array = reinterpret_cast<object>(makeByteArray(t, length + 1));
memcpy(&byteArrayBody(t, array, 0), source, length);
array = internByteArray(t, array);
set(t, reinterpret_cast<object>(class_), ClassSource, array);
}
}
GcClass* bootstrapClass = cast<GcClass>(t, hashMapFind
(t, cast<GcHashMap>(t, root(t, Machine::BootstrapClassMap)), spec, byteArrayHash,
byteArrayEqual));
if (bootstrapClass) {
PROTECT(t, bootstrapClass);
updateBootstrapClass(t, bootstrapClass, class_);
class_ = bootstrapClass;
}
}
}
if (class_) {
hashMapInsert(t, cast<GcHashMap>(t, loader->map()), spec, reinterpret_cast<object>(class_), byteArrayHash);
updatePackageMap(t, class_);
} else if (throw_) {
throwNew(t, throwType, "%s", &byteArrayBody(t, spec, 0));
}
}
return class_;
}
GcClass*
findLoadedClass(Thread* t, GcClassLoader* loader, object spec)
{
PROTECT(t, loader);
PROTECT(t, spec);
ACQUIRE(t, t->m->classLock);
return loader->map() ? cast<GcClass>(t, hashMapFind
(t, cast<GcHashMap>(t, loader->map()), spec, byteArrayHash, byteArrayEqual)) : 0;
}
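// General class resolution: a system class loader is handled entirely
// inside the VM via resolveSystemClass, while a user-defined loader goes
// through the Java-level ClassLoader.loadClass(String), with '/'
// converted to '.' first (e.g. "java/util/List" becomes
// "java.util.List").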
GcClass*
resolveClass(Thread* t, GcClassLoader* loader, object spec, bool throw_,
Gc::Type throwType)
{
if (objectClass(t, loader) == type(t, GcSystemClassLoader::Type)) {
return resolveSystemClass(t, loader, spec, throw_, throwType);
} else {
PROTECT(t, loader);
PROTECT(t, spec);
GcClass* c = findLoadedClass(t, loader, spec);
if (c) {
return c;
}
if (byteArrayBody(t, spec, 0) == '[') {
c = resolveArrayClass(t, loader, spec, throw_, throwType);
} else {
if (root(t, Machine::LoadClassMethod) == 0) {
GcMethod* m = resolveMethod
(t, cast<GcClassLoader>(t, root(t, Machine::BootLoader)), "java/lang/ClassLoader",
"loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
if (m) {
setRoot(t, Machine::LoadClassMethod, reinterpret_cast<object>(m));
GcClass* classLoaderClass = type(t, GcClassLoader::Type);
if (classLoaderClass->vmFlags() & BootstrapFlag) {
resolveSystemClass
(t, cast<GcClassLoader>(t, root(t, Machine::BootLoader)),
reinterpret_cast<object>(classLoaderClass->name()));
}
}
}
GcMethod* method = findVirtualMethod
(t, cast<GcMethod>(t, root(t, Machine::LoadClassMethod)), objectClass(t, loader));
PROTECT(t, method);
THREAD_RUNTIME_ARRAY(t, char, s, byteArrayLength(t, spec));
replace('/', '.', RUNTIME_ARRAY_BODY(s), reinterpret_cast<char*>
(&byteArrayBody(t, spec, 0)));
object specString = makeString(t, "%s", RUNTIME_ARRAY_BODY(s));
PROTECT(t, specString);
uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(method),
reinterpret_cast<uintptr_t>(loader),
reinterpret_cast<uintptr_t>(specString) };
object jc = reinterpret_cast<object>
(runRaw(t, invokeLoadClass, arguments));
if (LIKELY(jc)) {
c = cast<GcClass>(t, jclassVmClass(t, jc));
} else if (t->exception) {
if (throw_) {
object e = type(t, throwType) == objectClass(t, t->exception)
? t->exception
: makeThrowable(t, throwType, specString, 0, t->exception);
t->exception = 0;
vm::throw_(t, e);
} else {
t->exception = 0;
}
}
}
if (LIKELY(c)) {
PROTECT(t, c);
saveLoadedClass(t, loader, c);
} else if (throw_) {
throwNew(t, throwType, "%s", &byteArrayBody(t, spec, 0));
}
return c;
}
}
GcMethod*
resolveMethod(Thread* t, GcClass* class_, const char* methodName,
const char* methodSpec)
{
PROTECT(t, class_);
object name = reinterpret_cast<object>(makeByteArray(t, methodName));
PROTECT(t, name);
object spec = reinterpret_cast<object>(makeByteArray(t, methodSpec));
GcMethod* method = cast<GcMethod>(t, findMethodInClass(t, class_, name, spec));
if (method == 0) {
throwNew(t, GcNoSuchMethodError::Type, "%s %s not found in %s",
methodName, methodSpec, class_->name()->body().begin());
} else {
return method;
}
}
object
resolveField(Thread* t, GcClass* class_, const char* fieldName,
const char* fieldSpec)
{
PROTECT(t, class_);
object name = reinterpret_cast<object>(makeByteArray(t, fieldName));
PROTECT(t, name);
object spec = reinterpret_cast<object>(makeByteArray(t, fieldSpec));
PROTECT(t, spec);
object field = findInInterfaces(t, class_, name, spec, findFieldInClass);
GcClass* c = class_;
PROTECT(t, c);
for (; c != 0 and field == 0; c = c->super()) {
field = findFieldInClass(t, c, name, spec);
}
if (field == 0) {
throwNew(t, GcNoSuchFieldError::Type, "%s %s not found in %s",
fieldName, fieldSpec, class_->name()->body().begin());
} else {
return field;
}
}
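// The static-initializer protocol below is driven by two vm flags:
// NeedInitFlag marks a class whose <clinit> has not yet completed, and
// InitFlag marks one whose <clinit> is currently running.
// classNeedsInit answers, without locking, whether the calling thread
// should attempt initialization.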
bool
classNeedsInit(Thread* t, GcClass* c)
{
if (c->vmFlags() & NeedInitFlag) {
if (c->vmFlags() & InitFlag) {
// the class is currently being initialized. If this is the thread
// which is initializing it, we should not try to initialize it
// recursively. Otherwise, we must wait for the responsible
// thread to finish.
for (Thread::ClassInitStack* s = t->classInitStack; s; s = s->next) {
if (s->class_ == c) {
return false;
}
}
}
return true;
} else {
return false;
}
}
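// preInitClass/postInitClass bracket a <clinit> run using double-checked
// locking on classLock: the flags are re-tested under the lock, the
// winning thread sets InitFlag, and other threads wait on classLock
// until it clears. A true return means the caller must run the
// initializer and then call postInitClass.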
bool
preInitClass(Thread* t, GcClass* c)
{
int flags = c->vmFlags();
loadMemoryBarrier();
if (flags & NeedInitFlag) {
PROTECT(t, c);
ACQUIRE(t, t->m->classLock);
if (c->vmFlags() & NeedInitFlag) {
if (c->vmFlags() & InitFlag) {
// If the class is currently being initialized and this is the thread
// which is initializing it, we should not try to initialize it
// recursively.
if (isInitializing(t, c)) {
return false;
}
// some other thread is on the job - wait for it to finish.
while (c->vmFlags() & InitFlag) {
ENTER(t, Thread::IdleState);
t->m->classLock->wait(t->systemThread, 0);
}
} else if (c->vmFlags() & InitErrorFlag) {
throwNew(t, GcNoClassDefFoundError::Type, "%s",
c->name()->body().begin());
} else {
c->vmFlags() |= InitFlag;
return true;
}
}
}
return false;
}
void
postInitClass(Thread* t, object c)
{
PROTECT(t, c);
ACQUIRE(t, t->m->classLock);
if (t->exception
and instanceOf(t, type(t, GcException::Type), t->exception)) {
classVmFlags(t, c) |= NeedInitFlag | InitErrorFlag;
classVmFlags(t, c) &= ~InitFlag;
object exception = t->exception;
t->exception = 0;
exception = makeThrowable
(t, GcExceptionInInitializerError::Type, 0, 0, exception);
set(t, exception, ExceptionInInitializerErrorException,
throwableCause(t, exception));
throw_(t, exception);
} else {
classVmFlags(t, c) &= ~(NeedInitFlag | InitFlag);
}
t->m->classLock->notifyAll(t->systemThread);
}
void
initClass(Thread* t, GcClass* c)
{
PROTECT(t, c);
object super = reinterpret_cast<object>(c->super());
if (super) {
initClass(t, cast<GcClass>(t, super));
}
if (preInitClass(t, c)) {
OBJECT_RESOURCE(t, c, postInitClass(t, c));
GcMethod* initializer = cast<GcMethod>(t, classInitializer(t, c));
if (initializer) {
Thread::ClassInitStack stack(t, c);
t->m->processor->invoke(t, initializer, 0);
}
}
}
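// Builds (and caches in the element class's runtime data) the array
// class for a given element class by constructing a JVM array
// descriptor: an element spec of "java/lang/String" becomes
// "[Ljava/lang/String;", while an element that is already an array just
// gains a leading '[' (e.g. "[I" becomes "[[I").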
GcClass*
resolveObjectArrayClass(Thread* t, GcClassLoader* loader, object elementClass)
{
PROTECT(t, loader);
PROTECT(t, elementClass);
{ GcClass* arrayClass = cast<GcClass>(t, classRuntimeDataArrayClass
(t, getClassRuntimeData(t, cast<GcClass>(t, elementClass))));
if (arrayClass) {
return arrayClass;
}
}
object elementSpec = className(t, elementClass);
PROTECT(t, elementSpec);
object spec;
if (byteArrayBody(t, elementSpec, 0) == '[') {
spec = reinterpret_cast<object>(makeByteArray(t, byteArrayLength(t, elementSpec) + 1));
byteArrayBody(t, spec, 0) = '[';
memcpy(&byteArrayBody(t, spec, 1),
&byteArrayBody(t, elementSpec, 0),
byteArrayLength(t, elementSpec));
} else {
spec = reinterpret_cast<object>(makeByteArray(t, byteArrayLength(t, elementSpec) + 3));
byteArrayBody(t, spec, 0) = '[';
byteArrayBody(t, spec, 1) = 'L';
memcpy(&byteArrayBody(t, spec, 2),
&byteArrayBody(t, elementSpec, 0),
byteArrayLength(t, elementSpec) - 1);
byteArrayBody(t, spec, byteArrayLength(t, elementSpec) + 1) = ';';
byteArrayBody(t, spec, byteArrayLength(t, elementSpec) + 2) = 0;
}
GcClass* arrayClass = resolveClass(t, loader, spec);
set(t, getClassRuntimeData(t, cast<GcClass>(t, elementClass)), ClassRuntimeDataArrayClass,
reinterpret_cast<object>(arrayClass));
return arrayClass;
}
object
makeObjectArray(Thread* t, GcClass* elementClass, unsigned count)
{
GcClass* arrayClass = resolveObjectArrayClass
(t, elementClass->loader(), reinterpret_cast<object>(elementClass));
PROTECT(t, arrayClass);
object array = reinterpret_cast<object>(makeArray(t, count));
setObjectClass(t, array, arrayClass);
return array;
}
object
findFieldInClass(Thread* t, GcClass* class_, object name, object spec)
{
return findInTable
(t, class_->fieldTable(), name, spec, fieldName, fieldSpec);
}
object
findMethodInClass(Thread* t, GcClass* class_, object name, object spec)
{
return findInTable
(t, class_->methodTable(), name, spec, methodName, methodSpec);
}
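// Searches a class hierarchy for a member: for interfaces the flattened
// virtual table is consulted first, then each class up the superclass
// chain, and, for field lookups only, any implemented interfaces (which
// may declare constants).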
object
findInHierarchyOrNull(Thread* t, GcClass* class_, object name, object spec,
object (*find)(Thread*, GcClass*, object, object))
{
GcClass* originalClass = class_;
object o = 0;
if ((class_->flags() & ACC_INTERFACE)
and class_->virtualTable())
{
o = findInTable
(t, class_->virtualTable(), name, spec, methodName, methodSpec);
}
if (o == 0) {
for (; o == 0 and class_; class_ = class_->super()) {
o = find(t, class_, name, spec);
}
if (o == 0 and find == findFieldInClass) {
o = findInInterfaces(t, originalClass, name, spec, find);
}
}
return o;
}
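// Computes how many argument slots a method descriptor occupies: longs
// and doubles take two slots, everything else one, plus one for the
// implicit "this" of instance methods. For example,
// parameterFootprint(t, "(ILjava/lang/Object;J)V", false) is
// 1 + 1 + 2 + 1 = 5.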
unsigned
parameterFootprint(Thread* t, const char* s, bool static_)
{
unsigned footprint = 0;
for (MethodSpecIterator it(t, s); it.hasNext();) {
switch (*it.next()) {
case 'J':
case 'D':
footprint += 2;
break;
default:
++ footprint;
break;
}
}
if (not static_) {
++ footprint;
}
return footprint;
}
void
addFinalizer(Thread* t, object target, void (*finalize)(Thread*, object))
{
PROTECT(t, target);
ACQUIRE(t, t->m->referenceLock);
void* function;
memcpy(&function, &finalize, BytesPerWord);
GcFinalizer* f = makeFinalizer(t, 0, function, 0, 0, 0);
f->target() = target;
f->next() = reinterpret_cast<object>(t->m->finalizers);
t->m->finalizers = f;
}
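// Returns the monitor associated with o, lazily creating one when
// createNew is set. Creation re-checks the monitor map after entering
// ExclusiveState so two racing threads cannot both install a monitor,
// and registers a finalizer so the map entry is removed when o is
// collected.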
object
objectMonitor(Thread* t, object o, bool createNew)
{
assertT(t, t->state == Thread::ActiveState);
object m = hashMapFind
(t, cast<GcHashMap>(t, root(t, Machine::MonitorMap)), o, objectHash, objectEqual);
if (m) {
if (DebugMonitors) {
fprintf(stderr, "found monitor %p for object %x\n", m, objectHash(t, o));
}
return m;
} else if (createNew) {
PROTECT(t, o);
PROTECT(t, m);
{ ENTER(t, Thread::ExclusiveState);
m = hashMapFind
(t, cast<GcHashMap>(t, root(t, Machine::MonitorMap)), o, objectHash, objectEqual);
if (m) {
if (DebugMonitors) {
fprintf(stderr, "found monitor %p for object %x\n",
m, objectHash(t, o));
}
return m;
}
object head = reinterpret_cast<object>(makeMonitorNode(t, 0, 0));
m = reinterpret_cast<object>(makeMonitor(t, 0, 0, 0, head, head, 0));
if (DebugMonitors) {
fprintf(stderr, "made monitor %p for object %x\n", m,
objectHash(t, o));
}
hashMapInsert(t, cast<GcHashMap>(t, root(t, Machine::MonitorMap)), o, m, objectHash);
addFinalizer(t, o, removeMonitor);
}
return m;
} else {
return 0;
}
}
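// Interns string s: if an equal string is already present in the string
// map, the canonical instance (reached through a weak reference) is
// returned; otherwise s itself is inserted as the canonical instance,
// with a finalizer to remove the entry once it is collected.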
object
intern(Thread* t, object s)
{
PROTECT(t, s);
ACQUIRE(t, t->m->referenceLock);
GcTriple* n = hashMapFindNode
(t, cast<GcHashMap>(t, root(t, Machine::StringMap)), s, stringHash, stringEqual);
if (n) {
return jreferenceTarget(t, n->first());
} else {
hashMapInsert(t, cast<GcHashMap>(t, root(t, Machine::StringMap)), s, 0, stringHash);
addFinalizer(t, s, removeString);
return s;
}
}
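// Visits the reference fields of o beginning at field index "start",
// driven by the class's object mask (a bitmap marking which words hold
// pointers); singletons carry their own mask, and continuation objects
// are delegated to the processor.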
void
walk(Thread* t, Heap::Walker* w, object o, unsigned start)
{
GcClass* class_ = cast<GcClass>(t, static_cast<object>(t->m->heap->follow(objectClass(t, o))));
object objectMask = static_cast<object>
(t->m->heap->follow(class_->objectMask()));
bool more = true;
if (objectMask) {
unsigned fixedSize = class_->fixedSize();
unsigned arrayElementSize = class_->arrayElementSize();
unsigned arrayLength
= (arrayElementSize ?
fieldAtOffset<uintptr_t>(o, fixedSize - BytesPerWord) : 0);
THREAD_RUNTIME_ARRAY(t, uint32_t, mask, intArrayLength(t, objectMask));
memcpy(RUNTIME_ARRAY_BODY(mask), &intArrayBody(t, objectMask, 0),
intArrayLength(t, objectMask) * 4);
more = ::walk(t, w, RUNTIME_ARRAY_BODY(mask), fixedSize, arrayElementSize,
arrayLength, start);
} else if (class_->vmFlags() & SingletonFlag) {
unsigned length = singletonLength(t, o);
if (length) {
more = ::walk(t, w, singletonMask(t, cast<GcSingleton>(t, o)),
(singletonCount(t, cast<GcSingleton>(t, o)) + 2) * BytesPerWord, 0, 0, start);
} else if (start == 0) {
more = w->visit(0);
}
} else if (start == 0) {
more = w->visit(0);
}
if (more and class_->vmFlags() & ContinuationFlag) {
t->m->processor->walkContinuationBody(t, w, o, start);
}
}
int
walkNext(Thread* t, object o, int previous)
{
class Walker: public Heap::Walker {
public:
Walker(): value(-1) { }
bool visit(unsigned offset) {
value = offset;
return false;
}
int value;
} walker;
walk(t, &walker, o, previous + 1);
return walker.value;
}
void
visitRoots(Machine* m, Heap::Visitor* v)
{
v->visit(&(m->types));
v->visit(&(m->roots));
for (Thread* t = m->rootThread; t; t = t->peer) {
::visitRoots(t, v);
}
for (Reference* r = m->jniReferences; r; r = r->next) {
if (not r->weak) {
v->visit(&(r->target));
}
}
}
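// printf-style logging that also reaches the Windows debugger: the
// format pass runs twice, first with a null buffer (vsnprintf(0, 0, ...)
// or _vscprintf on Windows) to measure the output, then again into a
// stack buffer of exactly the measured size.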
void
logTrace(FILE* f, const char* fmt, ...)
{
va_list a;
va_start(a, fmt);
#ifdef PLATFORM_WINDOWS
const unsigned length = _vscprintf(fmt, a);
#else
const unsigned length = vsnprintf(0, 0, fmt, a);
#endif
va_end(a);
RUNTIME_ARRAY(char, buffer, length + 1);
va_start(a, fmt);
vsnprintf(RUNTIME_ARRAY_BODY(buffer), length + 1, fmt, a);
va_end(a);
RUNTIME_ARRAY_BODY(buffer)[length] = 0;
::fprintf(f, "%s", RUNTIME_ARRAY_BODY(buffer));
#ifdef PLATFORM_WINDOWS
::OutputDebugStringA(RUNTIME_ARRAY_BODY(buffer));
#endif
}
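// Print a throwable and its chain of causes to the error log: for each
// throwable, the class name, the message (if any), and one
// "at Class.method (line)" entry per trace element.  A throwable that
// lists itself as its own cause terminates the chain.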
void
printTrace(Thread* t, object exception)
{
if (exception == 0) {
exception = makeThrowable(t, GcNullPointerException::Type);
}
for (object e = exception; e; e = throwableCause(t, e)) {
if (e != exception) {
logTrace(errorLog(t), "caused by: ");
}
logTrace(errorLog(t), "%s", objectClass(t, e)->name()->body().begin());
if (throwableMessage(t, e)) {
object m = throwableMessage(t, e);
THREAD_RUNTIME_ARRAY(t, char, message, stringLength(t, m) + 1);
stringChars(t, m, RUNTIME_ARRAY_BODY(message));
logTrace(errorLog(t), ": %s\n", RUNTIME_ARRAY_BODY(message));
} else {
logTrace(errorLog(t), "\n");
}
object trace = throwableTrace(t, e);
if (trace) {
for (unsigned i = 0; i < objectArrayLength(t, trace); ++i) {
object e = objectArrayBody(t, trace, i);
const int8_t* class_ = &byteArrayBody
(t, className(t, methodClass(t, traceElementMethod(t, e))), 0);
const int8_t* method = &byteArrayBody
(t, methodName(t, traceElementMethod(t, e)), 0);
int line = t->m->processor->lineNumber
(t, cast<GcMethod>(t, traceElementMethod(t, e)), traceElementIp(t, e));
logTrace(errorLog(t), " at %s.%s ", class_, method);
switch (line) {
case NativeLine:
logTrace(errorLog(t), "(native)\n");
break;
case UnknownLine:
logTrace(errorLog(t), "(unknown line)\n");
break;
default:
logTrace(errorLog(t), "(line %d)\n", line);
}
}
}
if (e == throwableCause(t, e)) {
break;
}
}
::fflush(errorLog(t));
}
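// Build an array of trace elements, one per stack frame, from a walker
// positioned at the top of the stack; returns an empty object array if
// there are no frames.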
object
makeTrace(Thread* t, Processor::StackWalker* walker)
{
class Visitor: public Processor::StackVisitor {
public:
Visitor(Thread* t): t(t), trace(0), index(0), protector(t, &trace) { }
virtual bool visit(Processor::StackWalker* walker) {
if (trace == 0) {
trace = makeObjectArray(t, walker->count());
assertT(t, trace);
}
      object e = reinterpret_cast<object>(makeTraceElement
        (t, reinterpret_cast<object>(walker->method()), walker->ip()));
assertT(t, index < objectArrayLength(t, trace));
set(t, trace, ArrayBody + (index * BytesPerWord), e);
++ index;
return true;
}
Thread* t;
object trace;
unsigned index;
Thread::SingleProtector protector;
} v(t);
walker->walk(&v);
return v.trace ? v.trace : makeObjectArray(t, 0);
}
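// Capture a stack trace for "target", which need not be the current
// thread; the visitor builds the whole trace from the first frame it
// sees and then stops the walk.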
object
makeTrace(Thread* t, Thread* target)
{
class Visitor: public Processor::StackVisitor {
public:
Visitor(Thread* t): t(t), trace(0) { }
virtual bool visit(Processor::StackWalker* walker) {
trace = vm::makeTrace(t, walker);
return false;
}
Thread* t;
object trace;
} v(t);
t->m->processor->walkStack(target, &v);
return v.trace ? v.trace : makeObjectArray(t, 0);
}
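// Body of the dedicated finalizer thread: sleep on the machine's state
// lock until there is work (or until finalizeThread is cleared at
// shutdown, in which case we return), detach the finalize and clean
// queues while holding the lock, then run finalizers and cleaners
// outside it.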
void
runFinalizeThread(Thread* t)
{
object finalizeList = 0;
PROTECT(t, finalizeList);
object cleanList = 0;
PROTECT(t, cleanList);
while (true) {
{ ACQUIRE(t, t->m->stateLock);
while (t->m->finalizeThread
and root(t, Machine::ObjectsToFinalize) == 0
and root(t, Machine::ObjectsToClean) == 0)
{
ENTER(t, Thread::IdleState);
t->m->stateLock->wait(t->systemThread, 0);
}
if (t->m->finalizeThread == 0) {
return;
} else {
finalizeList = root(t, Machine::ObjectsToFinalize);
setRoot(t, Machine::ObjectsToFinalize, 0);
cleanList = root(t, Machine::ObjectsToClean);
setRoot(t, Machine::ObjectsToClean, 0);
}
}
for (; finalizeList; finalizeList = finalizerQueueNext(t, finalizeList)) {
finalizeObject(t, finalizerQueueTarget(t, finalizeList), "finalize");
}
for (; cleanList; cleanList = cleanerQueueNext(t, cleanList)) {
finalizeObject(t, cleanList, "clean");
}
}
}
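// Decode "length" bytes of UTF-8 into the VM's internal string
// representation.  Malformed input is deliberately tolerated: the
// client's handleError is a no-op rather than an abort.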
object
parseUtf8(Thread* t, const char* data, unsigned length)
{
class Client: public Stream::Client {
public:
Client(Thread* t): t(t) { }
virtual void handleError() {
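      // tolerate malformed UTF-8 rather than aborting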
if (false) abort(t);
}
private:
Thread* t;
} client(t);
Stream s(&client, reinterpret_cast<const uint8_t*>(data), length);
return ::parseUtf8(t, s, length);
}
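// As above, but for a null-terminated byte array: if every byte is
// ASCII the array is returned unchanged; otherwise the bytes are
// decoded through a stream whose protector keeps "array" visible to the
// collector while decoding allocates.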
object
parseUtf8(Thread* t, object array)
{
for (unsigned i = 0; i < byteArrayLength(t, array) - 1; ++i) {
if (byteArrayBody(t, array, i) & 0x80) {
goto slow_path;
}
}
return array;
slow_path:
class Client: public Stream::Client {
public:
Client(Thread* t): t(t) { }
virtual void handleError() {
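      // tolerate malformed UTF-8 rather than aborting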
if (false) abort(t);
}
private:
Thread* t;
} client(t);
class MyStream: public AbstractStream {
public:
class MyProtector: public Thread::Protector {
public:
MyProtector(Thread* t, MyStream* s):
Protector(t), s(s)
{ }
virtual void visit(Heap::Visitor* v) {
v->visit(&(s->array));
}
MyStream* s;
};
MyStream(Thread* t, Client* client, object array):
AbstractStream(client, byteArrayLength(t, array) - 1),
array(array),
protector(t, this)
{ }
virtual void copy(uint8_t* dst, unsigned offset, unsigned size) {
memcpy(dst, &byteArrayBody(protector.t, array, offset), size);
}
object array;
MyProtector protector;
} s(t, &client, array);
return ::parseUtf8(t, s, byteArrayLength(t, array) - 1);
}
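// Walk the stack and return the method "target" frames down from the
// top (a target of -1 means 2, i.e. the caller's caller), optionally
// skipping reflection's Method.invoke frames so that reflective calls
// are transparent to caller-sensitive code.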
GcMethod*
getCaller(Thread* t, unsigned target, bool skipMethodInvoke)
{
if (static_cast<int>(target) == -1) {
target = 2;
}
class Visitor: public Processor::StackVisitor {
public:
Visitor(Thread* t, unsigned target, bool skipMethodInvoke):
t(t), method(0), count(0), target(target),
skipMethodInvoke(skipMethodInvoke)
{ }
virtual bool visit(Processor::StackWalker* walker) {
if (skipMethodInvoke
and walker->method()->class_()
== type(t, GcJmethod::Type)
and strcmp(walker->method()->name()->body().begin(),
reinterpret_cast<const int8_t*>("invoke")) == 0) {
return true;
}
if (count == target) {
method = walker->method();
return false;
} else {
++ count;
return true;
}
}
Thread* t;
GcMethod* method;
unsigned count;
unsigned target;
bool skipMethodInvoke;
} v(t, target, skipMethodInvoke);
t->m->processor->walkStack(t, &v);
return v.method;
}
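// Parse a class from "buffer", record it in the loader's map of defined
// classes, and return it.  (The block of commented-out code below is
// leftover debug logic for dumping class bytes to /tmp.)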
object
defineClass(Thread* t, GcClassLoader* loader, const uint8_t* buffer, unsigned length)
{
PROTECT(t, loader);
object c = reinterpret_cast<object>(parseClass(t, loader, buffer, length));
// char name[byteArrayLength(t, className(t, c))];
// memcpy(name, &byteArrayBody(t, className(t, c), 0),
// byteArrayLength(t, className(t, c)));
// replace('/', '-', name);
// const unsigned BufferSize = 1024;
// char path[BufferSize];
// snprintf(path, BufferSize, "/tmp/avian-define-class/%s.class", name);
// FILE* file = fopen(path, "wb");
// if (file) {
// fwrite(buffer, length, 1, file);
// fclose(file);
// }
PROTECT(t, c);
saveLoadedClass(t, loader, cast<GcClass>(t, c));
return c;
}
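// Recursively populate a multidimensional array: the element class is
// derived by stripping one leading '[' from the array class's name,
// each of counts[index] slots gets a fresh array of counts[index + 1]
// elements, and recursion stops at the last dimension or at a zero
// count.  For example, for a two-dimensional int array with counts
// {2, 3}, the top-level "[[I" array receives two "[I" arrays of three
// elements each.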
void
populateMultiArray(Thread* t, object array, int32_t* counts,
unsigned index, unsigned dimensions)
{
if (index + 1 == dimensions or counts[index] == 0) {
return;
}
PROTECT(t, array);
object spec = reinterpret_cast<object>(objectClass(t, array)->name());
PROTECT(t, spec);
object elementSpec = reinterpret_cast<object>(makeByteArray(t, byteArrayLength(t, spec) - 1));
memcpy(&byteArrayBody(t, elementSpec, 0),
&byteArrayBody(t, spec, 1),
byteArrayLength(t, spec) - 1);
GcClass* class_ = resolveClass
(t, objectClass(t, array)->loader(), elementSpec);
PROTECT(t, class_);
for (int32_t i = 0; i < counts[index]; ++i) {
object a = reinterpret_cast<object>(makeArray
(t, ceilingDivide
(counts[index + 1] * class_->arrayElementSize(), BytesPerWord)));
arrayLength(t, a) = counts[index + 1];
setObjectClass(t, a, class_);
set(t, array, ArrayBody + (i * BytesPerWord), a);
populateMultiArray(t, a, counts, index + 1, dimensions);
}
}
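// Return the monitor guarding "thread"'s interrupt state, creating it
// lazily with double-checked locking: the load barrier here pairs with
// the store-store barrier on the writing side, so a reader that sees a
// non-null lock also sees a fully initialized monitor.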
object
interruptLock(Thread* t, object thread)
{
object lock = threadInterruptLock(t, thread);
loadMemoryBarrier();
if (lock == 0) {
PROTECT(t, thread);
ACQUIRE(t, t->m->referenceLock);
if (threadInterruptLock(t, thread) == 0) {
object head = reinterpret_cast<object>(makeMonitorNode(t, 0, 0));
object lock = reinterpret_cast<object>(makeMonitor(t, 0, 0, 0, head, head, 0));
storeStoreMemoryBarrier();
set(t, thread, ThreadInterruptLock, lock);
}
}
return threadInterruptLock(t, thread);
}
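// Clear the current thread's interrupted flag under its interrupt lock.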
void
clearInterrupted(Thread* t)
{
monitorAcquire(t, interruptLock(t, t->javaThread));
threadInterrupted(t, t->javaThread) = false;
monitorRelease(t, interruptLock(t, t->javaThread));
}
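// Set "thread"'s interrupted flag and, if it has a live VM peer, wake
// it from any interruptible wait; all under the interrupt lock.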
void
threadInterrupt(Thread* t, object thread)
{
PROTECT(t, thread);
monitorAcquire(t, interruptLock(t, thread));
Thread* p = reinterpret_cast<Thread*>(threadPeer(t, thread));
if (p) {
interrupt(t, p);
}
threadInterrupted(t, thread) = true;
monitorRelease(t, interruptLock(t, thread));
}
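// Read "thread"'s interrupted flag under the interrupt lock, optionally
// clearing it, and return the value observed.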
bool
threadIsInterrupted(Thread* t, object thread, bool clear)
{
PROTECT(t, thread);
monitorAcquire(t, interruptLock(t, thread));
bool v = threadInterrupted(t, thread);
if (clear) {
threadInterrupted(t, thread) = false;
}
monitorRelease(t, interruptLock(t, thread));
return v;
}
void
noop()
{ }
#include "type-constructors.cpp"
} // namespace vm
// for debugging
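// vmfPrintTrace is exported so it can be called directly from a native
// debugger, e.g. from gdb (assuming "t" holds the current vm::Thread*):
//
//   (gdb) call vmfPrintTrace(t, stderr)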
AVIAN_EXPORT void
vmfPrintTrace(Thread* t, FILE* out)
{
class Visitor: public Processor::StackVisitor {
public:
Visitor(Thread* t, FILE* out): t(t), out(out) { }
virtual bool visit(Processor::StackWalker* walker) {
const int8_t* class_ = walker->method()->class_()->name()->body().begin();
const int8_t* method = walker->method()->name()->body().begin();
int line = t->m->processor->lineNumber
(t, walker->method(), walker->ip());
fprintf(out, " at %s.%s ", class_, method);
switch (line) {
case NativeLine:
fprintf(out, "(native)\n");
break;
case UnknownLine:
fprintf(out, "(unknown line)\n");
break;
default:
fprintf(out, "(line %d)\n", line);
}
return true;
}
Thread* t;
FILE* out;
} v(t, out);
fprintf(out, "debug trace for thread %p\n", t);
t->m->processor->walkStack(t, &v);
fflush(out);
}
AVIAN_EXPORT void
vmPrintTrace(Thread* t)
{
vmfPrintTrace(t, stderr);
}
// also for debugging
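// Given a method and a source line number, scan the method's line
// number table and return the ip recorded for that line, the ip of the
// last entry before it if the exact line is absent, or 0 if there is no
// code or no table.  The printf calls are intentional debug output.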
AVIAN_EXPORT void*
vmAddressFromLine(Thread* t, object m, unsigned line)
{
object code = methodCode(t, m);
printf("code: %p\n", code);
object lnt = reinterpret_cast<object>(codeLineNumberTable(t, code));
printf("lnt: %p\n", lnt);
if (lnt) {
unsigned last = 0;
unsigned bottom = 0;
unsigned top = lineNumberTableLength(t, lnt);
    for (unsigned i = bottom; i < top; ++i) {
      uint64_t ln = lineNumberTableBody(t, lnt, i);
      if (lineNumberLine(ln) == line)
        return reinterpret_cast<void*>(lineNumberIp(ln));
      else if (lineNumberLine(ln) > line)
        return reinterpret_cast<void*>(last);
      last = lineNumberIp(ln);
    }
}
return 0;
}