corda/src/machine.cpp

/* Copyright (c) 2008-2014, Avian Contributors

   Permission to use, copy, modify, and/or distribute this software
   for any purpose with or without fee is hereby granted, provided
   that the above copyright notice and this permission notice appear
   in all copies.

   There is NO WARRANTY for this software.  See license.txt for
   details. */

#include "avian/jnienv.h"
#include "avian/machine.h"
#include "avian/util.h"
#include <avian/util/stream.h>
#include "avian/constants.h"
#include "avian/processor.h"
#include "avian/arch.h"
#include "avian/lzma.h"
2007-07-06 23:50:26 +00:00
2013-02-20 05:56:05 +00:00
#include <avian/util/runtime-array.h>
#include <avian/util/math.h>
#if defined(PLATFORM_WINDOWS)
# define WIN32_LEAN_AND_MEAN
2013-02-15 16:53:02 +00:00
# include <windows.h>
#endif
2007-07-06 23:50:26 +00:00
using namespace vm;
using namespace avian::util;
2007-07-06 23:50:26 +00:00
namespace {
2012-05-22 19:53:32 +00:00
const bool DebugClassReader = false;
2009-08-14 14:52:31 +00:00
const unsigned NoByte = 0xFFFF;
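// Atomically add v to *p, retrying the compare-and-swap until no
// other thread races the update.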
#ifdef USE_ATOMIC_OPERATIONS
void
atomicIncrement(uint32_t* p, int v)
{
  for (uint32_t old = *p;
       not atomicCompareAndSwap32(p, old, old + v);
       old = *p)
  { }
}
#endif

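// Wait for thread o's system thread to exit (unless o is the calling
// thread) and mark it joined.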
void
join(Thread* t, Thread* o)
{
  if (t != o) {
    assertT(t, o->state != Thread::JoinedState);
    assertT(t, (o->flags & Thread::SystemFlag) == 0);
    if (o->flags & Thread::JoinFlag) {
      o->systemThread->join();
    }

    o->state = Thread::JoinedState;
  }
}

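// Debug-only helpers for walking the thread tree: find checks whether
// o is reachable from t, count returns the number of other threads
// reachable from t, and fill flattens those threads into an array.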
#ifndef NDEBUG
bool
find(Thread* t, Thread* o)
{
  return (t == o)
    or (t->peer and find(t->peer, o))
    or (t->child and find(t->child, o));
}

unsigned
count(Thread* t, Thread* o)
{
  unsigned c = 0;

  if (t != o) ++ c;
  if (t->peer) c += count(t->peer, o);
  if (t->child) c += count(t->child, o);

  return c;
}

Thread**
fill(Thread* t, Thread* o, Thread** array)
{
  if (t != o) *(array++) = t;
  if (t->peer) array = fill(t->peer, o, array);
  if (t->child) array = fill(t->child, o, array);

  return array;
}
#endif // not NDEBUG

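// Unlink thread o from the thread tree (unless remove is false),
// splicing its children into its parent or into the root list, then
// release its system resources.  The NDEBUG bookkeeping verifies that
// only o - and no other thread - disappears from the tree.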
void
dispose(Thread* t, Thread* o, bool remove)
{
  if (remove) {
#ifndef NDEBUG
    expect(t, find(t->m->rootThread, o));

    unsigned c = count(t->m->rootThread, o);
    THREAD_RUNTIME_ARRAY(t, Thread*, threads, c);
    fill(t->m->rootThread, o, RUNTIME_ARRAY_BODY(threads));
#endif

    if (o->parent) {
      Thread* previous = 0;
      for (Thread* p = o->parent->child; p;) {
        if (p == o) {
          if (p == o->parent->child) {
            o->parent->child = p->peer;
          } else {
            previous->peer = p->peer;
          }
          break;
        } else {
          previous = p;
          p = p->peer;
        }
      }

      for (Thread* p = o->child; p;) {
        Thread* next = p->peer;
        p->peer = o->parent->child;
        o->parent->child = p;
        p->parent = o->parent;
        p = next;
      }
    } else if (o->child) {
      t->m->rootThread = o->child;

      for (Thread* p = o->peer; p;) {
        Thread* next = p->peer;
        p->peer = t->m->rootThread;
        t->m->rootThread = p;
        p = next;
      }
    } else if (o->peer) {
      t->m->rootThread = o->peer;
    } else {
      abort(t);
    }

#ifndef NDEBUG
    expect(t, not find(t->m->rootThread, o));

    for (unsigned i = 0; i < c; ++i) {
      expect(t, find(t->m->rootThread, RUNTIME_ARRAY_BODY(threads)[i]));
    }
#endif
  }

  o->dispose();
}

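// Apply visit to o and, recursively, to every thread below it in the
// thread tree, children first.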
void
visitAll(Thread* m, Thread* o, void (*visit)(Thread*, Thread*))
{
  for (Thread* p = o->child; p;) {
    Thread* child = p;
    p = p->peer;
    visitAll(m, child, visit);
  }

  visit(m, o);
}

void
disposeNoRemove(Thread* m, Thread* o)
{
  dispose(m, o, false);
}

void
interruptDaemon(Thread* m, Thread* o)
{
  if (o->flags & Thread::DaemonFlag) {
    interrupt(m, o);
  }
}

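// Shut the VM down: join the remaining threads, run any registered
// native finalizers, dispose of virtual file regions and finders, and
// tear down the machine's subsystems in dependency order.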
void
turnOffTheLights(Thread* t)
{
  expect(t, t->m->liveCount == 1);

  visitAll(t, t->m->rootThread, join);

  enter(t, Thread::ExitState);

  { GcFinalizer* p = 0;
    PROTECT(t, p);

    for (p = t->m->finalizers; p;) {
      GcFinalizer* f = p;
      p = cast<GcFinalizer>(t, p->next());

      void (*function)(Thread*, object);
      memcpy(&function, &f->finalize(), BytesPerWord);
      if (function) {
        function(t, f->target());
      }
    }

    for (p = t->m->tenuredFinalizers; p;) {
      GcFinalizer* f = p;
      p = cast<GcFinalizer>(t, p->next());

      void (*function)(Thread*, object);
      memcpy(&function, &f->finalize(), BytesPerWord);
      if (function) {
        function(t, f->target());
      }
    }
  }

  if (GcArray* files = roots(t)->virtualFiles()) {
    PROTECT(t, files);
    for (unsigned i = 0; i < files->length(); ++i) {
      object region = files->body()[i];
      if (region) {
        static_cast<System::Region*>(cast<GcRegion>(t, region)->region())
          ->dispose();
      }
    }
  }

  for (GcFinder* p = roots(t)->virtualFileFinders(); p; p = p->next()) {
    static_cast<Finder*>(p->finder())->dispose();
  }

  Machine* m = t->m;

  visitAll(t, t->m->rootThread, disposeNoRemove);

  System* s = m->system;

  expect(s, m->threadCount == 0);

  Heap* h = m->heap;
  Processor* p = m->processor;
  Classpath* c = m->classpath;
  Finder* bf = m->bootFinder;
  Finder* af = m->appFinder;

  c->dispose();
  h->disposeFixies();
  m->dispose();
  p->dispose();
  bf->dispose();
  af->dispose();
  h->dispose();
  s->dispose();
}

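// Join and dispose any zombie or already-joined non-system threads in
// the subtree rooted at o.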
void
killZombies(Thread* t, Thread* o)
{
  for (Thread* p = o->child; p;) {
    Thread* child = p;
    p = p->peer;
    killZombies(t, child);
  }

  if ((o->flags & Thread::SystemFlag) == 0) {
    switch (o->state) {
    case Thread::ZombieState:
      join(t, o);
      // fall through

    case Thread::JoinedState:
      dispose(t, o, true);

    default: break;
    }
  }
}

unsigned
footprint(Thread* t)
{
  expect(t, t->criticalLevel == 0);

  unsigned n = t->heapOffset + t->heapIndex + t->backupHeapIndex;

  for (Thread* c = t->child; c; c = c->peer) {
    n += footprint(c);
  }

  return n;
}

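// Visit the GC roots owned by thread t and its descendants: the java
// thread object, the pending exception, the stack frames, and any
// objects registered via PROTECT.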
void
visitRoots(Thread* t, Heap::Visitor* v)
{
  if (t->state != Thread::ZombieState) {
    v->visit(&(t->javaThread));
    v->visit(&(t->exception));

    t->m->processor->visitObjects(t, v);

    for (Thread::Protector* p = t->protector; p; p = p->next) {
      p->visit(v);
    }
  }

  for (Thread* c = t->child; c; c = c->peer) {
    visitRoots(c, v);
  }
}

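// Walk the reference fields of an object described by a bit mask: one
// bit per word of the fixed part, followed by bits covering a single
// array element; start is the word offset at which to resume.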
bool
walk(Thread*, Heap::Walker* w, uint32_t* mask, unsigned fixedSize,
     unsigned arrayElementSize, unsigned arrayLength, unsigned start)
{
  unsigned fixedSizeInWords = ceilingDivide(fixedSize, BytesPerWord);
  unsigned arrayElementSizeInWords
    = ceilingDivide(arrayElementSize, BytesPerWord);

  for (unsigned i = start; i < fixedSizeInWords; ++i) {
    if (mask[i / 32] & (static_cast<uint32_t>(1) << (i % 32))) {
      if (not w->visit(i)) {
        return false;
      }
    }
  }

  bool arrayObjectElements = false;
  for (unsigned j = 0; j < arrayElementSizeInWords; ++j) {
    unsigned k = fixedSizeInWords + j;
    if (mask[k / 32] & (static_cast<uint32_t>(1) << (k % 32))) {
      arrayObjectElements = true;
      break;
    }
  }

  if (arrayObjectElements) {
    unsigned arrayStart;
    unsigned elementStart;
    if (start > fixedSizeInWords) {
      unsigned s = start - fixedSizeInWords;
      arrayStart = s / arrayElementSizeInWords;
      elementStart = s % arrayElementSizeInWords;
    } else {
      arrayStart = 0;
      elementStart = 0;
    }

    for (unsigned i = arrayStart; i < arrayLength; ++i) {
      for (unsigned j = elementStart; j < arrayElementSizeInWords; ++j) {
        unsigned k = fixedSizeInWords + j;
        if (mask[k / 32] & (static_cast<uint32_t>(1) << (k % 32))) {
          if (not w->visit
              (fixedSizeInWords + (i * arrayElementSizeInWords) + j))
          {
            return false;
          }
        }
      }
    }
  }

  return true;
}

object
findInInterfaces(Thread* t, GcClass* class_, GcByteArray* name,
                 GcByteArray* spec,
                 object (*find)(Thread*, GcClass*, GcByteArray*, GcByteArray*))
{
  object result = 0;
  if (GcArray* itable = cast<GcArray>(t, class_->interfaceTable())) {
    PROTECT(t, itable);
    for (unsigned i = 0; i < itable->length() and result == 0; i += 2) {
      result = find(t, cast<GcClass>(t, itable->body()[i]), name, spec);
    }
  }
  return result;
}

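// Called during postVisit when a finalizer's target has died: either
// hand the finalizer to its native finalize function via
// m->finalizeQueue, or queue the target so its Java finalize method
// can run.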
void
finalizerTargetUnreachable(Thread* t, Heap::Visitor* v, GcFinalizer** p)
{
  v->visit(&(*p)->target());

  GcFinalizer* finalizer = *p;
  *p = cast<GcFinalizer>(t, finalizer->next());

  void (*function)(Thread*, object);
  memcpy(&function, &finalizer->finalize(), BytesPerWord);
  if (function) {
    // TODO: use set() here?
    finalizer->next() = reinterpret_cast<object>(t->m->finalizeQueue);
    t->m->finalizeQueue = finalizer;
  } else {
    finalizer->setQueueTarget(t, finalizer->target());
    finalizer->setQueueNext(t, roots(t)->objectsToFinalize());
    roots(t)->setObjectsToFinalize(t, finalizer);
  }
}

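// The reference* helpers below maintain the weak-reference lists
// during postVisit: each takes a pointer into the list so the current
// entry can be unlinked in place, and enqueues the reference on its
// ReferenceQueue when the queue is still reachable.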
void
referenceTargetUnreachable(Thread* t, Heap::Visitor* v, GcJreference** p)
{
  if (DebugReferences) {
    fprintf(stderr, "target %p unreachable for reference %p\n",
            (*p)->target(), *p);
  }

  v->visit(p);
  (*p)->target() = 0;

  if (objectClass(t, *p) == type(t, GcCleaner::Type)) {
    // in openjdk, sun/misc/Cleaner extends PhantomReference
    GcCleaner* cleaner = (*p)->as<GcCleaner>(t);

    *p = cast<GcJreference>(t, (*p)->vmNext());

    cleaner->setQueueNext(t, roots(t)->objectsToClean());
    roots(t)->setObjectsToClean(t, cleaner);
  } else {
    if ((*p)->queue()
        and t->m->heap->status((*p)->queue()) != Heap::Unreachable)
    {
      // queue is reachable - add the reference

      v->visit(&(*p)->queue());

      GcReferenceQueue* q = (*p)->queue();

      if (q->front()) {
        (*p)->setJNext(t, q->front());
      } else {
        (*p)->setJNext(t, *p);
      }
      q->setFront(t, *p);

      (*p)->queue() = 0;
    }

    *p = cast<GcJreference>(t, (*p)->vmNext());
  }
}

void
referenceUnreachable(Thread* t, Heap::Visitor* v, GcJreference** p)
{
  GcJreference* r = t->m->heap->follow(*p);

  if (DebugReferences) {
    fprintf(stderr, "reference %p unreachable (target %p)\n",
            *p, r->target());
  }

  if (r->queue()
      and t->m->heap->status(r->queue()) != Heap::Unreachable)
  {
    // queue is reachable - add the reference
    referenceTargetUnreachable(t, v, p);
  } else {
    *p = cast<GcJreference>(t, (*p)->vmNext());
  }
}

void
referenceTargetReachable(Thread* t, Heap::Visitor* v, GcJreference** p)
{
  if (DebugReferences) {
    fprintf(stderr, "target %p reachable for reference %p\n",
            (*p)->target(), *p);
  }

  v->visit(p);
  v->visit(&(*p)->target());

  if (t->m->heap->status((*p)->queue()) == Heap::Unreachable) {
    (*p)->queue() = 0;
  } else {
    v->visit(&(*p)->queue());
  }
}

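// An object is finalizable if it is unreachable but its class declares
// a finalize method; weak references to such objects are cleared
// eagerly so the object can survive until finalize runs.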
bool
isFinalizable(Thread* t, object o)
{
  return t->m->heap->status(o) == Heap::Unreachable
    and (t->m->heap->follow(objectClass(t, o))->vmFlags()
         & HasFinalizerFlag);
}

void
clearTargetIfFinalizable(Thread* t, GcJreference* r)
{
  if (isFinalizable
      (t, reinterpret_cast<object>(t->m->heap->follow(r->target()))))
  {
    r->target() = 0;
  }
}

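// Post-GC bookkeeping: clear weak references to finalizable objects,
// partition finalizers by target reachability, enqueue dead
// references, promote tenured entries onto the tenured lists, and
// scrub weak JNI references.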
void
postVisit(Thread* t, Heap::Visitor* v)
{
  Machine* m = t->m;
  bool major = m->heap->collectionType() == Heap::MajorCollection;

  assertT(t, m->finalizeQueue == 0);

  m->heap->postVisit();

  for (GcJreference* p = m->weakReferences; p;) {
    GcJreference* r = m->heap->follow(p);
    p = cast<GcJreference>(t, r->vmNext());
    clearTargetIfFinalizable(t, r);
  }

  if (major) {
    for (GcJreference* p = m->tenuredWeakReferences; p;) {
      GcJreference* r = m->heap->follow(p);
      p = cast<GcJreference>(t, r->vmNext());
      clearTargetIfFinalizable(t, r);
    }
  }

  for (Reference* r = m->jniReferences; r; r = r->next) {
    if (r->weak and isFinalizable
        (t, static_cast<object>(t->m->heap->follow(r->target))))
    {
      r->target = 0;
    }
  }

  GcFinalizer* firstNewTenuredFinalizer = 0;
  GcFinalizer* lastNewTenuredFinalizer = 0;

  { object unreachable = 0;

    for (GcFinalizer** p = &(m->finalizers); *p;) {
      v->visit(p);

      if (m->heap->status((*p)->target()) == Heap::Unreachable) {
        GcFinalizer* finalizer = *p;
        *p = cast<GcFinalizer>(t, finalizer->next());

        finalizer->next() = unreachable;
        unreachable = reinterpret_cast<object>(finalizer);
      } else {
        p = reinterpret_cast<GcFinalizer**>(&(*p)->next());
      }
    }

    for (GcFinalizer** p = &(m->finalizers); *p;) {
      // target is reachable
      v->visit(&(*p)->target());

      if (m->heap->status(*p) == Heap::Tenured) {
        // the finalizer is tenured, so we remove it from
        // m->finalizers and later add it to m->tenuredFinalizers

        if (lastNewTenuredFinalizer == 0) {
          lastNewTenuredFinalizer = *p;
        }

        GcFinalizer* finalizer = *p;
        *p = cast<GcFinalizer>(t, finalizer->next());
        finalizer->next() = reinterpret_cast<object>(firstNewTenuredFinalizer);
        firstNewTenuredFinalizer = finalizer;
      } else {
        p = reinterpret_cast<GcFinalizer**>(&(*p)->next());
      }
    }

    for (object* p = &unreachable; *p;) {
      // target is unreachable - queue it up for finalization
      finalizerTargetUnreachable(t, v, reinterpret_cast<GcFinalizer**>(p));
    }
  }

  GcJreference* firstNewTenuredWeakReference = 0;
  GcJreference* lastNewTenuredWeakReference = 0;

  for (GcJreference** p = &(m->weakReferences); *p;) {
    if (m->heap->status(*p) == Heap::Unreachable) {
      // reference is unreachable
      referenceUnreachable(t, v, p);
    } else if (m->heap->status(m->heap->follow(*p)->target())
               == Heap::Unreachable)
    {
      // target is unreachable
      referenceTargetUnreachable(t, v, p);
    } else {
      // both reference and target are reachable
      referenceTargetReachable(t, v, p);

      if (m->heap->status(*p) == Heap::Tenured) {
        // the reference is tenured, so we remove it from
        // m->weakReferences and later add it to
        // m->tenuredWeakReferences

        if (lastNewTenuredWeakReference == 0) {
          lastNewTenuredWeakReference = *p;
        }

        GcJreference* reference = *p;
        *p = cast<GcJreference>(t, reference->vmNext());
        reference->vmNext()
          = reinterpret_cast<object>(firstNewTenuredWeakReference);
        firstNewTenuredWeakReference = reference;
      } else {
        p = reinterpret_cast<GcJreference**>(&(*p)->vmNext());
      }
    }
  }

  if (major) {
    { object unreachable = 0;

      for (GcFinalizer** p = &(m->tenuredFinalizers); *p;) {
        v->visit(p);

        if (m->heap->status((*p)->target()) == Heap::Unreachable) {
          GcFinalizer* finalizer = *p;
          *p = cast<GcFinalizer>(t, finalizer->next());

          finalizer->next() = unreachable;
          unreachable = reinterpret_cast<object>(finalizer);
        } else {
          p = reinterpret_cast<GcFinalizer**>(&(*p)->next());
        }
      }

      for (GcFinalizer** p = &(m->tenuredFinalizers); *p;) {
        // target is reachable
        v->visit(&(*p)->target());
        p = reinterpret_cast<GcFinalizer**>(&(*p)->next());
      }

      for (object* p = &unreachable; *p;) {
        // target is unreachable - queue it up for finalization
        finalizerTargetUnreachable(t, v, reinterpret_cast<GcFinalizer**>(p));
      }
    }

    for (GcJreference** p = &(m->tenuredWeakReferences); *p;) {
      if (m->heap->status(*p) == Heap::Unreachable) {
        // reference is unreachable
        referenceUnreachable(t, v, p);
      } else if (m->heap->status(m->heap->follow(*p)->target())
                 == Heap::Unreachable)
      {
        // target is unreachable
        referenceTargetUnreachable(t, v, p);
      } else {
        // both reference and target are reachable
        referenceTargetReachable(t, v, p);
        p = reinterpret_cast<GcJreference**>(&(*p)->vmNext());
      }
    }
  }

  if (lastNewTenuredFinalizer) {
    lastNewTenuredFinalizer->next()
      = reinterpret_cast<object>(m->tenuredFinalizers);
    m->tenuredFinalizers = firstNewTenuredFinalizer;
  }

  if (lastNewTenuredWeakReference) {
    lastNewTenuredWeakReference->vmNext()
      = reinterpret_cast<object>(m->tenuredWeakReferences);
    m->tenuredWeakReferences = firstNewTenuredWeakReference;
  }

  for (Reference* r = m->jniReferences; r; r = r->next) {
    if (r->weak) {
      if (m->heap->status(r->target) == Heap::Unreachable) {
        r->target = 0;
      } else {
        v->visit(&(r->target));
      }
    }
  }
}

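// Reset each thread's local allocation heap after a collection,
// keeping the heap marked full if the global memory ceiling is still
// exceeded.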
void
postCollect(Thread* t)
{
#ifdef VM_STRESS
  t->m->heap->free(t->defaultHeap, ThreadHeapSizeInBytes);
  t->defaultHeap = static_cast<uintptr_t*>
    (t->m->heap->allocate(ThreadHeapSizeInBytes));
  memset(t->defaultHeap, 0, ThreadHeapSizeInBytes);
#endif

  if (t->heap == t->defaultHeap) {
    memset(t->defaultHeap, 0, t->heapIndex * BytesPerWord);
  } else {
    memset(t->defaultHeap, 0, ThreadHeapSizeInBytes);
    t->heap = t->defaultHeap;
  }

  t->heapOffset = 0;

  if (t->m->heap->limitExceeded()) {
    // if we're out of memory, pretend the thread-local heap is
    // already full so we don't make things worse:
    t->heapIndex = ThreadHeapSizeInWords;
  } else {
    t->heapIndex = 0;
  }

  if (t->flags & Thread::UseBackupHeapFlag) {
    memset(t->backupHeap, 0, ThreadBackupHeapSizeInBytes);

    t->flags &= ~Thread::UseBackupHeapFlag;
    t->backupHeapIndex = 0;
  }

  for (Thread* c = t->child; c; c = c->peer) {
    postCollect(c);
  }
}

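// finalizeObject looks up the named no-argument method on the
// object's class (or a superclass) and runs it via the invoke
// trampoline, discarding any exception it throws.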
uint64_t
invoke(Thread* t, uintptr_t* arguments)
{
  GcMethod* m = cast<GcMethod>(t, *reinterpret_cast<object*>(arguments[0]));
  object o = *reinterpret_cast<object*>(arguments[1]);

  t->m->processor->invoke(t, m, o);

  return 1;
}

void
finalizeObject(Thread* t, object o, const char* name)
{
  for (GcClass* c = objectClass(t, o); c; c = c->super()) {
    GcArray* mtable = cast<GcArray>(t, c->methodTable());
    for (unsigned i = 0; i < mtable->length(); ++i) {
      GcMethod* m = cast<GcMethod>(t, mtable->body()[i]);

      if (vm::strcmp(reinterpret_cast<const int8_t*>(name),
                     m->name()->body().begin()) == 0
          and vm::strcmp(reinterpret_cast<const int8_t*>("()V"),
                         m->spec()->body().begin()) == 0)
      {
        PROTECT(t, m);
        PROTECT(t, o);

        uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(&m),
                                  reinterpret_cast<uintptr_t>(&o) };

        run(t, invoke, arguments);

        t->exception = 0;
        return;
      }
    }
  }

  abort(t);
}

unsigned
readByte(AbstractStream& s, unsigned* value)
{
  if (*value == NoByte) {
    return s.read1();
  } else {
    unsigned r = *value;
    *value = NoByte;
    return r;
  }
}

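// Continuation of parseUtf8 for strings with non-ASCII characters:
// copies the characters decoded so far out of bytesSoFar, then
// finishes decoding into a 16-bit char array; byteA and byteB hold
// bytes already consumed from the stream.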
GcCharArray*
parseUtf8NonAscii(Thread* t, AbstractStream& s, GcByteArray* bytesSoFar,
                  unsigned byteCount, unsigned sourceIndex, unsigned byteA,
                  unsigned byteB)
{
  PROTECT(t, bytesSoFar);

  unsigned length = bytesSoFar->length() - 1;
  GcCharArray* value = makeCharArray(t, length + 1);

  unsigned vi = 0;
  for (; vi < byteCount; ++vi) {
    value->body()[vi] = bytesSoFar->body()[vi];
  }

  for (unsigned si = sourceIndex; si < length; ++si) {
    unsigned a = readByte(s, &byteA);
    if (a & 0x80) {
      if (a & 0x20) {
        // 3 bytes
        si += 2;
        assertT(t, si < length);

        unsigned b = readByte(s, &byteB);
        unsigned c = s.read1();
        value->body()[vi++] = ((a & 0xf) << 12) | ((b & 0x3f) << 6)
          | (c & 0x3f);
      } else {
        // 2 bytes
        ++si;
        assertT(t, si < length);

        unsigned b = readByte(s, &byteB);

        if (a == 0xC0 and b == 0x80) {
          value->body()[vi++] = 0;
        } else {
          value->body()[vi++] = ((a & 0x1f) << 6) | (b & 0x3f);
        }
      }
    } else {
      value->body()[vi++] = a;
    }
  }

  if (vi < length) {
    PROTECT(t, value);

    GcCharArray* v = makeCharArray(t, vi + 1);
    memcpy(v->body().begin(), value->body().begin(), vi * 2);
    value = v;
  }

  return value;
}

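// Decode a (possibly modified) UTF-8 constant, producing a byte array
// when the content is pure ASCII and switching to parseUtf8NonAscii
// (which produces a char array) at the first multibyte sequence; the
// pair "C0 80" encodes an embedded NUL.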
object parseUtf8(Thread* t, AbstractStream& s, unsigned length)
{
  GcByteArray* value = makeByteArray(t, length + 1);
  unsigned vi = 0;
  for (unsigned si = 0; si < length; ++si) {
    unsigned a = s.read1();
    if (a & 0x80) {
      if (a & 0x20) {
        // 3 bytes
        return reinterpret_cast<object>
          (parseUtf8NonAscii(t, s, value, vi, si, a, NoByte));
      } else {
        // 2 bytes
        unsigned b = s.read1();

        if (a == 0xC0 and b == 0x80) {
          ++si;
          assertT(t, si < length);
          value->body()[vi++] = 0;
        } else {
          return reinterpret_cast<object>
            (parseUtf8NonAscii(t, s, value, vi, si, a, b));
        }
      }
    } else {
      value->body()[vi++] = a;
    }
  }

  if (vi < length) {
    PROTECT(t, value);

    GcByteArray* v = makeByteArray(t, vi + 1);
    memcpy(v->body().begin(), value->body().begin(), vi);
    value = v;
  }

  return reinterpret_cast<object>(value);
}

GcByteArray*
makeByteArray(Thread* t, Stream& s, unsigned length)
{
  GcByteArray* value = makeByteArray(t, length + 1);
  s.read(reinterpret_cast<uint8_t*>(value->body().begin()), length);
  return value;
}

void
removeByteArray(Thread* t, object o)
{
  hashMapRemove(t, roots(t)->byteArrayMap(), o, byteArrayHash, objectEqual);
}

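// Return the canonical instance of the given byte array, using the
// weak map at roots(t)->byteArrayMap() to deduplicate; a finalizer
// removes the entry when the array dies.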
GcByteArray*
internByteArray(Thread* t, GcByteArray* array)
{
  PROTECT(t, array);

  ACQUIRE(t, t->m->referenceLock);

  GcTriple* n = hashMapFindNode
    (t, roots(t)->byteArrayMap(), reinterpret_cast<object>(array),
     byteArrayHash, byteArrayEqual);
  if (n) {
    return cast<GcByteArray>(t, cast<GcJreference>(t, n->first())->target());
  } else {
    hashMapInsert(t, roots(t)->byteArrayMap(),
                  reinterpret_cast<object>(array), 0, byteArrayHash);
    addFinalizer(t, reinterpret_cast<object>(array), removeByteArray);
    return array;
  }
}

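// Parse the constant pool entry at index[i], recursively forcing the
// entries it refers to first; returns the number of pool slots
// consumed (two for longs and doubles, one otherwise).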
unsigned
parsePoolEntry(Thread* t, Stream& s, uint32_t* index, GcSingleton* pool,
               unsigned i)
{
  PROTECT(t, pool);

  s.setPosition(index[i]);

  switch (s.read1()) {
  case CONSTANT_Integer:
  case CONSTANT_Float: {
    uint32_t v = s.read4();
    singletonValue(t, pool, i) = v;

    if (DebugClassReader) {
      fprintf(stderr, "    consts[%d] = int/float 0x%x\n", i, v);
    }
  } return 1;

  case CONSTANT_Long:
  case CONSTANT_Double: {
    uint64_t v = s.read8();
    memcpy(&singletonValue(t, pool, i), &v, 8);

    if (DebugClassReader) {
      fprintf(stderr, "    consts[%d] = long/double <todo>\n", i);
    }
  } return 2;

  case CONSTANT_Utf8: {
    if (singletonObject(t, pool, i) == 0) {
      GcByteArray* value = internByteArray(t, makeByteArray(t, s, s.read2()));
      pool->setBodyElement(t, i, reinterpret_cast<uintptr_t>(value));

      if (DebugClassReader) {
        fprintf(stderr, "    consts[%d] = utf8 %s\n", i,
                value->body().begin());
      }
    }
  } return 1;

  case CONSTANT_Class: {
    if (singletonObject(t, pool, i) == 0) {
      unsigned si = s.read2() - 1;
      parsePoolEntry(t, s, index, pool, si);

      GcReference* value = makeReference
        (t, 0, 0, cast<GcByteArray>(t, singletonObject(t, pool, si)), 0);
      pool->setBodyElement(t, i, reinterpret_cast<uintptr_t>(value));

      if (DebugClassReader) {
        fprintf(stderr, "    consts[%d] = class <todo>\n", i);
      }
    }
  } return 1;

  case CONSTANT_String: {
    if (singletonObject(t, pool, i) == 0) {
      unsigned si = s.read2() - 1;
      parsePoolEntry(t, s, index, pool, si);

      object value = parseUtf8
        (t, cast<GcByteArray>(t, singletonObject(t, pool, si)));
      value = reinterpret_cast<object>(t->m->classpath->makeString
        (t, value, 0, fieldAtOffset<uintptr_t>(value, BytesPerWord) - 1));
      value = intern(t, value);
      pool->setBodyElement(t, i, reinterpret_cast<uintptr_t>(value));

      if (DebugClassReader) {
        fprintf(stderr, "    consts[%d] = string <todo>\n", i);
      }
    }
  } return 1;

  case CONSTANT_NameAndType: {
    if (singletonObject(t, pool, i) == 0) {
      unsigned ni = s.read2() - 1;
      unsigned ti = s.read2() - 1;

      parsePoolEntry(t, s, index, pool, ni);
      parsePoolEntry(t, s, index, pool, ti);

      GcByteArray* name = cast<GcByteArray>(t, singletonObject(t, pool, ni));
      GcByteArray* type = cast<GcByteArray>(t, singletonObject(t, pool, ti));
      GcPair* value = makePair(t, reinterpret_cast<object>(name),
                               reinterpret_cast<object>(type));
      pool->setBodyElement(t, i, reinterpret_cast<uintptr_t>(value));

      if (DebugClassReader) {
        fprintf(stderr, "    consts[%d] = nameAndType %s%s\n", i,
                name->body().begin(), type->body().begin());
      }
    }
  } return 1;

  case CONSTANT_Fieldref:
  case CONSTANT_Methodref:
  case CONSTANT_InterfaceMethodref: {
    if (singletonObject(t, pool, i) == 0) {
      unsigned ci = s.read2() - 1;
      unsigned nti = s.read2() - 1;

      parsePoolEntry(t, s, index, pool, ci);
      parsePoolEntry(t, s, index, pool, nti);

      GcByteArray* className
        = cast<GcReference>(t, singletonObject(t, pool, ci))->name();
      GcPair* nameAndType = cast<GcPair>(t, singletonObject(t, pool, nti));

      object value = reinterpret_cast<object>(makeReference
        (t, 0, className, cast<GcByteArray>(t, nameAndType->first()),
         cast<GcByteArray>(t, nameAndType->second())));
      pool->setBodyElement(t, i, reinterpret_cast<uintptr_t>(value));

      if (DebugClassReader) {
        fprintf(stderr, "    consts[%d] = method %s.%s%s\n", i,
                className->body().begin(),
                cast<GcByteArray>(t, nameAndType->first())->body().begin(),
                cast<GcByteArray>(t, nameAndType->second())->body().begin());
      }
    }
  } return 1;

  case CONSTANT_MethodHandle:
    if (singletonObject(t, pool, i) == 0) {
      unsigned kind = s.read1();
      unsigned ri = s.read2() - 1;

      parsePoolEntry(t, s, index, pool, ri);

      GcReference* value = cast<GcReference>(t, singletonObject(t, pool, ri));

      if (DebugClassReader) {
        fprintf(stderr, "    consts[%d] = method handle %d %s.%s%s\n", i,
                kind, value->class_()->body().begin(),
                value->name()->body().begin(),
                value->spec()->body().begin());
      }

      value = makeReference
        (t, kind, value->class_(), value->name(), value->spec());
      pool->setBodyElement(t, i, reinterpret_cast<uintptr_t>(value));
    } return 1;

  case CONSTANT_MethodType:
    if (singletonObject(t, pool, i) == 0) {
      unsigned ni = s.read2() - 1;
      parsePoolEntry(t, s, index, pool, ni);
      pool->setBodyElement
        (t, i, reinterpret_cast<uintptr_t>(singletonObject(t, pool, ni)));
    } return 1;

  case CONSTANT_InvokeDynamic:
    if (singletonObject(t, pool, i) == 0) {
      unsigned bootstrap = s.read2();
      unsigned nti = s.read2() - 1;

      parsePoolEntry(t, s, index, pool, nti);

      GcPair* nameAndType = cast<GcPair>(t, singletonObject(t, pool, nti));

      const char* specString = reinterpret_cast<const char*>
        (cast<GcByteArray>(t, nameAndType->second())->body().begin());

      unsigned parameterCount;
      unsigned parameterFootprint;
      unsigned returnCode;
      scanMethodSpec
        (t, specString, true, &parameterCount, &parameterFootprint,
         &returnCode);

      GcMethod* template_ = makeMethod
        (t, 0, returnCode, parameterCount, parameterFootprint, 0, 0, 0, 0,
         cast<GcByteArray>(t, nameAndType->first()),
         cast<GcByteArray>(t, nameAndType->second()), 0, 0, 0);

      object value = reinterpret_cast<object>(makeInvocation
        (t, bootstrap, -1, 0, reinterpret_cast<object>(pool),
         reinterpret_cast<object>(template_), 0));

      pool->setBodyElement(t, i, reinterpret_cast<uintptr_t>(value));
    } return 1;

  default: abort(t);
  }
}

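// Parse the constant pool in two passes: first record each entry's
// stream position and mark which slots hold objects, then resolve the
// entries (out of order where necessary) via parsePoolEntry.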
GcSingleton*
parsePool(Thread* t, Stream& s)
{
  unsigned count = s.read2() - 1;
  GcSingleton* pool = makeSingletonOfSize(t, count + poolMaskSize(count));
  PROTECT(t, pool);

  if (DebugClassReader) {
    fprintf(stderr, "  const pool entries %d\n", count);
  }

  if (count) {
    uint32_t* index = static_cast<uint32_t*>(t->m->heap->allocate(count * 4));
    THREAD_RESOURCE2(t, uint32_t*, index, unsigned, count,
                     t->m->heap->free(index, count * 4));

    for (unsigned i = 0; i < count; ++i) {
      index[i] = s.position();

      switch (s.read1()) {
      case CONSTANT_Class:
      case CONSTANT_String:
        singletonMarkObject(t, pool, i);
        s.skip(2);
        break;

      case CONSTANT_Integer:
        s.skip(4);
        break;

      case CONSTANT_Float:
        singletonSetBit(t, pool, count, i);
        s.skip(4);
        break;

      case CONSTANT_NameAndType:
      case CONSTANT_Fieldref:
      case CONSTANT_Methodref:
      case CONSTANT_InterfaceMethodref:
        singletonMarkObject(t, pool, i);
        s.skip(4);
        break;

      case CONSTANT_Long:
        s.skip(8);
        ++ i;
        break;

      case CONSTANT_Double:
        singletonSetBit(t, pool, count, i);
        singletonSetBit(t, pool, count, i + 1);
        s.skip(8);
        ++ i;
        break;

      case CONSTANT_Utf8:
        singletonMarkObject(t, pool, i);
        s.skip(s.read2());
        break;

      case CONSTANT_MethodHandle:
        singletonMarkObject(t, pool, i);
        s.skip(3);
        break;

      case CONSTANT_MethodType:
        singletonMarkObject(t, pool, i);
        s.skip(2);
        break;

      case CONSTANT_InvokeDynamic:
        singletonMarkObject(t, pool, i);
        s.skip(4);
        break;

      default: abort(t);
      }
    }

    unsigned end = s.position();

    for (unsigned i = 0; i < count;) {
      i += parsePoolEntry(t, s, index, pool, i);
    }

    s.setPosition(end);
  }

  return pool;
}

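// Add each interface implemented by class_ to map, keyed by name, so
// that duplicates collapse when the interface table is built.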
void
addInterfaces(Thread* t, GcClass* class_, GcHashMap* map)
{
  GcArray* table = cast<GcArray>(t, class_->interfaceTable());
  if (table) {
    unsigned increment = 2;
    if (class_->flags() & ACC_INTERFACE) {
      increment = 1;
    }

    PROTECT(t, map);
    PROTECT(t, table);

    for (unsigned i = 0; i < table->length(); i += increment) {
      GcClass* interface = cast<GcClass>(t, table->body()[i]);
      GcByteArray* name = interface->name();

      hashMapInsertMaybe(t, map, reinterpret_cast<object>(name),
                         reinterpret_cast<object>(interface),
                         byteArrayHash, byteArrayEqual);
    }
  }
}

GcClassAddendum*
getClassAddendum(Thread* t, GcClass* class_, GcSingleton* pool)
{
  GcClassAddendum* addendum = class_->addendum();
  if (addendum == 0) {
    PROTECT(t, class_);

    addendum = makeClassAddendum(t, pool, 0, 0, 0, 0, -1, 0, 0);
    setField(t, reinterpret_cast<object>(class_), ClassAddendum,
             reinterpret_cast<object>(addendum));
  }
  return addendum;
}

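// Build the class's interface table: resolve the directly declared
// interfaces, merge in those inherited from the superclass, and - for
// non-interfaces - reserve a vtable slot beside each interface, to be
// filled in by parseMethodTable.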
void
parseInterfaceTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool,
                    Gc::Type throwType)
{
  PROTECT(t, class_);
  PROTECT(t, pool);

  GcHashMap* map = makeHashMap(t, 0, 0);
  PROTECT(t, map);

  if (class_->super()) {
    addInterfaces(t, class_->super(), map);
  }

  unsigned count = s.read2();
  GcArray* table = 0;
  PROTECT(t, table);

  if (count) {
    table = makeArray(t, count);

    GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
    addendum->setInterfaceTable(t, reinterpret_cast<object>(table));
  }

  for (unsigned i = 0; i < count; ++i) {
    GcByteArray* name = cast<GcReference>
      (t, singletonObject(t, pool, s.read2() - 1))->name();
    PROTECT(t, name);

    GcClass* interface = resolveClass
      (t, class_->loader(), name, true, throwType);

    PROTECT(t, interface);

    table->setBodyElement(t, i, reinterpret_cast<object>(interface));

    hashMapInsertMaybe(t, map, reinterpret_cast<object>(name),
                       reinterpret_cast<object>(interface),
                       byteArrayHash, byteArrayEqual);

    addInterfaces(t, interface, map);
  }

  GcArray* interfaceTable = 0;
  if (map->size()) {
    unsigned length = map->size();
    if ((class_->flags() & ACC_INTERFACE) == 0) {
      length *= 2;
    }
    interfaceTable = makeArray(t, length);
    PROTECT(t, interfaceTable);

    unsigned i = 0;
    for (HashMapIterator it(t, map); it.hasMore();) {
      GcClass* interface = cast<GcClass>(t, it.next()->second());

      interfaceTable->setBodyElement
        (t, i, reinterpret_cast<object>(interface));
      ++ i;

      if ((class_->flags() & ACC_INTERFACE) == 0) {
        if (GcArray* vt = cast<GcArray>(t, interface->virtualTable())) {
          PROTECT(t, vt);

          // we'll fill in this table in parseMethodTable():
          GcArray* vtable = makeArray(t, vt->length());

          interfaceTable->setBodyElement
            (t, i, reinterpret_cast<object>(vtable));
        }
        ++i;
      }
    }
  }

  class_->setInterfaceTable(t, reinterpret_cast<object>(interfaceTable));
}

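// Parse the field table: assign member and static offsets, copy
// ConstantValue initializers into the static table, and compute the
// object mask describing which words of an instance hold references.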
void
2014-05-29 04:17:25 +00:00
parseFieldTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
{
PROTECT(t, class_);
PROTECT(t, pool);
unsigned memberOffset = BytesPerWord;
2014-05-29 04:17:25 +00:00
if (class_->super()) {
memberOffset = class_->super()->fixedSize();
}
unsigned count = s.read2();
if (count) {
unsigned staticOffset = BytesPerWord * 3;
2007-11-02 21:08:14 +00:00
unsigned staticCount = 0;
2014-06-29 05:48:17 +00:00
GcArray* fieldTable = makeArray(t, count);
PROTECT(t, fieldTable);
2014-06-29 05:48:17 +00:00
GcIntArray* staticValueTable = makeIntArray(t, count);
PROTECT(t, staticValueTable);
2014-06-29 05:48:17 +00:00
GcFieldAddendum* addendum = 0;
PROTECT(t, addendum);
rework VM exception handling; throw OOMEs when appropriate This rather large commit modifies the VM to use non-local returns to throw exceptions instead of simply setting Thread::exception and returning frame-by-frame as it used to. This has several benefits: * Functions no longer need to check Thread::exception after each call which might throw an exception (which would be especially tedious and error-prone now that any function which allocates objects directly or indirectly might throw an OutOfMemoryError) * There's no need to audit the code for calls to functions which previously did not throw exceptions but later do * Performance should be improved slightly due to both the reduced need for conditionals and because undwinding now occurs in a single jump instead of a series of returns The main disadvantages are: * Slightly higher overhead for entering and leaving the VM via the JNI and JDK methods * Non-local returns can make the code harder to read * We must be careful to register destructors for stack-allocated resources with the Thread so they can be called prior to a non-local return The non-local return implementation is similar to setjmp/longjmp, except it uses continuation-passing style to avoid the need for cooperation from the C/C++ compiler. Native C++ exceptions would have also been an option, but that would introduce a dependence on libstdc++, which we're trying to avoid for portability reasons. Finally, this commit ensures that the VM throws an OutOfMemoryError instead of aborting when it reaches its memory ceiling. Currently, we treat the ceiling as a soft limit and temporarily exceed it as necessary to allow garbage collection and certain internal allocations to succeed, but refuse to allocate any Java objects until the heap size drops back below the ceiling.
2010-12-27 22:55:23 +00:00
THREAD_RUNTIME_ARRAY(t, uint8_t, staticTypes, count);
2007-11-02 21:08:14 +00:00
for (unsigned i = 0; i < count; ++i) {
unsigned flags = s.read2();
unsigned name = s.read2();
unsigned spec = s.read2();
2007-11-06 15:29:05 +00:00
unsigned value = 0;
addendum = 0;
unsigned code = fieldCode
2014-06-29 05:48:17 +00:00
(t, cast<GcByteArray>(t, singletonObject(t, pool, spec - 1))->body()[0]);
unsigned attributeCount = s.read2();
for (unsigned j = 0; j < attributeCount; ++j) {
2014-06-29 05:48:17 +00:00
GcByteArray* name = cast<GcByteArray>(t, singletonObject(t, pool, s.read2() - 1));
unsigned length = s.read4();
if (vm::strcmp(reinterpret_cast<const int8_t*>("ConstantValue"),
2014-06-29 05:48:17 +00:00
name->body().begin()) == 0)
{
2007-11-06 15:29:05 +00:00
value = s.read2();
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("Signature"),
2014-06-29 05:48:17 +00:00
name->body().begin()) == 0)
{
if (addendum == 0) {
2014-06-29 05:48:17 +00:00
addendum = makeFieldAddendum(t, pool, 0, 0);
}
2014-06-29 05:48:17 +00:00
2014-06-26 01:42:16 +00:00
addendum->setSignature(t, singletonObject(t, pool, s.read2() - 1));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("RuntimeVisibleAnnotations"),
2014-06-29 05:48:17 +00:00
name->body().begin()) == 0)
{
if (addendum == 0) {
2014-06-29 05:48:17 +00:00
addendum = makeFieldAddendum(t, pool, 0, 0);
}
2014-06-29 05:48:17 +00:00
GcByteArray* body = makeByteArray(t, length);
s.read(reinterpret_cast<uint8_t*>(body->body().begin()),
length);
2014-06-26 01:42:16 +00:00
addendum->setAnnotationTable(t, reinterpret_cast<object>(body));
} else {
s.skip(length);
}
}
2014-05-29 04:17:25 +00:00
GcField* field = makeField
(t,
0, // vm flags
2007-11-02 21:08:14 +00:00
code,
flags,
0, // offset
0, // native ID
cast<GcByteArray>(t, singletonObject(t, pool, name - 1)),
cast<GcByteArray>(t, singletonObject(t, pool, spec - 1)),
2014-06-29 05:48:17 +00:00
addendum,
class_);
unsigned size = fieldSize(t, code);
if (flags & ACC_STATIC) {
staticOffset = pad(staticOffset, size);
2007-11-02 21:08:14 +00:00
2014-05-29 04:17:25 +00:00
field->offset() = staticOffset;
2007-11-02 21:08:14 +00:00
staticOffset += size;
2014-06-29 05:48:17 +00:00
staticValueTable->body()[staticCount] = value;
2007-11-02 21:08:14 +00:00
RUNTIME_ARRAY_BODY(staticTypes)[staticCount++] = code;
} else {
2009-03-03 03:18:15 +00:00
if (flags & ACC_FINAL) {
2014-05-29 04:17:25 +00:00
class_->vmFlags() |= HasFinalMemberFlag;
2009-03-03 03:18:15 +00:00
}
memberOffset = pad(memberOffset, size);
2014-05-29 04:17:25 +00:00
field->offset() = memberOffset;
memberOffset += size;
}
2014-06-26 01:42:16 +00:00
fieldTable->setBodyElement(t, i, reinterpret_cast<object>(field));
}
2014-06-26 01:42:16 +00:00
class_->setFieldTable(t, reinterpret_cast<object>(fieldTable));
2007-11-02 21:08:14 +00:00
if (staticCount) {
2013-02-11 01:06:15 +00:00
unsigned footprint = ceilingDivide(staticOffset - (BytesPerWord * 2),
2007-11-02 21:08:14 +00:00
BytesPerWord);
2014-05-29 04:17:25 +00:00
GcSingleton* staticTable = makeSingletonOfSize(t, footprint);
2007-11-02 21:08:14 +00:00
uint8_t* body = reinterpret_cast<uint8_t*>
2014-06-29 05:48:17 +00:00
(staticTable->body().begin());
2007-11-02 21:08:14 +00:00
memcpy(body, &class_, BytesPerWord);
singletonMarkObject(t, staticTable, 0);
for (unsigned i = 0, offset = BytesPerWord; i < staticCount; ++i) {
unsigned size = fieldSize(t, RUNTIME_ARRAY_BODY(staticTypes)[i]);
offset = pad(offset, size);
2007-11-02 21:08:14 +00:00
2014-06-29 05:48:17 +00:00
unsigned value = staticValueTable->body()[i];
2007-11-02 21:08:14 +00:00
if (value) {
switch (RUNTIME_ARRAY_BODY(staticTypes)[i]) {
2007-11-02 21:08:14 +00:00
case ByteField:
case BooleanField:
2007-11-06 15:29:05 +00:00
body[offset] = singletonValue(t, pool, value - 1);
2007-11-02 21:08:14 +00:00
break;
case CharField:
case ShortField:
2007-11-06 15:29:05 +00:00
*reinterpret_cast<uint16_t*>(body + offset)
= singletonValue(t, pool, value - 1);
2007-11-02 21:08:14 +00:00
break;
case IntField:
case FloatField:
2007-11-06 15:29:05 +00:00
*reinterpret_cast<uint32_t*>(body + offset)
= singletonValue(t, pool, value - 1);
2007-11-02 21:08:14 +00:00
break;
case LongField:
case DoubleField:
2007-11-06 15:29:05 +00:00
memcpy(body + offset, &singletonValue(t, pool, value - 1), 8);
2007-11-02 21:08:14 +00:00
break;
case ObjectField:
2007-11-06 15:29:05 +00:00
memcpy(body + offset,
&singletonObject(t, pool, value - 1),
BytesPerWord);
2007-11-02 21:08:14 +00:00
break;
default: abort(t);
}
}
if (RUNTIME_ARRAY_BODY(staticTypes)[i] == ObjectField) {
singletonMarkObject(t, staticTable, offset / BytesPerWord);
2007-11-02 21:08:14 +00:00
}
offset += size;
}
2014-06-26 01:42:16 +00:00
class_->setStaticTable(t, staticTable);
}
}
class_->fixedSize() = memberOffset;
2014-05-29 04:17:25 +00:00
if (class_->super()
and memberOffset == class_->super()->fixedSize())
{
2014-06-26 01:42:16 +00:00
class_->setObjectMask(t,
2014-06-29 05:48:17 +00:00
class_->super()->objectMask());
} else {
2014-06-29 05:48:17 +00:00
GcIntArray* mask = makeIntArray
(t, ceilingDivide(class_->fixedSize(), 32 * BytesPerWord));
mask->body()[0] = 1;
2014-06-29 05:48:17 +00:00
GcIntArray* superMask = 0;
2014-05-29 04:17:25 +00:00
if (class_->super()) {
2014-06-29 05:48:17 +00:00
superMask = class_->super()->objectMask();
2007-08-14 00:37:00 +00:00
if (superMask) {
2014-06-29 05:48:17 +00:00
memcpy(mask->body().begin(),
superMask->body().begin(),
ceilingDivide(class_->super()->fixedSize(),
32 * BytesPerWord)
2007-08-14 00:37:00 +00:00
* 4);
}
}
bool sawReferenceField = false;
2014-06-29 05:48:17 +00:00
GcArray* fieldTable = cast<GcArray>(t, class_->fieldTable());
if (fieldTable) {
for (int i = fieldTable->length() - 1; i >= 0; --i) {
GcField* field = cast<GcField>(t, fieldTable->body()[i]);
if ((field->flags() & ACC_STATIC) == 0
and field->code() == ObjectField)
{
unsigned index = field->offset() / BytesPerWord;
mask->body()[index / 32] |= 1 << (index % 32);
sawReferenceField = true;
}
}
}
if (superMask or sawReferenceField) {
class_->setObjectMask(t, mask);
}
}
}
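// Helpers for the bytecode disassembler below: read a big-endian
// 16- or 32-bit operand from the code array, advancing ip past it.
// For example, bytes { 0x12, 0x34 } yield 0x1234.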
uint16_t read16(uint8_t* code, unsigned& ip) {
uint16_t a = code[ip++];
uint16_t b = code[ip++];
return (a << 8) | b;
}
uint32_t read32(uint8_t* code, unsigned& ip) {
uint32_t a = code[ip++];
uint32_t b = code[ip++];
uint32_t c = code[ip++];
uint32_t d = code[ip++];
// big-endian: the first byte read is the most significant
return (a << 24) | (b << 16) | (c << 8) | d;
}
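// Write a human-readable listing of JVM bytecode to stderr, one
// instruction per line, each prefixed with the given string and its
// offset within the code array.  Used when DebugClassReader is set.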
void
disassembleCode(const char* prefix, uint8_t* code, unsigned length)
{
unsigned ip = 0;
while (ip < length) {
unsigned instr;
fprintf(stderr, "%s%x:\t", prefix, ip);
switch (instr = code[ip++]) {
case aaload: fprintf(stderr, "aaload\n"); break;
case aastore: fprintf(stderr, "aastore\n"); break;
case aconst_null: fprintf(stderr, "aconst_null\n"); break;
case aload: fprintf(stderr, "aload %02x\n", code[ip++]); break;
case aload_0: fprintf(stderr, "aload_0\n"); break;
case aload_1: fprintf(stderr, "aload_1\n"); break;
case aload_2: fprintf(stderr, "aload_2\n"); break;
case aload_3: fprintf(stderr, "aload_3\n"); break;
case anewarray: fprintf(stderr, "anewarray %04x\n", read16(code, ip)); break;
case areturn: fprintf(stderr, "areturn\n"); break;
case arraylength: fprintf(stderr, "arraylength\n"); break;
case astore: fprintf(stderr, "astore %02x\n", code[ip++]); break;
case astore_0: fprintf(stderr, "astore_0\n"); break;
case astore_1: fprintf(stderr, "astore_1\n"); break;
case astore_2: fprintf(stderr, "astore_2\n"); break;
case astore_3: fprintf(stderr, "astore_3\n"); break;
case athrow: fprintf(stderr, "athrow\n"); break;
case baload: fprintf(stderr, "baload\n"); break;
case bastore: fprintf(stderr, "bastore\n"); break;
case bipush: fprintf(stderr, "bipush %02x\n", code[ip++]); break;
case caload: fprintf(stderr, "caload\n"); break;
case castore: fprintf(stderr, "castore\n"); break;
case checkcast: fprintf(stderr, "checkcast %04x\n", read16(code, ip)); break;
case d2f: fprintf(stderr, "d2f\n"); break;
case d2i: fprintf(stderr, "d2i\n"); break;
case d2l: fprintf(stderr, "d2l\n"); break;
case dadd: fprintf(stderr, "dadd\n"); break;
case daload: fprintf(stderr, "daload\n"); break;
case dastore: fprintf(stderr, "dastore\n"); break;
case dcmpg: fprintf(stderr, "dcmpg\n"); break;
case dcmpl: fprintf(stderr, "dcmpl\n"); break;
case dconst_0: fprintf(stderr, "dconst_0\n"); break;
case dconst_1: fprintf(stderr, "dconst_1\n"); break;
case ddiv: fprintf(stderr, "ddiv\n"); break;
case dmul: fprintf(stderr, "dmul\n"); break;
case dneg: fprintf(stderr, "dneg\n"); break;
case vm::drem: fprintf(stderr, "drem\n"); break;
case dsub: fprintf(stderr, "dsub\n"); break;
case vm::dup: fprintf(stderr, "dup\n"); break;
case dup_x1: fprintf(stderr, "dup_x1\n"); break;
case dup_x2: fprintf(stderr, "dup_x2\n"); break;
case vm::dup2: fprintf(stderr, "dup2\n"); break;
case dup2_x1: fprintf(stderr, "dup2_x1\n"); break;
case dup2_x2: fprintf(stderr, "dup2_x2\n"); break;
case f2d: fprintf(stderr, "f2d\n"); break;
case f2i: fprintf(stderr, "f2i\n"); break;
case f2l: fprintf(stderr, "f2l\n"); break;
case fadd: fprintf(stderr, "fadd\n"); break;
case faload: fprintf(stderr, "faload\n"); break;
case fastore: fprintf(stderr, "fastore\n"); break;
case fcmpg: fprintf(stderr, "fcmpg\n"); break;
case fcmpl: fprintf(stderr, "fcmpl\n"); break;
case fconst_0: fprintf(stderr, "fconst_0\n"); break;
case fconst_1: fprintf(stderr, "fconst_1\n"); break;
case fconst_2: fprintf(stderr, "fconst_2\n"); break;
case fdiv: fprintf(stderr, "fdiv\n"); break;
case fmul: fprintf(stderr, "fmul\n"); break;
case fneg: fprintf(stderr, "fneg\n"); break;
case frem: fprintf(stderr, "frem\n"); break;
case fsub: fprintf(stderr, "fsub\n"); break;
case getfield: fprintf(stderr, "getfield %04x\n", read16(code, ip)); break;
case getstatic: fprintf(stderr, "getstatic %04x\n", read16(code, ip)); break;
case goto_: {
int16_t offset = read16(code, ip);
fprintf(stderr, "goto %04x\n", offset + ip - 3);
} break;
case goto_w: {
int32_t offset = read32(code, ip);
fprintf(stderr, "goto_w %08x\n", offset + ip - 5);
} break;
case i2b: fprintf(stderr, "i2b\n"); break;
case i2c: fprintf(stderr, "i2c\n"); break;
case i2d: fprintf(stderr, "i2d\n"); break;
case i2f: fprintf(stderr, "i2f\n"); break;
case i2l: fprintf(stderr, "i2l\n"); break;
case i2s: fprintf(stderr, "i2s\n"); break;
case iadd: fprintf(stderr, "iadd\n"); break;
case iaload: fprintf(stderr, "iaload\n"); break;
case iand: fprintf(stderr, "iand\n"); break;
case iastore: fprintf(stderr, "iastore\n"); break;
case iconst_m1: fprintf(stderr, "iconst_m1\n"); break;
case iconst_0: fprintf(stderr, "iconst_0\n"); break;
case iconst_1: fprintf(stderr, "iconst_1\n"); break;
case iconst_2: fprintf(stderr, "iconst_2\n"); break;
case iconst_3: fprintf(stderr, "iconst_3\n"); break;
case iconst_4: fprintf(stderr, "iconst_4\n"); break;
case iconst_5: fprintf(stderr, "iconst_5\n"); break;
case idiv: fprintf(stderr, "idiv\n"); break;
case if_acmpeq: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_acmpeq %04x\n", offset + ip - 3);
} break;
case if_acmpne: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_acmpne %04x\n", offset + ip - 3);
} break;
case if_icmpeq: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmpeq %04x\n", offset + ip - 3);
} break;
case if_icmpne: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmpne %04x\n", offset + ip - 3);
} break;
case if_icmpgt: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmpgt %04x\n", offset + ip - 3);
} break;
case if_icmpge: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmpge %04x\n", offset + ip - 3);
} break;
case if_icmplt: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmplt %04x\n", offset + ip - 3);
} break;
case if_icmple: {
int16_t offset = read16(code, ip);
fprintf(stderr, "if_icmple %04x\n", offset + ip - 3);
} break;
case ifeq: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifeq %04x\n", offset + ip - 3);
} break;
case ifne: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifne %04x\n", offset + ip - 3);
} break;
case ifgt: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifgt %04x\n", offset + ip - 3);
} break;
case ifge: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifge %04x\n", offset + ip - 3);
} break;
case iflt: {
int16_t offset = read16(code, ip);
fprintf(stderr, "iflt %04x\n", offset + ip - 3);
} break;
case ifle: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifle %04x\n", offset + ip - 3);
} break;
case ifnonnull: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifnonnull %04x\n", offset + ip - 3);
} break;
case ifnull: {
int16_t offset = read16(code, ip);
fprintf(stderr, "ifnull %04x\n", offset + ip - 3);
} break;
case iinc: {
uint8_t a = code[ip++];
uint8_t b = code[ip++];
fprintf(stderr, "iinc %02x %02x\n", a, b);
} break;
case iload: fprintf(stderr, "iload %02x\n", code[ip++]); break;
case fload: fprintf(stderr, "fload %02x\n", code[ip++]); break;
case iload_0: fprintf(stderr, "iload_0\n"); break;
case fload_0: fprintf(stderr, "fload_0\n"); break;
case iload_1: fprintf(stderr, "iload_1\n"); break;
case fload_1: fprintf(stderr, "fload_1\n"); break;
case iload_2: fprintf(stderr, "iload_2\n"); break;
case fload_2: fprintf(stderr, "fload_2\n"); break;
case iload_3: fprintf(stderr, "iload_3\n"); break;
case fload_3: fprintf(stderr, "fload_3\n"); break;
case imul: fprintf(stderr, "imul\n"); break;
case ineg: fprintf(stderr, "ineg\n"); break;
case instanceof: fprintf(stderr, "instanceof %04x\n", read16(code, ip)); break;
case invokeinterface: {
unsigned index = read16(code, ip);
ip += 2;  // skip the count and zero operand bytes that follow the index
fprintf(stderr, "invokeinterface %04x\n", index);
} break;
case invokespecial: fprintf(stderr, "invokespecial %04x\n", read16(code, ip)); break;
case invokestatic: fprintf(stderr, "invokestatic %04x\n", read16(code, ip)); break;
case invokevirtual: fprintf(stderr, "invokevirtual %04x\n", read16(code, ip)); break;
case ior: fprintf(stderr, "ior\n"); break;
case irem: fprintf(stderr, "irem\n"); break;
case ireturn: fprintf(stderr, "ireturn\n"); break;
case freturn: fprintf(stderr, "freturn\n"); break;
case ishl: fprintf(stderr, "ishl\n"); break;
case ishr: fprintf(stderr, "ishr\n"); break;
case istore: fprintf(stderr, "istore %02x\n", code[ip++]); break;
case fstore: fprintf(stderr, "fstore %02x\n", code[ip++]); break;
case istore_0: fprintf(stderr, "istore_0\n"); break;
case fstore_0: fprintf(stderr, "fstore_0\n"); break;
case istore_1: fprintf(stderr, "istore_1\n"); break;
case fstore_1: fprintf(stderr, "fstore_1\n"); break;
case istore_2: fprintf(stderr, "istore_2\n"); break;
case fstore_2: fprintf(stderr, "fstore_2\n"); break;
case istore_3: fprintf(stderr, "istore_3\n"); break;
case fstore_3: fprintf(stderr, "fstore_3\n"); break;
case isub: fprintf(stderr, "isub\n"); break;
case iushr: fprintf(stderr, "iushr\n"); break;
case ixor: fprintf(stderr, "ixor\n"); break;
case jsr: fprintf(stderr, "jsr %04x\n", read16(code, ip)); break;
case jsr_w: fprintf(stderr, "jsr_w %08x\n", read32(code, ip)); break;
case l2d: fprintf(stderr, "l2d\n"); break;
case l2f: fprintf(stderr, "l2f\n"); break;
case l2i: fprintf(stderr, "l2i\n"); break;
case ladd: fprintf(stderr, "ladd\n"); break;
case laload: fprintf(stderr, "laload\n"); break;
case land: fprintf(stderr, "land\n"); break;
case lastore: fprintf(stderr, "lastore\n"); break;
case lcmp: fprintf(stderr, "lcmp\n"); break;
case lconst_0: fprintf(stderr, "lconst_0\n"); break;
case lconst_1: fprintf(stderr, "lconst_1\n"); break;
case ldc: fprintf(stderr, "ldc %02x\n", code[ip++]); break;  // one-byte pool index
case ldc_w: fprintf(stderr, "ldc_w %04x\n", read16(code, ip)); break;  // two-byte pool index
case ldc2_w: fprintf(stderr, "ldc2_w %04x\n", read16(code, ip)); break;
case ldiv_: fprintf(stderr, "ldiv\n"); break;
case lload: fprintf(stderr, "lload %02x\n", code[ip++]); break;
case dload: fprintf(stderr, "dload %02x\n", code[ip++]); break;
case lload_0: fprintf(stderr, "lload_0\n"); break;
case dload_0: fprintf(stderr, "dload_0\n"); break;
case lload_1: fprintf(stderr, "lload_1\n"); break;
case dload_1: fprintf(stderr, "dload_1\n"); break;
case lload_2: fprintf(stderr, "lload_2\n"); break;
case dload_2: fprintf(stderr, "dload_2\n"); break;
case lload_3: fprintf(stderr, "lload_3\n"); break;
case dload_3: fprintf(stderr, "dload_3\n"); break;
case lmul: fprintf(stderr, "lmul\n"); break;
case lneg: fprintf(stderr, "lneg\n"); break;
case lookupswitch: {
while (ip % 4) ++ip;  // skip padding to a four-byte boundary from the code start
int32_t default_ = read32(code, ip);
int32_t pairCount = read32(code, ip);
fprintf(stderr, "lookupswitch default: %d pairCount: %d\n", default_, pairCount);
for (int i = 0; i < pairCount; i++) {
int32_t k = read32(code, ip);
int32_t d = read32(code, ip);
fprintf(stderr, "%s key: %02x dest: %2x\n", prefix, k, d);
}
} break;
case lor: fprintf(stderr, "lor\n"); break;
case lrem: fprintf(stderr, "lrem\n"); break;
case lreturn: fprintf(stderr, "lreturn\n"); break;
case dreturn: fprintf(stderr, "dreturn\n"); break;
case lshl: fprintf(stderr, "lshl\n"); break;
case lshr: fprintf(stderr, "lshr\n"); break;
case lstore: fprintf(stderr, "lstore %02x\n", code[ip++]); break;
case dstore: fprintf(stderr, "dstore %02x\n", code[ip++]); break;
case lstore_0: fprintf(stderr, "lstore_0\n"); break;
case dstore_0: fprintf(stderr, "dstore_0\n"); break;
case lstore_1: fprintf(stderr, "lstore_1\n"); break;
case dstore_1: fprintf(stderr, "dstore_1\n"); break;
case lstore_2: fprintf(stderr, "lstore_2\n"); break;
case dstore_2: fprintf(stderr, "dstore_2\n"); break;
case lstore_3: fprintf(stderr, "lstore_3\n"); break;
case dstore_3: fprintf(stderr, "dstore_3\n"); break;
case lsub: fprintf(stderr, "lsub\n"); break;
case lushr: fprintf(stderr, "lushr\n"); break;
case lxor: fprintf(stderr, "lxor\n"); break;
case monitorenter: fprintf(stderr, "monitorenter\n"); break;
case monitorexit: fprintf(stderr, "monitorexit\n"); break;
case multianewarray: {
unsigned type = read16(code, ip);
fprintf(stderr, "multianewarray %04x %02x\n", type, code[ip++]);
} break;
case new_: fprintf(stderr, "new %04x\n", read16(code, ip)); break;
case newarray: fprintf(stderr, "newarray %02x\n", code[ip++]); break;
case nop: fprintf(stderr, "nop\n"); break;
case pop_: fprintf(stderr, "pop\n"); break;
case pop2: fprintf(stderr, "pop2\n"); break;
case putfield: fprintf(stderr, "putfield %04x\n", read16(code, ip)); break;
case putstatic: fprintf(stderr, "putstatic %04x\n", read16(code, ip)); break;
case ret: fprintf(stderr, "ret %02x\n", code[ip++]); break;
case return_: fprintf(stderr, "return\n"); break;
case saload: fprintf(stderr, "saload\n"); break;
case sastore: fprintf(stderr, "sastore\n"); break;
case sipush: fprintf(stderr, "sipush %04x\n", read16(code, ip)); break;
case swap: fprintf(stderr, "swap\n"); break;
case tableswitch: {
while (ip % 4) ++ip;  // skip padding to a four-byte boundary from the code start
int32_t default_ = read32(code, ip);
int32_t bottom = read32(code, ip);
int32_t top = read32(code, ip);
fprintf(stderr, "tableswitch default: %d bottom: %d top: %d\n", default_, bottom, top);
for (int i = 0; i < top - bottom + 1; i++) {
int32_t d = read32(code, ip);
fprintf(stderr, "%s key: %d dest: %2x\n", prefix, i + bottom, d);
}
} break;
case wide: {
unsigned wideInstr = code[ip++];
switch (wideInstr) {
case aload: fprintf(stderr, "wide aload %04x\n", read16(code, ip)); break;
case astore: fprintf(stderr, "wide astore %04x\n", read16(code, ip)); break;
case iinc: fprintf(stderr, "wide iinc %04x %04x\n", read16(code, ip), read16(code, ip)); break;
case iload: fprintf(stderr, "wide iload %04x\n", read16(code, ip)); break;
case istore: fprintf(stderr, "wide istore %04x\n", read16(code, ip)); break;
case lload: fprintf(stderr, "wide lload %04x\n", read16(code, ip)); break;
case lstore: fprintf(stderr, "wide lstore %04x\n", read16(code, ip)); break;
case ret: fprintf(stderr, "wide ret %04x\n", read16(code, ip)); break;
default: {
fprintf(stderr, "unknown wide instruction %02x %04x\n", instr, read16(code, ip));
}
}
} break;
default: {
fprintf(stderr, "unknown instruction %02x\n", instr);
}
}
}
}
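// Parse a "Code" attribute from the class file stream: max_stack,
// max_locals, the bytecode itself, the exception handler table, and
// any LineNumberTable attribute.  Other code attributes are skipped.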
GcCode*
parseCode(Thread* t, Stream& s, GcSingleton* pool)
{
PROTECT(t, pool);
unsigned maxStack = s.read2();
unsigned maxLocals = s.read2();
unsigned length = s.read4();
if (DebugClassReader) {
fprintf(stderr, " code: maxStack %d maxLocals %d length %d\n", maxStack, maxLocals, length);
}
GcCode* code = makeCode(t, pool, 0, 0, 0, 0, 0, maxStack, maxLocals, length);
s.read(code->body().begin(), length);
PROTECT(t, code);
if (DebugClassReader) {
disassembleCode(" ", code->body().begin(), length);
}
unsigned ehtLength = s.read2();
if (ehtLength) {
GcExceptionHandlerTable* eht = makeExceptionHandlerTable(t, ehtLength);
for (unsigned i = 0; i < ehtLength; ++i) {
unsigned start = s.read2();
unsigned end = s.read2();
unsigned ip = s.read2();
unsigned catchType = s.read2();
eht->body()[i] = exceptionHandler
(start, end, ip, catchType);
}
code->setExceptionHandlerTable(t, reinterpret_cast<object>(eht));
}
unsigned attributeCount = s.read2();
for (unsigned j = 0; j < attributeCount; ++j) {
GcByteArray* name = cast<GcByteArray>(t, singletonObject(t, pool, s.read2() - 1));
unsigned length = s.read4();
if (vm::strcmp(reinterpret_cast<const int8_t*>("LineNumberTable"),
name->body().begin()) == 0)
{
unsigned lntLength = s.read2();
GcLineNumberTable* lnt = makeLineNumberTable(t, lntLength);
for (unsigned i = 0; i < lntLength; ++i) {
unsigned ip = s.read2();
unsigned line = s.read2();
lnt->body()[i] = lineNumber(ip, line);
}
code->setLineNumberTable(t, lnt);
} else {
s.skip(length);
}
}
return code;
}
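// Ensure every method declared by the interfaces this class
// implements has a vtable slot.  Methods not already present in
// virtualMap are added as abstract placeholders; if makeList is
// true, those placeholders are also returned as a list so the
// caller can append them to the method table.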
GcList*
addInterfaceMethods(Thread* t, GcClass* class_, GcHashMap* virtualMap,
unsigned* virtualCount, bool makeList)
{
GcArray* itable = cast<GcArray>(t, class_->interfaceTable());
if (itable) {
PROTECT(t, class_);
PROTECT(t, virtualMap);
PROTECT(t, itable);
GcList* list = 0;
PROTECT(t, list);
GcMethod* method = 0;
PROTECT(t, method);
GcArray* vtable = 0;
PROTECT(t, vtable);
unsigned stride = (class_->flags() & ACC_INTERFACE) ? 1 : 2;
for (unsigned i = 0; i < itable->length(); i += stride) {
vtable = cast<GcArray>(t, cast<GcClass>(t, itable->body()[i])->virtualTable());
if (vtable) {
for (unsigned j = 0; j < vtable->length(); ++j) {
method = cast<GcMethod>(t, vtable->body()[j]);
GcTriple* n = hashMapFindNode(t,
virtualMap,
reinterpret_cast<object>(method),
methodHash,
methodEqual);
if (n == 0) {
method = makeMethod(t,
method->vmFlags(),
method->returnCode(),
method->parameterCount(),
method->parameterFootprint(),
method->flags(),
(*virtualCount)++,
0,
0,
method->name(),
method->spec(),
0,
class_,
0);
hashMapInsert(t,
virtualMap,
reinterpret_cast<object>(method),
reinterpret_cast<object>(method),
methodHash);
if (makeList) {
if (list == 0) {
list = vm::makeList(t, 0, 0, 0);
}
listAppend(t, list, reinterpret_cast<object>(method));
}
}
}
}
}
return list;
}
return 0;
}
void
parseMethodTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
{
PROTECT(t, class_);
PROTECT(t, pool);
GcHashMap* virtualMap = makeHashMap(t, 0, 0);
PROTECT(t, virtualMap);
unsigned virtualCount = 0;
unsigned declaredVirtualCount = 0;
GcArray* superVirtualTable = 0;
PROTECT(t, superVirtualTable);
if ((class_->flags() & ACC_INTERFACE) == 0) {
if (class_->super()) {
superVirtualTable = cast<GcArray>(t, class_->super()->virtualTable());
}
if (superVirtualTable) {
virtualCount = superVirtualTable->length();
for (unsigned i = 0; i < virtualCount; ++i) {
object method = superVirtualTable->body()[i];
hashMapInsert(t, virtualMap, method, method, methodHash);
}
}
}
GcList* newVirtuals = makeList(t, 0, 0, 0);
PROTECT(t, newVirtuals);
unsigned count = s.read2();
if (DebugClassReader) {
fprintf(stderr, " method count %d\n", count);
}
if (count) {
GcArray* methodTable = makeArray(t, count);
PROTECT(t, methodTable);
GcMethodAddendum* addendum = 0;
PROTECT(t, addendum);
GcCode* code = 0;
PROTECT(t, code);
for (unsigned i = 0; i < count; ++i) {
unsigned flags = s.read2();
unsigned name = s.read2();
unsigned spec = s.read2();
if (DebugClassReader) {
fprintf(stderr, " method flags %d name %d spec %d '%s%s'\n", flags, name, spec,
cast<GcByteArray>(t, singletonObject(t, pool, name - 1))->body().begin(),
cast<GcByteArray>(t, singletonObject(t, pool, spec - 1))->body().begin());
}
addendum = 0;
code = 0;
unsigned attributeCount = s.read2();
for (unsigned j = 0; j < attributeCount; ++j) {
GcByteArray* attributeName = cast<GcByteArray>(t, singletonObject(t, pool, s.read2() - 1));
unsigned length = s.read4();
if (vm::strcmp(reinterpret_cast<const int8_t*>("Code"),
attributeName->body().begin()) == 0)
{
code = parseCode(t, s, pool);
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("Exceptions"),
attributeName->body().begin()) == 0)
{
if (addendum == 0) {
addendum = makeMethodAddendum(t, pool, 0, 0, 0, 0, 0);
}
unsigned exceptionCount = s.read2();
2014-06-29 05:48:17 +00:00
GcShortArray* body = makeShortArray(t, exceptionCount);
for (unsigned i = 0; i < exceptionCount; ++i) {
body->body()[i] = s.read2();
}
addendum->setExceptionTable(t, reinterpret_cast<object>(body));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("AnnotationDefault"),
attributeName->body().begin()) == 0)
{
if (addendum == 0) {
addendum = makeMethodAddendum(t, pool, 0, 0, 0, 0, 0);
}
GcByteArray* body = makeByteArray(t, length);
s.read(reinterpret_cast<uint8_t*>(body->body().begin()),
length);
addendum->setAnnotationDefault(t, reinterpret_cast<object>(body));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("Signature"),
attributeName->body().begin()) == 0)
{
if (addendum == 0) {
addendum = makeMethodAddendum(t, pool, 0, 0, 0, 0, 0);
}
addendum->setSignature(t, singletonObject(t, pool, s.read2() - 1));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("RuntimeVisibleAnnotations"),
attributeName->body().begin()) == 0)
{
if (addendum == 0) {
addendum = makeMethodAddendum(t, pool, 0, 0, 0, 0, 0);
}
GcByteArray* body = makeByteArray(t, length);
s.read(reinterpret_cast<uint8_t*>(body->body().begin()),
length);
addendum->setAnnotationTable(t, reinterpret_cast<object>(body));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("RuntimeVisibleParameterAnnotations"),
2014-06-29 05:48:17 +00:00
attributeName->body().begin()) == 0)
{
if (addendum == 0) {
addendum = makeMethodAddendum(t, pool, 0, 0, 0, 0, 0);
}
GcByteArray* body = makeByteArray(t, length);
s.read(reinterpret_cast<uint8_t*>(body->body().begin()),
length);
addendum->setParameterAnnotationTable(t, reinterpret_cast<object>(body));
} else {
s.skip(length);
}
}
const char* specString = reinterpret_cast<const char*>
(cast<GcByteArray>(t, singletonObject(t, pool, spec - 1))->body().begin());
unsigned parameterCount;
unsigned parameterFootprint;
unsigned returnCode;
scanMethodSpec(t, specString, flags & ACC_STATIC, &parameterCount,
&parameterFootprint, &returnCode);
GcMethod* method = t->m->processor->makeMethod
(t,
0, // vm flags
returnCode,
parameterCount,
parameterFootprint,
flags,
0, // offset
cast<GcByteArray>(t, singletonObject(t, pool, name - 1)),
cast<GcByteArray>(t, singletonObject(t, pool, spec - 1)),
addendum,
class_,
code);
PROTECT(t, method);
if (methodVirtual(t, method)) {
++ declaredVirtualCount;
GcTriple* p = hashMapFindNode
(t, virtualMap, reinterpret_cast<object>(method), methodHash, methodEqual);
if (p) {
method->offset() = cast<GcMethod>(t, p->first())->offset();
p->setSecond(t, reinterpret_cast<object>(method));
} else {
method->offset() = virtualCount++;
listAppend(t, newVirtuals, reinterpret_cast<object>(method));
hashMapInsert(t, virtualMap, reinterpret_cast<object>(method), reinterpret_cast<object>(method), methodHash);
}
if (UNLIKELY((class_->flags() & ACC_INTERFACE) == 0
and vm::strcmp
(reinterpret_cast<const int8_t*>("finalize"),
method->name()->body().begin()) == 0
and vm::strcmp
(reinterpret_cast<const int8_t*>("()V"),
method->spec()->body().begin()) == 0
and (not emptyMethod(t, method))))
{
class_->vmFlags() |= HasFinalizerFlag;
}
} else {
method->offset() = i;
if (vm::strcmp(reinterpret_cast<const int8_t*>("<clinit>"),
method->name()->body().begin()) == 0)
{
method->vmFlags() |= ClassInitFlag;
class_->vmFlags() |= NeedInitFlag;
} else if (vm::strcmp
(reinterpret_cast<const int8_t*>("<init>"),
method->name()->body().begin()) == 0)
{
method->vmFlags() |= ConstructorFlag;
}
}
methodTable->setBodyElement(t, i, reinterpret_cast<object>(method));
}
class_->setMethodTable(t, reinterpret_cast<object>(methodTable));
}
GcList* abstractVirtuals = addInterfaceMethods
(t, class_, virtualMap, &virtualCount, true);
PROTECT(t, abstractVirtuals);
bool populateInterfaceVtables = false;
if (declaredVirtualCount == 0
and abstractVirtuals == 0
and (class_->flags() & ACC_INTERFACE) == 0)
{
if (class_->super()) {
// inherit virtual table from superclass
class_->setVirtualTable(t, reinterpret_cast<object>(superVirtualTable));
if (class_->super()->interfaceTable()
and cast<GcArray>(t, class_->interfaceTable())->length()
== cast<GcArray>
(t, class_->super()->interfaceTable())->length())
{
// inherit interface table from superclass
class_->setInterfaceTable(t, class_->super()->interfaceTable());
} else {
populateInterfaceVtables = true;
}
} else {
// apparently, Object does not have any virtual methods. We
// give it a vtable anyway so code doesn't break elsewhere.
GcArray* vtable = makeArray(t, 0);
class_->setVirtualTable(t, reinterpret_cast<object>(vtable));
}
} else if (virtualCount) {
// generate class vtable
GcArray* vtable = makeArray(t, virtualCount);
unsigned i = 0;
if (class_->flags() & ACC_INTERFACE) {
PROTECT(t, vtable);
for (HashMapIterator it(t, virtualMap); it.hasMore();) {
GcMethod* method = cast<GcMethod>(t, it.next()->first());
assertT(t, vtable->body()[method->offset()] == 0);
vtable->setBodyElement(t, method->offset(), reinterpret_cast<object>(method));
++ i;
}
} else {
populateInterfaceVtables = true;
if (superVirtualTable) {
for (; i < superVirtualTable->length(); ++i) {
object method = superVirtualTable->body()[i];
method = hashMapFind(t, virtualMap, method, methodHash, methodEqual);
vtable->setBodyElement(t, i, method);
}
}
for (GcPair* p = cast<GcPair>(t, newVirtuals->front()); p; p = cast<GcPair>(t, p->second())) {
vtable->setBodyElement(t, i, p->first());
++ i;
}
}
if (abstractVirtuals) {
PROTECT(t, vtable);
object originalMethodTable = class_->methodTable();
PROTECT(t, originalMethodTable);
unsigned oldLength = class_->methodTable() ?
cast<GcArray>(t, class_->methodTable())->length() : 0;
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
addendum->declaredMethodCount() = oldLength;
GcArray* newMethodTable = makeArray
(t, oldLength + abstractVirtuals->size());
if (oldLength) {
GcArray* mtable = cast<GcArray>(t, class_->methodTable());
for (size_t i = 0; i < oldLength; i++) {
newMethodTable->setBodyElement(t, i, mtable->body()[i]);
}
}
mark(t, reinterpret_cast<object>(newMethodTable), ArrayBody, oldLength);
unsigned mti = oldLength;
for (GcPair* p = cast<GcPair>(t, abstractVirtuals->front());
p; p = cast<GcPair>(t, p->second()))
{
newMethodTable->setBodyElement(t, mti++, p->first());
if ((class_->flags() & ACC_INTERFACE) == 0) {
vtable->setBodyElement(t, i++, p->first());
}
}
assertT(t, newMethodTable->length() == mti);
class_->setMethodTable(t, reinterpret_cast<object>(newMethodTable));
}
assertT(t, vtable->length() == i);
class_->setVirtualTable(t, reinterpret_cast<object>(vtable));
}
if (populateInterfaceVtables) {
// generate interface vtables
GcArray* itable = cast<GcArray>(t, class_->interfaceTable());
if (itable) {
PROTECT(t, itable);
for (unsigned i = 0; i < itable->length(); i += 2) {
GcArray* ivtable = cast<GcArray>(t, cast<GcClass>(t, itable->body()[i])->virtualTable());
if (ivtable) {
GcArray* vtable = cast<GcArray>(t, itable->body()[i + 1]);
for (unsigned j = 0; j < ivtable->length(); ++j) {
object method = ivtable->body()[j];
method = hashMapFind
(t, virtualMap, method, methodHash, methodEqual);
assertT(t, method);
vtable->setBodyElement(t, j, method);
}
}
}
}
}
}
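// Parse class-level attributes: SourceFile, Signature, InnerClasses,
// RuntimeVisibleAnnotations, and EnclosingMethod; anything else is
// skipped.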
void
parseAttributeTable(Thread* t, Stream& s, GcClass* class_, GcSingleton* pool)
{
PROTECT(t, class_);
PROTECT(t, pool);
unsigned attributeCount = s.read2();
for (unsigned j = 0; j < attributeCount; ++j) {
GcByteArray* name = cast<GcByteArray>(t, singletonObject(t, pool, s.read2() - 1));
unsigned length = s.read4();
if (vm::strcmp(reinterpret_cast<const int8_t*>("SourceFile"),
name->body().begin()) == 0)
{
class_->setSourceFile(t, cast<GcByteArray>(t, singletonObject(t, pool, s.read2() - 1)));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("Signature"),
name->body().begin()) == 0)
{
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
addendum->setSignature(t,
singletonObject(t, pool, s.read2() - 1));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>("InnerClasses"),
name->body().begin()) == 0)
{
unsigned innerClassCount = s.read2();
GcArray* table = makeArray(t, innerClassCount);
PROTECT(t, table);
for (unsigned i = 0; i < innerClassCount; ++i) {
int16_t inner = s.read2();
int16_t outer = s.read2();
int16_t name = s.read2();
int16_t flags = s.read2();
GcInnerClassReference* reference = makeInnerClassReference
(t,
inner ? cast<GcReference>(t, singletonObject(t, pool, inner - 1))->name() : 0,
outer ? cast<GcReference>(t, singletonObject(t, pool, outer - 1))->name() : 0,
name ? cast<GcByteArray>(t, singletonObject(t, pool, name - 1)) : 0,
flags);
table->setBodyElement(t, i, reinterpret_cast<object>(reference));
}
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
addendum->setInnerClassTable(t, reinterpret_cast<object>(table));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("RuntimeVisibleAnnotations"),
name->body().begin()) == 0)
{
GcByteArray* body = makeByteArray(t, length);
PROTECT(t, body);
s.read(reinterpret_cast<uint8_t*>(body->body().begin()), length);
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
addendum->setAnnotationTable(t, reinterpret_cast<object>(body));
} else if (vm::strcmp(reinterpret_cast<const int8_t*>
("EnclosingMethod"),
name->body().begin()) == 0)
{
int16_t enclosingClass = s.read2();
int16_t enclosingMethod = s.read2();
GcClassAddendum* addendum = getClassAddendum(t, class_, pool);
addendum->setEnclosingClass(t,
reinterpret_cast<object>(cast<GcReference>(t, singletonObject(t, pool, enclosingClass - 1))->name()));
addendum->setEnclosingMethod(t, enclosingMethod
? singletonObject(t, pool, enclosingMethod - 1) : 0);
} else {
s.skip(length);
}
}
}
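// Point the field, method, and (for interfaces) virtual tables of
// newClass back at newClass instead of oldClass, and fix up the
// static table's class pointer.  Used when a bootstrap class is
// replaced by its fully parsed counterpart.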
void
updateClassTables(Thread* t, GcClass* newClass, GcClass* oldClass)
{
GcArray* fieldTable = cast<GcArray>(t, newClass->fieldTable());
if (fieldTable) {
for (unsigned i = 0; i < fieldTable->length(); ++i) {
cast<GcField>(t, fieldTable->body()[i])->setClass(t, newClass);
}
}
GcSingleton* staticTable = newClass->staticTable();
if (staticTable) {
staticTable->setBodyElement(t, 0, reinterpret_cast<uintptr_t>(newClass));
}
if (newClass->flags() & ACC_INTERFACE) {
GcArray* virtualTable = cast<GcArray>(t, newClass->virtualTable());
if (virtualTable) {
for (unsigned i = 0; i < virtualTable->length(); ++i) {
GcMethod* m = cast<GcMethod>(t, virtualTable->body()[i]);
if (m->class_() == oldClass) {
m->setClass(t, newClass);
}
}
}
}
GcArray* methodTable = cast<GcArray>(t, newClass->methodTable());
if (methodTable) {
for (unsigned i = 0; i < methodTable->length(); ++i) {
cast<GcMethod>(t, methodTable->body()[i])->setClass(t, newClass);
}
}
}
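// Replace the placeholder bootstrap class with the version just
// parsed from the class file, after checking that the two layouts
// are compatible.  Done in exclusive state so no other thread can
// observe a half-updated class.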
void
updateBootstrapClass(Thread* t, GcClass* bootstrapClass, GcClass* class_)
{
expect(t, bootstrapClass != class_);
// verify that the classes have the same layout
expect(t, bootstrapClass->super() == class_->super());
expect(t, bootstrapClass->fixedSize() >= class_->fixedSize());
expect(t, (class_->vmFlags() & HasFinalizerFlag) == 0);
PROTECT(t, bootstrapClass);
PROTECT(t, class_);
ENTER(t, Thread::ExclusiveState);
bootstrapClass->vmFlags() &= ~BootstrapFlag;
bootstrapClass->vmFlags() |= class_->vmFlags();
bootstrapClass->flags() |= class_->flags();
bootstrapClass->setArrayElementClass(t, class_->arrayElementClass());
bootstrapClass->setSuper(t, class_->super());
bootstrapClass->setInterfaceTable(t, class_->interfaceTable());
bootstrapClass->setVirtualTable(t, class_->virtualTable());
bootstrapClass->setFieldTable(t, class_->fieldTable());
bootstrapClass->setMethodTable(t, class_->methodTable());
bootstrapClass->setStaticTable(t, class_->staticTable());
bootstrapClass->setAddendum(t, class_->addendum());
updateClassTables(t, bootstrapClass, class_);
}
GcClass*
makeArrayClass(Thread* t, GcClassLoader* loader, unsigned dimensions, GcByteArray* spec,
GcClass* elementClass)
{
if (type(t, GcJobject::Type)->vmFlags() & BootstrapFlag) {
PROTECT(t, loader);
PROTECT(t, spec);
PROTECT(t, elementClass);
// Load java.lang.Object if present so we can use its vtable, but
// don't throw an exception if we can't find it. This way, we
// avoid infinite recursion due to trying to create an array to
// make a stack trace for a ClassNotFoundException.
resolveSystemClass
(t, roots(t)->bootLoader(),
type(t, GcJobject::Type)->name(), false);
}
GcArray* vtable = cast<GcArray>(t, type(t, GcJobject::Type)->virtualTable());
GcClass* c = t->m->processor->makeClass
(t,
0,
0,
2 * BytesPerWord,
BytesPerWord,
dimensions,
elementClass,
type(t, GcArray::Type)->objectMask(),
spec,
0,
type(t, GcJobject::Type),
reinterpret_cast<object>(roots(t)->arrayInterfaceTable()),
reinterpret_cast<object>(vtable),
0,
0,
0,
0,
loader,
vtable->length());
PROTECT(t, c);
t->m->processor->initVtable(t, c);
return c;
}
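// Record a freshly defined class in its loader's class map,
// creating the map on first use.  Guarded by the global class lock.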
void
saveLoadedClass(Thread* t, GcClassLoader* loader, GcClass* c)
{
PROTECT(t, loader);
PROTECT(t, c);
ACQUIRE(t, t->m->classLock);
if (loader->map() == 0) {
GcHashMap* map = makeHashMap(t, 0, 0);
loader->setMap(t, reinterpret_cast<object>(map));
}
hashMapInsert(t,
cast<GcHashMap>(t, loader->map()),
reinterpret_cast<object>(c->name()),
reinterpret_cast<object>(c),
byteArrayHash);
}
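// Build an array class from a JVM type descriptor such as
// "[[Ljava/lang/String;" or "[[I": count the leading '[' characters,
// extract the element descriptor, resolve the element class, then
// create (or reuse) the array class in that element class's loader.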
GcClass*
makeArrayClass(Thread* t, GcClassLoader* loader, GcByteArray* spec, bool throw_,
Gc::Type throwType)
{
PROTECT(t, loader);
PROTECT(t, spec);
const char* s = reinterpret_cast<const char*>(spec->body().begin());
const char* start = s;
unsigned dimensions = 0;
for (; *s == '['; ++s) ++ dimensions;
GcByteArray* elementSpec;
switch (*s) {
case 'L': {
++ s;
const char* elementSpecStart = s;
while (*s and *s != ';') ++ s;
if (dimensions > 1) {
elementSpecStart -= dimensions;
++ s;
}
elementSpec = makeByteArray(t, s - elementSpecStart + 1);
memcpy(elementSpec->body().begin(),
&spec->body()[elementSpecStart - start],
s - elementSpecStart);
elementSpec->body()[s - elementSpecStart] = 0;
} break;
default:
if (dimensions > 1) {
char c = *s;
elementSpec = makeByteArray(t, dimensions + 1);
unsigned i;
for (i = 0; i < dimensions - 1; ++i) {
elementSpec->body()[i] = '[';
}
elementSpec->body()[i++] = c;
elementSpec->body()[i] = 0;
-- dimensions;
} else {
abort(t);
}
}
GcClass* elementClass = cast<GcClass>(t, hashMapFind
(t, roots(t)->bootstrapClassMap(), reinterpret_cast<object>(elementSpec), byteArrayHash,
byteArrayEqual));
if (elementClass == 0) {
elementClass = resolveClass(t, loader, elementSpec, throw_, throwType);
if (elementClass == 0) return 0;
}
PROTECT(t, elementClass);
ACQUIRE(t, t->m->classLock);
GcClass* class_ = findLoadedClass(t, elementClass->loader(), spec);
if (class_) {
return class_;
}
class_ = makeArrayClass
(t, elementClass->loader(), dimensions, spec, elementClass);
PROTECT(t, class_);
saveLoadedClass(t, elementClass->loader(), class_);
return class_;
}
GcClass*
resolveArrayClass(Thread* t, GcClassLoader* loader, GcByteArray* spec, bool throw_,
Gc::Type throwType)
{
GcClass* c = cast<GcClass>(t,
hashMapFind(t,
roots(t)->bootstrapClassMap(),
reinterpret_cast<object>(spec),
byteArrayHash,
byteArrayEqual));
if (c) {
c->setVirtualTable(t, type(t, GcJobject::Type)->virtualTable());
return c;
} else {
PROTECT(t, loader);
PROTECT(t, spec);
c = findLoadedClass(t, roots(t)->bootLoader(), spec);
if (c) {
return c;
} else {
return makeArrayClass(t, loader, spec, throw_, throwType);
}
}
}
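// Drop the monitor associated with an object from the global
// monitor map once it is no longer needed.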
void
removeMonitor(Thread* t, object o)
{
unsigned hash;
if (DebugMonitors) {
hash = objectHash(t, o);
}
object m = hashMapRemove
(t, roots(t)->monitorMap(), o, objectHash, objectEqual);
if (DebugMonitors) {
fprintf(stderr, "dispose monitor %p for object %x\n", m, hash);
}
}
void
removeString(Thread* t, object o)
{
hashMapRemove(t, roots(t)->stringMap(), o, stringHash, objectEqual);
}
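// Create one of the VM's built-in (bootstrap) classes: set up its
// superclass, reference mask, sizes, and vtable length, then store
// it in the type table under the given type index.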
void
bootClass(Thread* t, Gc::Type type, int superType, uint32_t objectMask,
unsigned fixedSize, unsigned arrayElementSize, unsigned vtableLength)
{
GcClass* super = (superType >= 0
? vm::type(t, static_cast<Gc::Type>(superType)) : 0);
GcIntArray* mask;
if (objectMask) {
if (super
and super->objectMask()
and super->objectMask()->body()[0]
== static_cast<int32_t>(objectMask))
{
mask = vm::type(t, static_cast<Gc::Type>(superType))->objectMask();
} else {
mask = makeIntArray(t, 1);
mask->body()[0] = objectMask;
}
} else {
mask = 0;
}
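// makeIntArray above may have triggered a collection, which can move
// objects; refresh the unprotected super pointer before using it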
super = (superType >= 0
? vm::type(t, static_cast<Gc::Type>(superType)) : 0);
GcClass* class_ = t->m->processor->makeClass
(t, 0, BootstrapFlag, fixedSize, arrayElementSize,
arrayElementSize ? 1 : 0, 0, mask, 0, 0, super, 0, 0, 0, 0, 0, 0,
roots(t)->bootLoader(), vtableLength);
setType(t, type, class_);
}
void
bootJavaClass(Thread* t, Gc::Type type, int superType, const char* name,
int vtableLength, object bootMethod)
{
PROTECT(t, bootMethod);
GcByteArray* n = makeByteArray(t, name);
PROTECT(t, n);
GcClass* class_ = vm::type(t, type);
PROTECT(t, class_);
class_->setName(t, n);
GcArray* vtable;
if (vtableLength >= 0) {
vtable = makeArray(t, vtableLength);
for (int i = 0; i < vtableLength; ++ i) {
vtable->setBodyElement(t, i, bootMethod);
}
} else {
vtable = cast<GcArray>(t, vm::type(t, static_cast<Gc::Type>(superType))->virtualTable());
}
class_->setVirtualTable(t, reinterpret_cast<object>(vtable));
t->m->processor->initVtable(t, class_);
hashMapInsert
(t, roots(t)->bootstrapClassMap(), reinterpret_cast<object>(n), reinterpret_cast<object>(class_), byteArrayHash);
}
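// Assign a Java-visible name to a built-in class without otherwise
// booting it.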
void
nameClass(Thread* t, Gc::Type type, const char* name)
{
GcByteArray* n = makeByteArray(t, name);
cast<GcClass>(t, t->m->types->body()[type])->setName(t, n);
}
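// Build the interface table shared by all array classes.  It is
// laid out as (interface, vtable) pairs, hence size four for the
// two interfaces Serializable and Cloneable; the vtable slots are
// left null.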
void
makeArrayInterfaceTable(Thread* t)
{
GcArray* interfaceTable = makeArray(t, 4);
interfaceTable->setBodyElement(t, 0, reinterpret_cast<object>(type(t, GcSerializable::Type)));
interfaceTable->setBodyElement(t, 2, reinterpret_cast<object>(type(t, GcCloneable::Type)));
roots(t)->setArrayInterfaceTable(t, interfaceTable);
}
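// Bootstrap the VM's object model: allocate the root set and class
// loaders, create the built-in type classes (with allocation marked
// "unsafe" while their metaclasses do not exist yet), wire up the
// primitive and array types, then run the processor-specific boot
// step.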
void
boot(Thread* t)
{
Machine* m = t->m;
m->unsafe = true;
m->roots = reinterpret_cast<GcRoots*>(allocate(t, GcRoots::FixedSize, true));
object classLoader = allocate(t, GcSystemClassLoader::FixedSize, true);
// sequence point, for gc (don't recombine statements)
roots(t)->setBootLoader(t, reinterpret_cast<GcClassLoader*>(classLoader));
classLoader = allocate(t, GcSystemClassLoader::FixedSize, true);
// sequence point, for gc (don't recombine statements)
roots(t)->setAppLoader(t, reinterpret_cast<GcClassLoader*>(classLoader));
m->types = reinterpret_cast<GcArray*>(allocate(t, pad((TypeCount + 2) * BytesPerWord), true));
m->types->length() = TypeCount;
#include "type-initializations.cpp"
GcClass* arrayClass = type(t, GcArray::Type);
setField(t, m->types, 0, arrayClass);
GcClass* rootsClass = type(t, GcRoots::Type);
setField(t, m->roots, 0, rootsClass);
GcClass* loaderClass = type(t, GcSystemClassLoader::Type);
setField(t, roots(t)->bootLoader(), 0, loaderClass);
setField(t, roots(t)->appLoader(), 0, loaderClass);
GcClass* objectClass = type(t, GcJobject::Type);
GcClass* classClass = type(t, GcClass::Type);
setField(t, classClass, 0, classClass);
classClass->setSuper(t, objectClass);
GcClass* intArrayClass = type(t, GcIntArray::Type);
setField(t, intArrayClass, 0, classClass);
intArrayClass->setSuper(t, objectClass);
m->unsafe = false;
type(t, GcSingleton::Type)->vmFlags()
|= SingletonFlag;
type(t, GcContinuation::Type)->vmFlags()
|= ContinuationFlag;
type(t, GcJreference::Type)->vmFlags()
|= ReferenceFlag;
type(t, GcWeakReference::Type)->vmFlags()
|= ReferenceFlag | WeakReferenceFlag;
type(t, GcSoftReference::Type)->vmFlags()
|= ReferenceFlag | WeakReferenceFlag;
type(t, GcPhantomReference::Type)->vmFlags()
|= ReferenceFlag | WeakReferenceFlag;
type(t, GcJboolean::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJbyte::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJchar::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJshort::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJint::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJlong::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJfloat::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJdouble::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcJvoid::Type)->vmFlags()
|= PrimitiveFlag;
type(t, GcBooleanArray::Type)->setArrayElementClass(t, type(t, GcJboolean::Type));
type(t, GcByteArray::Type)->setArrayElementClass(t, type(t, GcJbyte::Type));
type(t, GcCharArray::Type)->setArrayElementClass(t, type(t, GcJchar::Type));
type(t, GcShortArray::Type)->setArrayElementClass(t, type(t, GcJshort::Type));
type(t, GcIntArray::Type)->setArrayElementClass(t, type(t, GcJint::Type));
type(t, GcLongArray::Type)->setArrayElementClass(t, type(t, GcJlong::Type));
type(t, GcFloatArray::Type)->setArrayElementClass(t, type(t, GcJfloat::Type));
type(t, GcDoubleArray::Type)->setArrayElementClass(t, type(t, GcJdouble::Type));
{ GcHashMap* map = makeHashMap(t, 0, 0);
roots(t)->bootLoader()->setMap(t, reinterpret_cast<object>(map));
}
roots(t)->bootLoader()->as<GcSystemClassLoader>(t)->finder() = m->bootFinder;
{ GcHashMap* map = makeHashMap(t, 0, 0);
roots(t)->appLoader()->setMap(t, reinterpret_cast<object>(map));
}
roots(t)->appLoader()->as<GcSystemClassLoader>(t)->finder() = m->appFinder;
roots(t)->appLoader()->setParent(t, roots(t)->bootLoader());
{
GcHashMap* map = makeHashMap(t, 0, 0);
// sequence point, for gc (don't recombine statements)
roots(t)->setBootstrapClassMap(t, map);
}
{
GcWeakHashMap* map = makeWeakHashMap(t, 0, 0);
// sequence point, for gc (don't recombine statements)
roots(t)->setStringMap(t, map->as<GcHashMap>(t));
}
makeArrayInterfaceTable(t);
type(t, GcBooleanArray::Type)->setInterfaceTable(t, reinterpret_cast<object>(roots(t)->arrayInterfaceTable()));
type(t, GcByteArray::Type)->setInterfaceTable(t, reinterpret_cast<object>(roots(t)->arrayInterfaceTable()));
type(t, GcCharArray::Type)->setInterfaceTable(t, reinterpret_cast<object>(roots(t)->arrayInterfaceTable()));
type(t, GcShortArray::Type)->setInterfaceTable(t, reinterpret_cast<object>(roots(t)->arrayInterfaceTable()));
type(t, GcIntArray::Type)->setInterfaceTable(t, reinterpret_cast<object>(roots(t)->arrayInterfaceTable()));
type(t, GcLongArray::Type)->setInterfaceTable(t, reinterpret_cast<object>(roots(t)->arrayInterfaceTable()));
type(t, GcFloatArray::Type)->setInterfaceTable(t, reinterpret_cast<object>(roots(t)->arrayInterfaceTable()));
type(t, GcDoubleArray::Type)->setInterfaceTable(t, reinterpret_cast<object>(roots(t)->arrayInterfaceTable()));
m->processor->boot(t, 0, 0);
{ GcCode* bootCode = makeCode(t, 0, 0, 0, 0, 0, 0, 0, 0, 1);
bootCode->body()[0] = impdep1;
object bootMethod = reinterpret_cast<object>(makeMethod
(t, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, bootCode));
PROTECT(t, bootMethod);
#include "type-java-initializations.cpp"
//#ifdef AVIAN_HEAPDUMP
# include "type-name-initializations.cpp"
//#endif
}
}
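// Glue between the collector and the VM: tells the heap how to find
// roots, how big objects are (including the extra word used to
// preserve an identity hash code during copying), and how to copy
// and walk them.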
class HeapClient: public Heap::Client {
public:
HeapClient(Machine* m): m(m) { }
virtual void visitRoots(Heap::Visitor* v) {
::visitRoots(m, v);
postVisit(m->rootThread, v);
}
virtual void collect(void* context, Heap::CollectionType type) {
// qualify the call so it reaches the namespace-scope collect
// rather than recursing into this member function
::collect(static_cast<Thread*>(context), type);
}
virtual bool isFixed(void* p) {
return objectFixed(m->rootThread, static_cast<object>(p));
}
virtual unsigned sizeInWords(void* p) {
Thread* t = m->rootThread;
object o = static_cast<object>(m->heap->follow(maskAlignedPointer(p)));
unsigned n = baseSize(t, o, m->heap->follow(objectClass(t, o)));
if (objectExtended(t, o)) {
++ n;
}
return n;
}
virtual unsigned copiedSizeInWords(void* p) {
Thread* t = m->rootThread;
object o = static_cast<object>(m->heap->follow(maskAlignedPointer(p)));
assertT(t, not objectFixed(t, o));
unsigned n = baseSize(t, o, m->heap->follow(objectClass(t, o)));
if (objectExtended(t, o) or hashTaken(t, o)) {
++ n;
}
return n;
}
virtual void copy(void* srcp, void* dstp) {
Thread* t = m->rootThread;
object src = static_cast<object>(m->heap->follow(maskAlignedPointer(srcp)));
assertT(t, not objectFixed(t, src));
GcClass* class_ = m->heap->follow(objectClass(t, src));
unsigned base = baseSize(t, src, class_);
unsigned n = extendedSize(t, src, base);
object dst = static_cast<object>(dstp);
memcpy(dst, src, n * BytesPerWord);
if (hashTaken(t, src)) {
alias(dst, 0) &= PointerMask;
alias(dst, 0) |= ExtendedMark;
extendedWord(t, dst, base) = takeHash(t, src);
}
}
virtual void walk(void* p, Heap::Walker* w) {
object o = static_cast<object>(m->heap->follow(maskAlignedPointer(p)));
::walk(m->rootThread, w, o, 0);
}
void dispose() {
m->heap->free(this, sizeof(*this));
}
private:
Machine* m;
};
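// Run a garbage collection of the given type; pendingAllocation
// accounts for an allocation the caller is waiting to make.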
void
doCollect(Thread* t, Heap::CollectionType type, int pendingAllocation)
{
expect(t, not t->m->collecting);
t->m->collecting = true;
THREAD_RESOURCE0(t, t->m->collecting = false);
#ifdef VM_STRESS
bool stress = (t->flags & Thread::StressFlag) != 0;
if (not stress) atomicOr(&(t->flags), Thread::StressFlag);
#endif
Machine* m = t->m;
m->unsafe = true;
m->heap->collect(type, footprint(m->rootThread), pendingAllocation
- (t->m->heapPoolIndex * ThreadHeapSizeInWords));
m->unsafe = false;
postCollect(m->rootThread);
killZombies(t, m->rootThread);
for (unsigned i = 0; i < m->heapPoolIndex; ++i) {
m->heap->free(m->heapPool[i], ThreadHeapSizeInBytes);
}
m->heapPoolIndex = 0;
if (m->heap->limitExceeded()) {
// if we're out of memory, disallow further allocations of fixed
// objects:
m->fixedFootprint = FixedFootprintThresholdInBytes;
} else {
m->fixedFootprint = 0;
}
#ifdef VM_STRESS
if (not stress) atomicAnd(&(t->flags), ~Thread::StressFlag);
#endif
GcFinalizer* finalizeQueue = t->m->finalizeQueue;
t->m->finalizeQueue = 0;
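  // run the finalizers made ready by this collection.  Each record's
  // finalize field holds a raw function pointer stored in an object
  // slot, so we recover it bit-for-bit with memcpy instead of a cast.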
for (; finalizeQueue; finalizeQueue = cast<GcFinalizer>(t, finalizeQueue->next())) {
void (*function)(Thread*, object);
memcpy(&function, &finalizeQueue->finalize(), BytesPerWord);
function(t, finalizeQueue->target());
}
if ((roots(t)->objectsToFinalize() or roots(t)->objectsToClean())
and m->finalizeThread == 0
and t->state != Thread::ExitState)
{
m->finalizeThread = m->processor->makeThread
(m, roots(t)->finalizerThread(), m->rootThread);
addThread(t, m->finalizeThread);
if (not startThread(t, m->finalizeThread)) {
removeThread(t, m->finalizeThread);
m->finalizeThread = 0;
}
}
}
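// trampoline for the processor's raw-argument invocation interface:
// unpack the (method, loader, class name) triple from the argument
// array and invoke the given loadClass method with the loader and
// class-name string.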
uint64_t
invokeLoadClass(Thread* t, uintptr_t* arguments)
{
GcMethod* method = cast<GcMethod>(t, reinterpret_cast<object>(arguments[0]));
object loader = reinterpret_cast<object>(arguments[1]);
object specString = reinterpret_cast<object>(arguments[2]);
return reinterpret_cast<uintptr_t>
(t->m->processor->invoke(t, method, loader, specString));
}
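// return true if the current thread is already running the static
// initializer of class c, i.e. c appears somewhere on this thread's
// class initialization stack.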
bool
isInitializing(Thread* t, GcClass* c)
{
for (Thread::ClassInitStack* s = t->classInitStack; s; s = s->next) {
if (s->class_ == c) {
return true;
}
}
return false;
}
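// linearly search a class member table for an entry whose name and
// spec both match, reading each entry through the caller-supplied
// accessor functions.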
object
findInTable(Thread* t, GcArray* table, GcByteArray* name, GcByteArray* spec,
GcByteArray* (*getName)(Thread*, object),
GcByteArray* (*getSpec)(Thread*, object))
{
if (table) {
for (unsigned i = 0; i < table->length(); ++i) {
object o = table->body()[i];
if (vm::strcmp(getName(t, o)->body().begin(),
name->body().begin()) == 0 and
vm::strcmp(getSpec(t, o)->body().begin(),
spec->body().begin()) == 0)
{
return o;
}
}
    // fprintf(stderr, "%s %s not in\n",
    //         name->body().begin(),
    //         spec->body().begin());
    // for (unsigned i = 0; i < table->length(); ++i) {
    //   object o = table->body()[i];
    //   fprintf(stderr, "\t%s %s\n",
    //           getName(t, o)->body().begin(),
    //           getSpec(t, o)->body().begin());
    // }
}
return 0;
}
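// record which source (e.g. jar file) the package containing class_
// was loaded from, keyed by package name, for use by OpenJDK's
// Package.defineSystemPackage (see the comment on the "file:" prefix
// below).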
void
updatePackageMap(Thread* t, GcClass* class_)
{
PROTECT(t, class_);
if (roots(t)->packageMap() == 0) {
GcHashMap* map = makeHashMap(t, 0, 0);
// sequence point, for gc (don't recombine statements)
roots(t)->setPackageMap(t, map);
}
GcByteArray* className = class_->name();
if ('[' != className->body()[0]) {
THREAD_RUNTIME_ARRAY
(t, char, packageName, className->length());
char* s = reinterpret_cast<char*>(className->body().begin());
char* p = strrchr(s, '/');
if (p) {
int length = (p - s) + 1;
memcpy(RUNTIME_ARRAY_BODY(packageName),
className->body().begin(),
length);
RUNTIME_ARRAY_BODY(packageName)[length] = 0;
GcByteArray* key = vm::makeByteArray
(t, "%s", RUNTIME_ARRAY_BODY(packageName));
PROTECT(t, key);
hashMapRemove
(t, roots(t)->packageMap(), reinterpret_cast<object>(key), byteArrayHash,
byteArrayEqual);
GcByteArray* source = class_->source();
if (source) {
// note that we strip the "file:" prefix, since OpenJDK's
// Package.defineSystemPackage expects an unadorned filename:
const unsigned PrefixLength = 5;
unsigned sourceNameLength = source->length()
- PrefixLength;
THREAD_RUNTIME_ARRAY(t, char, sourceName, sourceNameLength);
memcpy(RUNTIME_ARRAY_BODY(sourceName),
&source->body()[PrefixLength],
sourceNameLength);
source = vm::makeByteArray(t, "%s", RUNTIME_ARRAY_BODY(sourceName));
} else {
source = vm::makeByteArray(t, "avian-dummy-package-source");
}
hashMapInsert
(t, roots(t)->packageMap(), reinterpret_cast<object>(key), reinterpret_cast<object>(source), byteArrayHash);
}
}
}
} // namespace
namespace vm {
Machine::Machine(System* system, Heap* heap, Finder* bootFinder,
Finder* appFinder, Processor* processor, Classpath* classpath,
const char** properties, unsigned propertyCount,
const char** arguments, unsigned argumentCount,
unsigned stackSizeInBytes):
vtable(&javaVMVTable),
system(system),
heapClient(new (heap->allocate(sizeof(HeapClient)))
HeapClient(this)),
heap(heap),
bootFinder(bootFinder),
appFinder(appFinder),
processor(processor),
classpath(classpath),
rootThread(0),
exclusive(0),
finalizeThread(0),
jniReferences(0),
propertyCount(propertyCount),
arguments(arguments),
argumentCount(argumentCount),
threadCount(0),
activeCount(0),
liveCount(0),
daemonCount(0),
fixedFootprint(0),
stackSizeInBytes(stackSizeInBytes),
localThread(0),
stateLock(0),
heapLock(0),
classLock(0),
referenceLock(0),
shutdownLock(0),
libraries(0),
errorLog(0),
bootimage(0),
types(0),
roots(0),
finalizers(0),
tenuredFinalizers(0),
finalizeQueue(0),
weakReferences(0),
tenuredWeakReferences(0),
unsafe(false),
collecting(false),
triedBuiltinOnLoad(false),
dumpedHeapOnOOM(false),
alive(true),
heapPoolIndex(0)
{
heap->setClient(heapClient);
populateJNITables(&javaVMVTable, &jniEnvVTable);
  // copy the properties into VM-owned memory so they remain valid for
  // the machine's lifetime, regardless of what the caller does with
  // the originals
this->properties = (char**)heap->allocate(sizeof(char*) * propertyCount);
for (unsigned int i = 0; i < propertyCount; i++)
{
size_t length = strlen(properties[i]) + 1; // +1 for null-terminating char
this->properties[i] = (char*)heap->allocate(sizeof(char) * length);
memcpy(this->properties[i], properties[i], length);
}
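  // the bootstrap property, if set, names a list of libraries
  // separated by the system path separator; the first is loaded here
  // together with the VM's locks, and the loop below loads the rest.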
const char* bootstrapProperty = findProperty(this, BOOTSTRAP_PROPERTY);
const char* bootstrapPropertyDup = bootstrapProperty ? strdup(bootstrapProperty) : 0;
const char* bootstrapPropertyEnd = bootstrapPropertyDup + (bootstrapPropertyDup ? strlen(bootstrapPropertyDup) : 0);
char* codeLibraryName = (char*)bootstrapPropertyDup;
char* codeLibraryNameEnd = 0;
if (codeLibraryName && (codeLibraryNameEnd = strchr(codeLibraryName, system->pathSeparator())))
*codeLibraryNameEnd = 0;
if (not system->success(system->make(&localThread)) or
not system->success(system->make(&stateLock)) or
not system->success(system->make(&heapLock)) or
not system->success(system->make(&classLock)) or
not system->success(system->make(&referenceLock)) or
not system->success(system->make(&shutdownLock)) or
not system->success
(system->load(&libraries, bootstrapPropertyDup)))
{
system->abort();
}
System::Library* additionalLibrary = 0;
while (codeLibraryNameEnd && codeLibraryNameEnd + 1 < bootstrapPropertyEnd) {
codeLibraryName = codeLibraryNameEnd + 1;
codeLibraryNameEnd = strchr(codeLibraryName, system->pathSeparator());
if (codeLibraryNameEnd)
*codeLibraryNameEnd = 0;
if (!system->success(system->load(&additionalLibrary, codeLibraryName)))
system->abort();
libraries->setNext(additionalLibrary);
}
  if (bootstrapPropertyDup)
    free((void*)bootstrapPropertyDup);
}
void
Machine::dispose()
{
localThread->dispose();
stateLock->dispose();
heapLock->dispose();
classLock->dispose();
referenceLock->dispose();
shutdownLock->dispose();
if (libraries) {
libraries->disposeAll();
}
for (Reference* r = jniReferences; r;) {
Reference* tmp = r;
r = r->next;
heap->free(tmp, sizeof(*tmp));
}
for (unsigned i = 0; i < heapPoolIndex; ++i) {
heap->free(heapPool[i], ThreadHeapSizeInBytes);
}
if (bootimage) {
heap->free(bootimage, bootimageSize);
}
heap->free(arguments, sizeof(const char*) * argumentCount);
for (unsigned int i = 0; i < propertyCount; i++)
{
heap->free(properties[i], sizeof(char) * (strlen(properties[i]) + 1));
}
heap->free(properties, sizeof(const char*) * propertyCount);
static_cast<HeapClient*>(heapClient)->dispose();
heap->free(this, sizeof(*this));
}
Thread::Thread(Machine* m, GcThread* javaThread, Thread* parent):
vtable(&(m->jniEnvVTable)),
m(m),
parent(parent),
peer(0),
child(0),
waitNext(0),
state(NoState),
criticalLevel(0),
systemThread(0),
lock(0),
javaThread(javaThread),
exception(0),
heapIndex(0),
heapOffset(0),
protector(0),
classInitStack(0),
libraryLoadStack(0),
runnable(this),
defaultHeap(static_cast<uintptr_t*>
(m->heap->allocate(ThreadHeapSizeInBytes))),
heap(defaultHeap),
backupHeapIndex(0),
flags(ActiveFlag)
{ }
void
Thread::init()
{
memset(defaultHeap, 0, ThreadHeapSizeInBytes);
memset(backupHeap, 0, ThreadBackupHeapSizeInBytes);
if (parent == 0) {
assertT(this, m->rootThread == 0);
assertT(this, javaThread == 0);
m->rootThread = this;
m->unsafe = true;
if (not m->system->success(m->system->attach(&runnable))) {
abort(this);
}
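    // the "avian.bootimage" property, if set, names a symbol in one
    // of the loaded libraries which yields a pointer to an embedded
    // boot image (LZMA-compressed if the name carries an "lzma:"
    // prefix), and "avian.codeimage" likewise names a symbol yielding
    // the corresponding precompiled code image.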
BootImage* image = 0;
uint8_t* code = 0;
const char* imageFunctionName = findProperty(m, "avian.bootimage");
if (imageFunctionName) {
bool lzma = strncmp("lzma:", imageFunctionName, 5) == 0;
const char* symbolName
= lzma ? imageFunctionName + 5 : imageFunctionName;
void* imagep = m->libraries->resolve(symbolName);
if (imagep) {
uint8_t* (*imageFunction)(unsigned*);
memcpy(&imageFunction, &imagep, BytesPerWord);
unsigned size;
uint8_t* imageBytes = imageFunction(&size);
if (lzma) {
#ifdef AVIAN_USE_LZMA
m->bootimage = image = reinterpret_cast<BootImage*>
(decodeLZMA
(m->system, m->heap, imageBytes, size, &(m->bootimageSize)));
#else
abort(this);
#endif
} else {
image = reinterpret_cast<BootImage*>(imageBytes);
}
const char* codeFunctionName = findProperty(m, "avian.codeimage");
if (codeFunctionName) {
void* codep = m->libraries->resolve(codeFunctionName);
if (codep) {
uint8_t* (*codeFunction)(unsigned*);
memcpy(&codeFunction, &codep, BytesPerWord);
code = codeFunction(&size);
}
}
}
}
m->unsafe = false;
enter(this, ActiveState);
if (image and code) {
m->processor->boot(this, image, code);
makeArrayInterfaceTable(this);
} else {
boot(this);
}
GcWeakHashMap* map = makeWeakHashMap(this, 0, 0);
// sequence point, for gc (don't recombine statements)
roots(this)->setByteArrayMap(this, map->as<GcHashMap>(this));
map = makeWeakHashMap(this, 0, 0);
// sequence point, for gc (don't recombine statements)
roots(this)->setMonitorMap(this, map->as<GcHashMap>(this));
GcVector* v = makeVector(this, 0, 0);
// sequence point, for gc (don't recombine statements)
roots(this)->setClassRuntimeDataTable(this, v);
v = makeVector(this, 0, 0);
// sequence point, for gc (don't recombine statements)
2014-06-26 01:42:16 +00:00
roots(this)->setMethodRuntimeDataTable(this, v);
v = makeVector(this, 0, 0);
// sequence point, for gc (don't recombine statements)
2014-06-26 01:42:16 +00:00
roots(this)->setJNIMethodTable(this, v);
v = makeVector(this, 0, 0);
// sequence point, for gc (don't recombine statements)
2014-06-26 01:42:16 +00:00
roots(this)->setJNIFieldTable(this, v);
m->localThread->set(this);
}
expect(this, m->system->success(m->system->make(&lock)));
}
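// retire this thread: the last live thread shuts the machine down
// via turnOffTheLights, while any other thread detaches from its
// Java peer and lingers as a zombie until joined.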
void
Thread::exit()
{
if (state != Thread::ExitState and
state != Thread::ZombieState)
{
enter(this, Thread::ExclusiveState);
if (m->liveCount == 1) {
turnOffTheLights(this);
} else {
javaThread->peer() = 0;
enter(this, Thread::ZombieState);
}
}
}
void
Thread::dispose()
{
if (lock) {
lock->dispose();
}
if (systemThread) {
systemThread->dispose();
}
-- m->threadCount;
m->heap->free(defaultHeap, ThreadHeapSizeInBytes);
m->processor->dispose(this);
}
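// orderly shutdown: start the registered shutdown hooks and wait for
// them to exit, tell the finalize thread to exit and wait for it,
// then mark the machine dead and interrupt the remaining daemon
// threads.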
void
shutDown(Thread* t)
{
ACQUIRE(t, t->m->shutdownLock);
GcPair* hooks = roots(t)->shutdownHooks();
PROTECT(t, hooks);
roots(t)->setShutdownHooks(t, 0);
GcPair* h = hooks;
PROTECT(t, h);
for (; h; h = cast<GcPair>(t, h->second())) {
startThread(t, cast<GcThread>(t, h->first()));
2007-07-07 18:09:16 +00:00
}
// wait for hooks to exit
h = hooks;
for (; h; h = cast<GcPair>(t, h->second())) {
while (true) {
2014-06-29 05:48:17 +00:00
Thread* ht = reinterpret_cast<Thread*>(cast<GcThread>(t, h->first())->peer());
{ ACQUIRE(t, t->m->stateLock);
if (ht == 0
or ht->state == Thread::ZombieState
or ht->state == Thread::JoinedState)
{
break;
} else {
ENTER(t, Thread::IdleState);
t->m->stateLock->wait(t->systemThread, 0);
}
}
}
}
// tell finalize thread to exit and wait for it to do so
{ ACQUIRE(t, t->m->stateLock);
Thread* finalizeThread = t->m->finalizeThread;
if (finalizeThread) {
t->m->finalizeThread = 0;
t->m->stateLock->notifyAll(t->systemThread);
while (finalizeThread->state != Thread::ZombieState
and finalizeThread->state != Thread::JoinedState)
{
ENTER(t, Thread::IdleState);
t->m->stateLock->wait(t->systemThread, 0);
}
}
}
// interrupt daemon threads and tell them to die
// todo: be more aggressive about killing daemon threads, e.g. at
// any GC point, not just at waits/sleeps
{ ACQUIRE(t, t->m->stateLock);
t->m->alive = false;
visitAll(t, t->m->rootThread, interruptDaemon);
}
}
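// move thread t into state s, maintaining the machine's activeCount,
// liveCount, and daemonCount and cooperating with any thread that
// holds or wants the exclusive state; this is the heart of the VM's
// stop-the-world handshake.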
void
enter(Thread* t, Thread::State s)
{
stress(t);
if (s == t->state) return;
if (t->state == Thread::ExitState) {
// once in exit state, we stay that way
return;
}
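  // with atomic operations available we take the state lock only on
  // the slow paths below; otherwise we must hold it for the entire
  // transition.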
#ifdef USE_ATOMIC_OPERATIONS
# define INCREMENT atomicIncrement
# define ACQUIRE_LOCK ACQUIRE_RAW(t, t->m->stateLock)
# define STORE_LOAD_MEMORY_BARRIER storeLoadMemoryBarrier()
#else
# define INCREMENT(pointer, value) *(pointer) += value;
# define ACQUIRE_LOCK
# define STORE_LOAD_MEMORY_BARRIER
ACQUIRE_RAW(t, t->m->stateLock);
#endif // not USE_ATOMIC_OPERATIONS
switch (s) {
case Thread::ExclusiveState: {
ACQUIRE_LOCK;
while (t->m->exclusive) {
2007-07-06 23:50:26 +00:00
// another thread got here first.
ENTER(t, Thread::IdleState);
t->m->stateLock->wait(t->systemThread, 0);
}
switch (t->state) {
case Thread::ActiveState: break;
case Thread::IdleState: {
INCREMENT(&(t->m->activeCount), 1);
} break;
default: abort(t);
}
t->state = Thread::ExclusiveState;
t->m->exclusive = t;
STORE_LOAD_MEMORY_BARRIER;
while (t->m->activeCount > 1) {
t->m->stateLock->wait(t->systemThread, 0);
}
} break;
case Thread::IdleState:
if (LIKELY(t->state == Thread::ActiveState)) {
// fast path
assertT(t, t->m->activeCount > 0);
INCREMENT(&(t->m->activeCount), -1);
t->state = s;
if (t->m->exclusive) {
ACQUIRE_LOCK;
t->m->stateLock->notifyAll(t->systemThread);
}
break;
} else {
// fall through to slow path
}
case Thread::ZombieState: {
ACQUIRE_LOCK;
switch (t->state) {
case Thread::ExclusiveState: {
assertT(t, t->m->exclusive == t);
t->m->exclusive = 0;
} break;
case Thread::ActiveState: break;
default: abort(t);
}
assertT(t, t->m->activeCount > 0);
INCREMENT(&(t->m->activeCount), -1);
if (s == Thread::ZombieState) {
assertT(t, t->m->liveCount > 0);
-- t->m->liveCount;
if (t->flags & Thread::DaemonFlag) {
-- t->m->daemonCount;
}
}
t->state = s;
t->m->stateLock->notifyAll(t->systemThread);
} break;
case Thread::ActiveState:
if (LIKELY(t->state == Thread::IdleState and t->m->exclusive == 0)) {
// fast path
INCREMENT(&(t->m->activeCount), 1);
t->state = s;
if (t->m->exclusive) {
// another thread has entered the exclusive state, so we
// return to idle and use the slow path to become active
enter(t, Thread::IdleState);
} else {
break;
}
}
{ ACQUIRE_LOCK;
switch (t->state) {
case Thread::ExclusiveState: {
assertT(t, t->m->exclusive == t);
t->state = s;
t->m->exclusive = 0;
t->m->stateLock->notifyAll(t->systemThread);
} break;
case Thread::NoState:
case Thread::IdleState: {
while (t->m->exclusive) {
t->m->stateLock->wait(t->systemThread, 0);
}
INCREMENT(&(t->m->activeCount), 1);
if (t->state == Thread::NoState) {
++ t->m->liveCount;
++ t->m->threadCount;
}
t->state = s;
} break;
default: abort(t);
}
} break;
case Thread::ExitState: {
ACQUIRE_LOCK;
switch (t->state) {
case Thread::ExclusiveState: {
assertT(t, t->m->exclusive == t);
// exit state should also be exclusive, so don't set exclusive = 0
t->m->stateLock->notifyAll(t->systemThread);
} break;
case Thread::ActiveState: break;
default: abort(t);
}
assertT(t, t->m->activeCount > 0);
INCREMENT(&(t->m->activeCount), -1);
t->state = s;
while (t->m->liveCount - t->m->daemonCount > 1) {
t->m->stateLock->wait(t->systemThread, 0);
}
} break;
default: abort(t);
}
}
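// allocation slow path: requests too large to ever fit in a thread
// heap become fixed (non-moving) allocations; everything else is
// movable and allocated from the thread-local heap via allocate3.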
object
allocate2(Thread* t, unsigned sizeInBytes, bool objectMask)
{
return allocate3
(t, t->m->heap,
ceilingDivide(sizeInBytes, BytesPerWord) > ThreadHeapSizeInWords ?
Machine::FixedAllocation : Machine::MovableAllocation,
sizeInBytes, objectMask);
}
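// the general allocator: serves the backup-heap and heap-tracing
// modes, yields to any thread waiting to become exclusive, triggers
// a collection when the thread heap, the fixed-footprint budget, or
// the global heap limit is exhausted, and finally dispatches on the
// allocation type.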
object
allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
2008-04-13 18:15:04 +00:00
unsigned sizeInBytes, bool objectMask)
{
expect(t, t->criticalLevel == 0);
if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
expect(t, t->backupHeapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
<= ThreadBackupHeapSizeInWords);
object o = reinterpret_cast<object>(t->backupHeap + t->backupHeapIndex);
t->backupHeapIndex += ceilingDivide(sizeInBytes, BytesPerWord);
fieldAtOffset<object>(o, 0) = 0;
return o;
} else if (UNLIKELY(t->flags & Thread::TracingFlag)) {
expect(t, t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
<= ThreadHeapSizeInWords);
return allocateSmall(t, sizeInBytes);
}
ACQUIRE_RAW(t, t->m->stateLock);
while (t->m->exclusive and t->m->exclusive != t) {
// another thread wants to enter the exclusive state, either for a
// collection or some other reason. We give it a chance here.
2007-07-07 18:09:16 +00:00
ENTER(t, Thread::IdleState);
while (t->m->exclusive) {
t->m->stateLock->wait(t->systemThread, 0);
}
}
do {
switch (type) {
case Machine::MovableAllocation:
if (t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
> ThreadHeapSizeInWords)
{
t->heap = 0;
if ((not t->m->heap->limitExceeded())
and t->m->heapPoolIndex < ThreadHeapPoolSize)
{
t->heap = static_cast<uintptr_t*>
(t->m->heap->tryAllocate(ThreadHeapSizeInBytes));
if (t->heap) {
memset(t->heap, 0, ThreadHeapSizeInBytes);
t->m->heapPool[t->m->heapPoolIndex++] = t->heap;
t->heapOffset += t->heapIndex;
t->heapIndex = 0;
}
}
}
break;
case Machine::FixedAllocation:
if (t->m->fixedFootprint + sizeInBytes > FixedFootprintThresholdInBytes)
{
t->heap = 0;
}
break;
case Machine::ImmortalAllocation:
break;
}
int pendingAllocation = t->m->heap->fixedFootprint
(ceilingDivide(sizeInBytes, BytesPerWord), objectMask);
if (t->heap == 0 or t->m->heap->limitExceeded(pendingAllocation)) {
// fprintf(stderr, "gc");
// vmPrintTrace(t);
collect(t, Heap::MinorCollection, pendingAllocation);
}
if (t->m->heap->limitExceeded(pendingAllocation)) {
throw_(t, roots(t)->outOfMemoryError());
}
} while (type == Machine::MovableAllocation
and t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
> ThreadHeapSizeInWords);
switch (type) {
case Machine::MovableAllocation: {
return allocateSmall(t, sizeInBytes);
}
case Machine::FixedAllocation: {
object o = static_cast<object>
(t->m->heap->allocateFixed
(allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask));
memset(o, 0, sizeInBytes);
alias(o, 0) = FixedMark;
t->m->fixedFootprint += t->m->heap->fixedFootprint
(ceilingDivide(sizeInBytes, BytesPerWord), objectMask);
return o;
}
case Machine::ImmortalAllocation: {
object o = static_cast<object>
(t->m->heap->allocateImmortalFixed
(allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask));
memset(o, 0, sizeInBytes);
alias(o, 0) = FixedMark;
return o;
}
default: abort(t);
}
}
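// stop the world and collect, escalating to a major collection if the
// heap limit would otherwise remain exceeded, and retrying once
// afterwards to squeeze everything into the smallest possible space.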
void
collect(Thread* t, Heap::CollectionType type, int pendingAllocation)
{
ENTER(t, Thread::ExclusiveState);
unsigned pending = pendingAllocation
- (t->m->heapPoolIndex * ThreadHeapSizeInWords);
if (t->m->heap->limitExceeded(pending)) {
type = Heap::MajorCollection;
}
doCollect(t, type, pendingAllocation);
if (t->m->heap->limitExceeded(pending)) {
// try once more, giving the heap a chance to squeeze everything
// into the smallest possible space:
doCollect(t, Heap::MajorCollection, pendingAllocation);
}
}
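// allocate an instance of class_, registering it with the VM as its
// flags require: weak references are linked into the machine's weak
// reference list, and instances of classes with finalizers get a
// finalizer record.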
object
makeNewGeneral(Thread* t, GcClass* class_)
{
assertT(t, t->state == Thread::ActiveState);
PROTECT(t, class_);
object instance = makeNew(t, class_);
PROTECT(t, instance);
if (class_->vmFlags() & WeakReferenceFlag) {
ACQUIRE(t, t->m->referenceLock);
cast<GcJreference>(t, instance)->vmNext() = reinterpret_cast<object>(t->m->weakReferences);
t->m->weakReferences = cast<GcJreference>(t, instance);
}
if (class_->vmFlags() & HasFinalizerFlag) {
addFinalizer(t, instance, 0);
}
return instance;
}
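// unwind support for non-local exception returns: release every
// stack-allocated resource registered since the current checkpoint
// and restore the protector list to match.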
void
popResources(Thread* t)
{
while (t->resource != t->checkpoint->resource) {
Thread::Resource* r = t->resource;
t->resource = r->next;
r->release();
}
t->protector = t->checkpoint->protector;
}
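// format a string into a new byte array of at most the given size,
// returning 0 if the formatted text did not fit so the caller can
// retry with a larger size (see makeByteArray and makeString below).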
GcByteArray*
makeByteArrayV(Thread* t, const char* format, va_list a, int size)
{
THREAD_RUNTIME_ARRAY(t, char, buffer, size);
int r = vm::vsnprintf(RUNTIME_ARRAY_BODY(buffer), size - 1, format, a);
if (r >= 0 and r < size - 1) {
GcByteArray* s = makeByteArray(t, strlen(RUNTIME_ARRAY_BODY(buffer)) + 1);
memcpy(s->body().begin(), RUNTIME_ARRAY_BODY(buffer),
s->length());
return s;
} else {
return 0;
}
}
GcByteArray*
makeByteArray(Thread* t, const char* format, ...)
{
int size = 256;
while (true) {
va_list a;
va_start(a, format);
GcByteArray* s = makeByteArrayV(t, format, a, size);
va_end(a);
if (s) {
return s;
} else {
size *= 2;
}
}
}
GcString*
2007-07-06 23:50:26 +00:00
makeString(Thread* t, const char* format, ...)
{
int size = 256;
while (true) {
va_list a;
va_start(a, format);
    GcByteArray* s = makeByteArrayV(t, format, a, size);
    va_end(a);
if (s) {
return t->m->classpath->makeString(t, reinterpret_cast<object>(s), 0, s->length() - 1);
} else {
size *= 2;
}
}
}
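// Compute the UTF-8 length of a substring of a string.  Strings
// backed by byte arrays count one byte per character; strings
// backed by char arrays encode each UTF-16 code unit as one, two,
// or three bytes.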
int
stringUTFLength(Thread* t, GcString* string, unsigned start, unsigned length)
{
unsigned result = 0;
if (length) {
    object data = reinterpret_cast<object>(string->data());
if (objectClass(t, data) == type(t, GcByteArray::Type)) {
result = length;
} else {
      GcCharArray* a = cast<GcCharArray>(t, data);
      for (unsigned i = 0; i < length; ++i) {
uint16_t c = a->body()[string->offset(t) + start + i];
        if (c == 0) result += 1; // null char (one byte here; two in Java's modified UTF-8)
else if (c < 0x80) result += 1; // ASCII char
else if (c < 0x800) result += 2; // two-byte char
else result += 3; // three-byte char
}
}
}
return result;
}
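// Copy length characters of the string, starting at start, into
// chars as 8-bit values (truncating any wider characters), and
// null-terminate the result.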
void
stringChars(Thread* t, GcString* string, unsigned start, unsigned length,
char* chars)
{
if (length) {
    object data = reinterpret_cast<object>(string->data());
if (objectClass(t, data) == type(t, GcByteArray::Type)) {
      GcByteArray* b = cast<GcByteArray>(t, data);
      memcpy(chars,
&b->body()[string->offset(t) + start],
length);
} else {
      GcCharArray* c = cast<GcCharArray>(t, data);
      for (unsigned i = 0; i < length; ++i) {
chars[i] = c->body()[string->offset(t) + start + i];
}
}
}
chars[length] = 0;
}
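// As above, but copy into a 16-bit buffer, widening 8-bit data
// where necessary.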
void
stringChars(Thread* t, GcString* string, unsigned start, unsigned length,
uint16_t* chars)
{
if (length) {
    object data = reinterpret_cast<object>(string->data());
if (objectClass(t, data) == type(t, GcByteArray::Type)) {
      GcByteArray* b = cast<GcByteArray>(t, data);
      for (unsigned i = 0; i < length; ++i) {
chars[i] = b->body()[string->offset(t) + start + i];
}
} else {
      GcCharArray* c = cast<GcCharArray>(t, data);
      memcpy(chars,
&c->body()[string->offset(t) + start],
length * sizeof(uint16_t));
}
}
chars[length] = 0;
}
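// Encode a substring of the string into chars as UTF-8.  Unlike
// JNI's modified UTF-8, a null character is written as a single
// zero byte here, so charsLength (used only in the debug
// assertion below) must come from stringUTFLength.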
void
stringUTFChars(Thread* t, GcString* string, unsigned start, unsigned length,
char* chars, unsigned charsLength UNUSED)
{
assertT(t, static_cast<unsigned>
(stringUTFLength(t, string, start, length)) == charsLength);
object data = reinterpret_cast<object>(string->data());
if (objectClass(t, data) == type(t, GcByteArray::Type)) {
GcByteArray* b = cast<GcByteArray>(t, data);
memcpy(chars,
2014-06-29 05:48:17 +00:00
&b->body()[string->offset(t) + start],
length);
    chars[length] = 0;
  } else {
GcCharArray* cs = cast<GcCharArray>(t, data);
int j = 0;
for (unsigned i = 0; i < length; ++i) {
uint16_t c = cs->body()[string->offset(t) + start + i];
      if (!c) { // null char
chars[j++] = 0;
} else if (c < 0x80) { // ASCII char
chars[j++] = static_cast<char>(c);
} else if (c < 0x800) { // two-byte char
chars[j++] = static_cast<char>(0x0c0 | (c >> 6));
chars[j++] = static_cast<char>(0x080 | (c & 0x03f));
} else { // three-byte char
chars[j++] = static_cast<char>(0x0e0 | ((c >> 12) & 0x0f));
chars[j++] = static_cast<char>(0x080 | ((c >> 6) & 0x03f));
chars[j++] = static_cast<char>(0x080 | (c & 0x03f));
}
}
chars[j] = 0;
}
}
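// Trampoline suitable for vm::run: arguments[0] is the name of a
// class to resolve against the boot loader.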
uint64_t
resolveBootstrap(Thread* t, uintptr_t* arguments)
{
GcByteArray* name = cast<GcByteArray>(t, reinterpret_cast<object>(arguments[0]));
resolveSystemClass(t, roots(t)->bootLoader(), name);
return 1;
}
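// Java assignability check.  Interface targets are matched against
// b's interface table (resolving b first if it is only a bootstrap
// placeholder), array classes recurse on their element classes,
// and ordinary classes walk b's superclass chain.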
bool
isAssignableFrom(Thread* t, GcClass* a, GcClass* b)
{
assertT(t, a);
assertT(t, b);
  if (a == b) return true;
if (a->flags() & ACC_INTERFACE) {
if (b->vmFlags() & BootstrapFlag) {
uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(b->name()) };
if (run(t, resolveBootstrap, arguments) == 0) {
t->exception = 0;
return false;
}
}
GcArray* itable = cast<GcArray>(t, b->interfaceTable());
if (itable) {
      unsigned stride = (b->flags() & ACC_INTERFACE) ? 1 : 2;
for (unsigned i = 0; i < itable->length(); i += stride) {
if (itable->body()[i] == reinterpret_cast<object>(a)) {
return true;
}
}
}
} else if (a->arrayDimensions()) {
if (b->arrayDimensions()) {
      return isAssignableFrom
(t, a->arrayElementClass(), b->arrayElementClass());
    }
} else if ((a->vmFlags() & PrimitiveFlag)
== (b->vmFlags() & PrimitiveFlag))
{
for (; b; b = b->super()) {
if (b == a) {
return true;
}
}
}
return false;
}
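// instanceof semantics: null is not an instance of any class.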
bool
instanceOf(Thread* t, GcClass* class_, object o)
{
if (o == 0) {
return false;
} else {
return isAssignableFrom(t, class_, objectClass(t, o));
}
}
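// Return the method marked with ClassInitFlag (i.e. <clinit>) in
// class_'s method table, or 0 if there is none.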
GcMethod*
classInitializer(Thread* t, GcClass* class_)
{
if (GcArray* mtable = cast<GcArray>(t, class_->methodTable())) {
PROTECT(t, mtable);
for (unsigned i = 0; i < mtable->length(); ++i)
{
      GcMethod* o = cast<GcMethod>(t, mtable->body()[i]);
if (o->vmFlags() & ClassInitFlag) {
return o;
}
}
}
return 0;
}
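// Map a JVM field descriptor character to the VM's field code.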
unsigned
fieldCode(Thread* t, unsigned javaCode)
{
switch (javaCode) {
case 'B':
return ByteField;
case 'C':
return CharField;
case 'D':
return DoubleField;
case 'F':
return FloatField;
case 'I':
return IntField;
case 'J':
return LongField;
case 'S':
return ShortField;
case 'V':
return VoidField;
case 'Z':
return BooleanField;
case 'L':
case '[':
return ObjectField;
default: abort(t);
}
}
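// Map a field code to the argument type used by the processor
// layer.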
unsigned
fieldType(Thread* t, unsigned code)
{
switch (code) {
case VoidField:
return VOID_TYPE;
case ByteField:
case BooleanField:
return INT8_TYPE;
case CharField:
case ShortField:
return INT16_TYPE;
case DoubleField:
return DOUBLE_TYPE;
case FloatField:
return FLOAT_TYPE;
case IntField:
return INT32_TYPE;
case LongField:
return INT64_TYPE;
case ObjectField:
return POINTER_TYPE;
default: abort(t);
}
}
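// Size in bytes of a primitive value with the given field code.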
unsigned
primitiveSize(Thread* t, unsigned code)
{
switch (code) {
case VoidField:
return 0;
case ByteField:
case BooleanField:
return 1;
case CharField:
case ShortField:
return 2;
case FloatField:
case IntField:
return 4;
case DoubleField:
case LongField:
return 8;
default: abort(t);
}
}
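// Parse a class file image: read the constant pool, build a
// placeholder class, populate its super, interface, field, method,
// and attribute tables, then have the processor construct the real
// class (including its vtable) and update any references to the
// placeholder.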
GcClass*
parseClass(Thread* t, GcClassLoader* loader, const uint8_t* data, unsigned size,
           Gc::Type throwType)
{
PROTECT(t, loader);
class Client: public Stream::Client {
public:
Client(Thread* t): t(t) { }
virtual void NO_RETURN handleError() {
abort(t);
}
private:
Thread* t;
} client(t);
Stream s(&client, data, size);
uint32_t magic = s.read4();
expect(t, magic == 0xCAFEBABE);
unsigned minorVer = s.read2(); // minor version
unsigned majorVer = s.read2(); // major version
  if (DebugClassReader) {
fprintf(stderr, "read class (minor %d major %d)\n", minorVer, majorVer);
}
GcSingleton* pool = parsePool(t, s);
PROTECT(t, pool);
unsigned flags = s.read2();
unsigned name = s.read2();
  GcClass* class_ = (GcClass*)makeClass(t,
flags,
0, // VM flags
0, // fixed size
0, // array size
0, // array dimensions
0, // array element class
0, // runtime data index
0, // object mask
cast<GcReference>(t, singletonObject(t, pool, name - 1))->name(),
0, // source file
0, // super
0, // interfaces
0, // vtable
0, // fields
0, // methods
0, // addendum
0, // static table
loader,
0, // source
0);// vtable length
PROTECT(t, class_);
unsigned super = s.read2();
if (super) {
2014-05-29 04:17:25 +00:00
GcClass* sc = resolveClass
(t, loader, cast<GcReference>(t, singletonObject(t, pool, super - 1))->name(),
true, throwType);
class_->setSuper(t, sc);
class_->vmFlags()
|= (sc->vmFlags()
& (ReferenceFlag | WeakReferenceFlag | HasFinalizerFlag
| NeedInitFlag));
}
  if (DebugClassReader) {
fprintf(stderr, " flags %d name %d super %d\n", flags, name, super);
}
  parseInterfaceTable(t, s, class_, pool, throwType);
parseFieldTable(t, s, class_, pool);
parseMethodTable(t, s, class_, pool);
parseAttributeTable(t, s, class_, pool);
GcArray* vtable = cast<GcArray>(t, class_->virtualTable());
unsigned vtableLength = (vtable ? vtable->length() : 0);
GcClass* real = t->m->processor->makeClass
(t,
class_->flags(),
class_->vmFlags(),
class_->fixedSize(),
class_->arrayElementSize(),
class_->arrayDimensions(),
class_->arrayElementClass(),
class_->objectMask(),
class_->name(),
class_->sourceFile(),
class_->super(),
class_->interfaceTable(),
class_->virtualTable(),
class_->fieldTable(),
class_->methodTable(),
     class_->addendum(),
class_->staticTable(),
class_->loader(),
vtableLength);
PROTECT(t, real);
t->m->processor->initVtable(t, real);
updateClassTables(t, real, class_);
if (roots(t)->poolMap()) {
object bootstrapClass = hashMapFind
(t, roots(t)->bootstrapClassMap(), reinterpret_cast<object>(class_->name()),
byteArrayHash, byteArrayEqual);
hashMapInsert(
t,
        roots(t)->poolMap(),
bootstrapClass ? bootstrapClass : reinterpret_cast<object>(real),
reinterpret_cast<object>(pool),
objectHash);
}
return real;
}
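// Trampoline for parseClass, suitable for runRaw: arguments are
// the loader, the class file region, and the throw type.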
uint64_t
runParseClass(Thread* t, uintptr_t* arguments)
{
GcClassLoader* loader = cast<GcClassLoader>(t, reinterpret_cast<object>(arguments[0]));
System::Region* region = reinterpret_cast<System::Region*>(arguments[1]);
Gc::Type throwType = static_cast<Gc::Type>(arguments[2]);
return reinterpret_cast<uintptr_t>
(parseClass(t, loader, region->start(), region->length(), throwType));
}
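// Resolve a class by name against a system-style loader: check the
// loader's map, delegate to the parent loader if any, then locate
// and parse the ".class" file via the loader's Finder.  On failure
// this either throws throwType or returns 0, depending on throw_.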
GcClass*
resolveSystemClass(Thread* t, GcClassLoader* loader, GcByteArray* spec, bool throw_,
Gc::Type throwType)
{
PROTECT(t, loader);
PROTECT(t, spec);
ACQUIRE(t, t->m->classLock);
GcClass* class_ = cast<GcClass>(t, hashMapFind
(t, cast<GcHashMap>(t, loader->map()), reinterpret_cast<object>(spec), byteArrayHash, byteArrayEqual));
if (class_ == 0) {
PROTECT(t, class_);
if (loader->parent()) {
class_ = resolveSystemClass
(t, loader->parent(), spec, false);
if (class_) {
return class_;
}
}
if (spec->body()[0] == '[') {
class_ = resolveArrayClass(t, loader, spec, throw_, throwType);
} else {
GcSystemClassLoader* sysLoader = loader->as<GcSystemClassLoader>(t);
PROTECT(t, sysLoader);
THREAD_RUNTIME_ARRAY(t, char, file, spec->length() + 6);
memcpy(RUNTIME_ARRAY_BODY(file),
spec->body().begin(),
spec->length() - 1);
memcpy(RUNTIME_ARRAY_BODY(file) + spec->length() - 1,
".class",
7);
System::Region* region = static_cast<Finder*>
(sysLoader->finder())->find
(RUNTIME_ARRAY_BODY(file));
if (region) {
if (Verbose) {
fprintf(stderr, "parsing %s\n", spec->body().begin());
}
{ THREAD_RESOURCE(t, System::Region*, region, region->dispose());
uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(loader),
reinterpret_cast<uintptr_t>(region),
static_cast<uintptr_t>(throwType) };
// parse class file
class_ = cast<GcClass>
(t, reinterpret_cast<object>(runRaw(t, runParseClass, arguments)));
if (UNLIKELY(t->exception)) {
if (throw_) {
GcThrowable* e = t->exception;
t->exception = 0;
vm::throw_(t, e);
} else {
t->exception = 0;
return 0;
}
}
}
if (Verbose) {
fprintf(stderr, "done parsing %s: %p\n",
                spec->body().begin(),
class_);
}
{ const char* source = static_cast<Finder*>
(sysLoader->finder())->sourceUrl
(RUNTIME_ARRAY_BODY(file));
if (source) {
unsigned length = strlen(source);
GcByteArray* array = makeByteArray(t, length + 1);
memcpy(array->body().begin(), source, length);
array = internByteArray(t, array);
class_->setSource(t, array);
}
}
      GcClass* bootstrapClass = cast<GcClass>(t, hashMapFind
(t, roots(t)->bootstrapClassMap(), reinterpret_cast<object>(spec), byteArrayHash,
2014-05-29 04:17:25 +00:00
byteArrayEqual));
if (bootstrapClass) {
PROTECT(t, bootstrapClass);
updateBootstrapClass(t, bootstrapClass, class_);
class_ = bootstrapClass;
}
}
}
if (class_) {
hashMapInsert(t, cast<GcHashMap>(t, loader->map()), reinterpret_cast<object>(spec), reinterpret_cast<object>(class_), byteArrayHash);
updatePackageMap(t, class_);
} else if (throw_) {
throwNew(t, throwType, "%s", spec->body().begin());
}
}
return class_;
}
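// Look up a class in the loader's map without attempting to load
// it; returns 0 if this loader has not defined the class.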
GcClass*
findLoadedClass(Thread* t, GcClassLoader* loader, GcByteArray* spec)
{
PROTECT(t, loader);
PROTECT(t, spec);
ACQUIRE(t, t->m->classLock);
  return loader->map() ? cast<GcClass>(t, hashMapFind
(t, cast<GcHashMap>(t, loader->map()), reinterpret_cast<object>(spec), byteArrayHash, byteArrayEqual)) : 0;
}
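// General class resolution: system class loaders take the path
// above, while user-defined loaders are invoked through their
// loadClass(String) method, which is itself resolved and cached on
// first use.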
GcClass*
resolveClass(Thread* t, GcClassLoader* loader, GcByteArray* spec, bool throw_,
Gc::Type throwType)
{
if (objectClass(t, loader) == type(t, GcSystemClassLoader::Type)) {
return resolveSystemClass(t, loader, spec, throw_, throwType);
} else {
PROTECT(t, loader);
PROTECT(t, spec);
GcClass* c = findLoadedClass(t, loader, spec);
if (c) {
return c;
}
if (spec->body()[0] == '[') {
c = resolveArrayClass(t, loader, spec, throw_, throwType);
} else {
if (roots(t)->loadClassMethod() == 0) {
        GcMethod* m = resolveMethod
(t, roots(t)->bootLoader(), "java/lang/ClassLoader",
"loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
if (m) {
roots(t)->setLoadClassMethod(t, m);
          GcClass* classLoaderClass = type(t, GcClassLoader::Type);
if (classLoaderClass->vmFlags() & BootstrapFlag) {
resolveSystemClass
            (t, roots(t)->bootLoader(),
classLoaderClass->name());
}
}
}
      GcMethod* method = findVirtualMethod
(t, roots(t)->loadClassMethod(), objectClass(t, loader));
PROTECT(t, method);
THREAD_RUNTIME_ARRAY(t, char, s, spec->length());
replace('/', '.', RUNTIME_ARRAY_BODY(s), reinterpret_cast<char*>
             (spec->body().begin()));
GcString* specString = makeString(t, "%s", RUNTIME_ARRAY_BODY(s));
PROTECT(t, specString);
uintptr_t arguments[] = { reinterpret_cast<uintptr_t>(method),
reinterpret_cast<uintptr_t>(loader),
reinterpret_cast<uintptr_t>(specString) };
GcJclass* jc = cast<GcJclass>(t, reinterpret_cast<object>
(runRaw(t, invokeLoadClass, arguments)));
if (LIKELY(jc)) {
c = jc->vmClass();
} else if (t->exception) {
if (throw_) {
GcThrowable* e = type(t, throwType) == objectClass(t, t->exception)
? t->exception
: makeThrowable(t, throwType, specString, 0, t->exception);
t->exception = 0;
vm::throw_(t, e);
} else {
t->exception = 0;
}
}
}
if (LIKELY(c)) {
PROTECT(t, c);
saveLoadedClass(t, loader, c);
} else if (throw_) {
throwNew(t, throwType, "%s", spec->body().begin());
}
return c;
}
}
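// Convenience overload taking a C-string name and descriptor;
// throws NoSuchMethodError if the method is not found.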
GcMethod*
resolveMethod(Thread* t, GcClass* class_, const char* methodName,
const char* methodSpec)
{
PROTECT(t, class_);
GcByteArray* name = makeByteArray(t, methodName);
PROTECT(t, name);
  GcByteArray* spec = makeByteArray(t, methodSpec);
GcMethod* method = cast<GcMethod>(t, findMethodInClass(t, class_, name, spec));
if (method == 0) {
throwNew(t, GcNoSuchMethodError::Type, "%s %s not found in %s",
methodName, methodSpec, class_->name()->body().begin());
} else {
return method;
}
}
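// Find a field by name and descriptor, searching class_'s
// interfaces and then its superclass chain; throws
// NoSuchFieldError on failure.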
GcField*
resolveField(Thread* t, GcClass* class_, const char* fieldName,
const char* fieldSpec)
{
PROTECT(t, class_);
GcByteArray* name = makeByteArray(t, fieldName);
PROTECT(t, name);
GcByteArray* spec = makeByteArray(t, fieldSpec);
PROTECT(t, spec);
  GcField* field = cast<GcField>(t, findInInterfaces(t, class_, name, spec, findFieldInClass));
GcClass* c = class_;
PROTECT(t, c);
for (; c != 0 and field == 0; c = c->super()) {
field = cast<GcField>(t, findFieldInClass(t, c, name, spec));
}
if (field == 0) {
throwNew(t, GcNoSuchFieldError::Type, "%s %s not found in %s",
fieldName, fieldSpec, class_->name()->body().begin());
} else {
return field;
}
}
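// True if c's static initializer still needs to run and the
// current thread is not already running it.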
bool
classNeedsInit(Thread* t, GcClass* c)
{
if (c->vmFlags() & NeedInitFlag) {
if (c->vmFlags() & InitFlag) {
      // the class is currently being initialized. If this is the thread
// which is initializing it, we should not try to initialize it
// recursively. Otherwise, we must wait for the responsible
// thread to finish.
for (Thread::ClassInitStack* s = t->classInitStack; s; s = s->next) {
if (s->class_ == c) {
return false;
}
}
}
return true;
} else {
return false;
}
}
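// Claim the right to initialize c.  Returns true if the caller
// should run the initializer, blocks while another thread is
// initializing, and throws NoClassDefFoundError if a previous
// attempt failed.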
bool
preInitClass(Thread* t, GcClass* c)
{
int flags = c->vmFlags();
loadMemoryBarrier();
if (flags & NeedInitFlag) {
PROTECT(t, c);
ACQUIRE(t, t->m->classLock);
if (c->vmFlags() & NeedInitFlag) {
if (c->vmFlags() & InitFlag) {
        // If the class is currently being initialized and this is the thread
// which is initializing it, we should not try to initialize it
// recursively.
if (isInitializing(t, c)) {
return false;
}
// some other thread is on the job - wait for it to finish.
while (c->vmFlags() & InitFlag) {
ENTER(t, Thread::IdleState);
t->m->classLock->wait(t->systemThread, 0);
}
} else if (c->vmFlags() & InitErrorFlag) {
throwNew(t, GcNoClassDefFoundError::Type, "%s",
c->name()->body().begin());
} else {
c->vmFlags() |= InitFlag;
return true;
}
}
}
return false;
}
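// Finish initialization of c: on a pending exception, record the
// failure and rethrow it wrapped in ExceptionInInitializerError;
// otherwise clear the init flags.  Either way, wake any threads
// waiting in preInitClass.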
void
postInitClass(Thread* t, GcClass* c)
{
PROTECT(t, c);
ACQUIRE(t, t->m->classLock);
if (t->exception
      and instanceOf(t, type(t, GcException::Type), reinterpret_cast<object>(t->exception))) {
c->vmFlags() |= NeedInitFlag | InitErrorFlag;
c->vmFlags() &= ~InitFlag;
GcThrowable* exception = t->exception;
t->exception = 0;
    GcExceptionInInitializerError* initException
      = makeThrowable(t, GcExceptionInInitializerError::Type, 0, 0, exception)
        ->as<GcExceptionInInitializerError>(t);
    initException->setException(t, exception->cause());

    throw_(t, initException->as<GcThrowable>(t));
} else {
c->vmFlags() &= ~(NeedInitFlag | InitFlag);
}
t->m->classLock->notifyAll(t->systemThread);
}
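// Run static initialization for c and its superclasses, using
// preInitClass and postInitClass to coordinate with other threads.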
void
initClass(Thread* t, GcClass* c)
{
PROTECT(t, c);
GcClass* super = c->super();
if (super) {
initClass(t, super);
}
if (preInitClass(t, c)) {
OBJECT_RESOURCE(t, c, postInitClass(t, cast<GcClass>(t, c)));
GcMethod* initializer = classInitializer(t, c);
if (initializer) {
Thread::ClassInitStack stack(t, c);
t->m->processor->invoke(t, initializer, 0);
}
}
}
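// Resolve the array class whose elements are elementClass, caching
// the result in the element class's runtime data.  The spec is
// "[" + name for array elements and "[L" + name + ";" otherwise.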
GcClass*
resolveObjectArrayClass(Thread* t, GcClassLoader* loader, GcClass* elementClass)
{
PROTECT(t, loader);
PROTECT(t, elementClass);
{ GcClass* arrayClass = cast<GcClass>(t, getClassRuntimeData(t, elementClass)->arrayClass());
if (arrayClass) {
return arrayClass;
}
}
GcByteArray* elementSpec = elementClass->name();
PROTECT(t, elementSpec);
GcByteArray* spec;
if (elementSpec->body()[0] == '[') {
spec = makeByteArray(t, elementSpec->length() + 1);
spec->body()[0] = '[';
memcpy(&spec->body()[1],
elementSpec->body().begin(),
elementSpec->length());
} else {
spec = makeByteArray(t, elementSpec->length() + 3);
spec->body()[0] = '[';
spec->body()[1] = 'L';
memcpy(&spec->body()[2],
elementSpec->body().begin(),
elementSpec->length() - 1);
spec->body()[elementSpec->length() + 1] = ';';
spec->body()[elementSpec->length() + 2] = 0;
}
  GcClass* arrayClass = resolveClass(t, loader, spec);
getClassRuntimeData(t, elementClass)->setArrayClass(t, reinterpret_cast<object>(arrayClass));
return arrayClass;
}
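// Allocate an array of count references whose class is the array
// class corresponding to elementClass.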
object
makeObjectArray(Thread* t, GcClass* elementClass, unsigned count)
{
  GcClass* arrayClass = resolveObjectArrayClass
(t, elementClass->loader(), elementClass);
PROTECT(t, arrayClass);
object array = reinterpret_cast<object>(makeArray(t, count));
setObjectClass(t, array, arrayClass);
return array;
}
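// Accessor adapters passed to findInTable so it can extract the name and
// spec byte arrays from field and method table entries.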
static GcByteArray* getFieldName(Thread* t, object obj) {
  return reinterpret_cast<GcByteArray*>(cast<GcField>(t, obj)->name());
}

static GcByteArray* getFieldSpec(Thread* t, object obj) {
  return reinterpret_cast<GcByteArray*>(cast<GcField>(t, obj)->spec());
}

static GcByteArray* getMethodName(Thread* t, object obj) {
  return reinterpret_cast<GcByteArray*>(cast<GcMethod>(t, obj)->name());
}

static GcByteArray* getMethodSpec(Thread* t, object obj) {
  return reinterpret_cast<GcByteArray*>(cast<GcMethod>(t, obj)->spec());
}
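// Searches a single class's field or method table for a member matching both
// name and spec; returns 0 if there is no match.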
object
findFieldInClass(Thread* t, GcClass* class_, GcByteArray* name, GcByteArray* spec)
{
return findInTable
(t, cast<GcArray>(t, class_->fieldTable()), name, spec, getFieldName, getFieldSpec);
}
object
findMethodInClass(Thread* t, GcClass* class_, GcByteArray* name, GcByteArray* spec)
{
return findInTable
(t, cast<GcArray>(t, class_->methodTable()), name, spec, getMethodName, getMethodSpec);
}
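// Walks the class hierarchy for a member: an interface's virtual table first
// (when present), then the superclass chain, and finally, for field lookups
// only, the implemented interfaces.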
object
findInHierarchyOrNull(Thread* t, GcClass* class_, GcByteArray* name, GcByteArray* spec,
object (*find)(Thread*, GcClass*, GcByteArray*, GcByteArray*))
{
GcClass* originalClass = class_;
object o = 0;
if ((class_->flags() & ACC_INTERFACE)
and class_->virtualTable())
{
o = findInTable
(t, cast<GcArray>(t, class_->virtualTable()), name, spec, getMethodName, getMethodSpec);
}
if (o == 0) {
for (; o == 0 and class_; class_ = class_->super()) {
o = find(t, class_, name, spec);
}
if (o == 0 and find == findFieldInClass) {
o = findInInterfaces(t, originalClass, name, spec, find);
}
}
return o;
}
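// Counts the argument stack slots a method spec requires: longs and doubles
// take two slots, everything else one, plus one for the implicit "this" of
// non-static methods.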
unsigned
parameterFootprint(Thread* t, const char* s, bool static_)
{
unsigned footprint = 0;
for (MethodSpecIterator it(t, s); it.hasNext();) {
switch (*it.next()) {
case 'J':
case 'D':
footprint += 2;
break;
default:
++ footprint;
break;
}
}
if (not static_) {
++ footprint;
}
return footprint;
}
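// Registers a native finalizer for target on the machine's finalizer list,
// under referenceLock. The function pointer is copied through a void* with
// memcpy, presumably to avoid casting directly between function and data
// pointers.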
void
addFinalizer(Thread* t, object target, void (*finalize)(Thread*, object))
{
PROTECT(t, target);
ACQUIRE(t, t->m->referenceLock);
void* function;
memcpy(&function, &finalize, BytesPerWord);
GcFinalizer* f = makeFinalizer(t, 0, function, 0, 0, 0);
f->target() = target;
f->next() = reinterpret_cast<object>(t->m->finalizers);
t->m->finalizers = f;
}
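// Returns the monitor associated with o in the global monitor map, creating
// one if createNew is set. Creation re-checks the map under ExclusiveState so
// racing threads agree on a single monitor, and registers removeMonitor so
// the map entry is dropped when o is collected.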
GcMonitor*
objectMonitor(Thread* t, object o, bool createNew)
{
  assertT(t, t->state == Thread::ActiveState);

  object m = hashMapFind
    (t, roots(t)->monitorMap(), o, objectHash, objectEqual);

  if (m) {
    if (DebugMonitors) {
      fprintf(stderr, "found monitor %p for object %x\n", m, objectHash(t, o));
    }

    return cast<GcMonitor>(t, m);
  } else if (createNew) {
    PROTECT(t, o);
    PROTECT(t, m);

    { ENTER(t, Thread::ExclusiveState);

      m = hashMapFind
        (t, roots(t)->monitorMap(), o, objectHash, objectEqual);

      if (m) {
        if (DebugMonitors) {
          fprintf(stderr, "found monitor %p for object %x\n",
                  m, objectHash(t, o));
        }

        return cast<GcMonitor>(t, m);
      }

      object head = reinterpret_cast<object>(makeMonitorNode(t, 0, 0));
      m = reinterpret_cast<object>(makeMonitor(t, 0, 0, 0, head, head, 0));

      if (DebugMonitors) {
        fprintf(stderr, "made monitor %p for object %x\n", m,
                objectHash(t, o));
      }

      hashMapInsert(t, roots(t)->monitorMap(), o, m, objectHash);

      addFinalizer(t, o, removeMonitor);
    }

    return cast<GcMonitor>(t, m);
  } else {
    return 0;
  }
}
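// Interns string s under referenceLock: returns the canonical instance if the
// string map already holds one (entries are weak references), otherwise
// inserts s and registers removeString to drop it on collection.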
object
intern(Thread* t, object s)
{
  PROTECT(t, s);

  ACQUIRE(t, t->m->referenceLock);

  GcTriple* n = hashMapFindNode
    (t, roots(t)->stringMap(), s, stringHash, stringEqual);

  if (n) {
    return reinterpret_cast<object>(cast<GcJreference>(t, n->first())->target());
  } else {
    hashMapInsert(t, roots(t)->stringMap(), s, 0, stringHash);
addFinalizer(t, s, removeString);
return s;
}
}
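// Reports the heap offsets of pointer fields in o to walker w, starting at
// field index 'start': via the class's object mask for ordinary objects, the
// singleton mask for singletons, and the processor's continuation walker for
// continuations.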
void
walk(Thread* t, Heap::Walker* w, object o, unsigned start)
{
  GcClass* class_ = t->m->heap->follow(objectClass(t, o));
  GcIntArray* objectMask = t->m->heap->follow(class_->objectMask());

  bool more = true;

  if (objectMask) {
    unsigned fixedSize = class_->fixedSize();
    unsigned arrayElementSize = class_->arrayElementSize();
    unsigned arrayLength
      = (arrayElementSize ?
         fieldAtOffset<uintptr_t>(o, fixedSize - BytesPerWord) : 0);

    THREAD_RUNTIME_ARRAY(t, uint32_t, mask, objectMask->length());
    memcpy(RUNTIME_ARRAY_BODY(mask), objectMask->body().begin(),
           objectMask->length() * 4);

    more = ::walk(t, w, RUNTIME_ARRAY_BODY(mask), fixedSize, arrayElementSize,
                  arrayLength, start);
  } else if (class_->vmFlags() & SingletonFlag) {
    GcSingleton* s = cast<GcSingleton>(t, o);
    unsigned length = s->length();
    if (length) {
      more = ::walk(t, w, singletonMask(t, s),
                    (singletonCount(t, s) + 2) * BytesPerWord, 0, 0, start);
    } else if (start == 0) {
      more = w->visit(0);
    }
  } else if (start == 0) {
    more = w->visit(0);
  }

  if (more and class_->vmFlags() & ContinuationFlag) {
t->m->processor->walkContinuationBody(t, w, o, start);
}
}
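// Returns the offset of the next pointer field after 'previous' in o, or -1
// when there are no more.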
int
walkNext(Thread* t, object o, int previous)
{
class Walker: public Heap::Walker {
public:
Walker(): value(-1) { }
bool visit(unsigned offset) {
value = offset;
return false;
}
int value;
} walker;
walk(t, &walker, o, previous + 1);
return walker.value;
}
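// Visits the machine-wide GC roots: the type and root tables, each thread's
// roots, and all non-weak JNI references.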
void
visitRoots(Machine* m, Heap::Visitor* v)
{
v->visit(&(m->types));
v->visit(&(m->roots));
for (Thread* t = m->rootThread; t; t = t->peer) {
::visitRoots(t, v);
}
for (Reference* r = m->jniReferences; r; r = r->next) {
if (not r->weak) {
v->visit(&(r->target));
}
}
}
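// fprintf-style helper that formats into an exactly-sized buffer (measured
// with _vscprintf on Windows, vsnprintf(0, 0, ...) elsewhere) and mirrors the
// result to the Windows debugger via OutputDebugStringA.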
void
logTrace(FILE* f, const char* fmt, ...)
{
  va_list a;
  va_start(a, fmt);
#ifdef PLATFORM_WINDOWS
  const unsigned length = _vscprintf(fmt, a);
#else
  const unsigned length = vsnprintf(0, 0, fmt, a);
#endif
  va_end(a);

  RUNTIME_ARRAY(char, buffer, length + 1);
  va_start(a, fmt);
  vsnprintf(RUNTIME_ARRAY_BODY(buffer), length + 1, fmt, a);
  va_end(a);
  RUNTIME_ARRAY_BODY(buffer)[length] = 0;

  ::fprintf(f, "%s", RUNTIME_ARRAY_BODY(buffer));

#ifdef PLATFORM_WINDOWS
  ::OutputDebugStringA(RUNTIME_ARRAY_BODY(buffer));
#endif
}
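// Prints exception and its chain of causes to the error log, one
// "caused by:" block per throwable, resolving a line number for each trace
// element.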
void
printTrace(Thread* t, GcThrowable* exception)
{
  if (exception == 0) {
    exception = makeThrowable(t, GcNullPointerException::Type);
  }

  for (GcThrowable* e = exception; e; e = e->cause()) {
    if (e != exception) {
      logTrace(errorLog(t), "caused by: ");
    }

    logTrace(errorLog(t), "%s", objectClass(t, e)->name()->body().begin());

    if (e->message()) {
      GcString* m = e->message();
      THREAD_RUNTIME_ARRAY(t, char, message, m->length(t) + 1);
      stringChars(t, m, RUNTIME_ARRAY_BODY(message));
      logTrace(errorLog(t), ": %s\n", RUNTIME_ARRAY_BODY(message));
    } else {
      logTrace(errorLog(t), "\n");
    }

    object trace = reinterpret_cast<object>(e->trace());
    if (trace) {
      for (unsigned i = 0; i < objectArrayLength(t, trace); ++i) {
        // use a distinct name so the trace element doesn't shadow the
        // throwable 'e' from the enclosing loop
        GcTraceElement* element
          = cast<GcTraceElement>(t, objectArrayBody(t, trace, i));
        GcMethod* m = cast<GcMethod>(t, element->method());
        const int8_t* class_ = m->class_()->name()->body().begin();
        const int8_t* method = m->name()->body().begin();
        int line = t->m->processor->lineNumber
          (t, m, element->ip());

        logTrace(errorLog(t), "  at %s.%s ", class_, method);

        switch (line) {
        case NativeLine:
          logTrace(errorLog(t), "(native)\n");
          break;
        case UnknownLine:
          logTrace(errorLog(t), "(unknown line)\n");
          break;
        default:
          logTrace(errorLog(t), "(line %d)\n", line);
        }
      }
    }

    if (e == e->cause()) {
      break;
    }
  }

  ::fflush(errorLog(t));
}
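// Builds an object array of trace elements, one per frame visited by walker,
// pairing each frame's method with its instruction pointer.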
object
makeTrace(Thread* t, Processor::StackWalker* walker)
{
class Visitor: public Processor::StackVisitor {
public:
Visitor(Thread* t): t(t), trace(0), index(0), protector(t, &trace) { }
virtual bool visit(Processor::StackWalker* walker) {
if (trace == 0) {
trace = makeObjectArray(t, walker->count());
assertT(t, trace);
}
      GcTraceElement* e = makeTraceElement(t, reinterpret_cast<object>(walker->method()), walker->ip());

      assertT(t, index < objectArrayLength(t, reinterpret_cast<object>(trace)));

      reinterpret_cast<GcArray*>(trace)->setBodyElement(t, index, reinterpret_cast<object>(e));
++ index;
return true;
}
Thread* t;
object trace;
unsigned index;
Thread::SingleProtector protector;
} v(t);
walker->walk(&v);
return v.trace ? v.trace : makeObjectArray(t, 0);
}
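// Convenience overload: walks target's stack from the top and returns the
// resulting trace, or an empty array if the walk yields nothing.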
object
makeTrace(Thread* t, Thread* target)
{
class Visitor: public Processor::StackVisitor {
public:
Visitor(Thread* t): t(t), trace(0) { }
virtual bool visit(Processor::StackWalker* walker) {
trace = vm::makeTrace(t, walker);
return false;
}
Thread* t;
object trace;
} v(t);
t->m->processor->walkStack(target, &v);
return v.trace ? v.trace : makeObjectArray(t, 0);
}
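// Main loop of the dedicated finalizer thread: sleeps on stateLock until
// objects are queued for finalization or cleaning (or the thread is shut
// down), then drains both queues outside the lock.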
void
runFinalizeThread(Thread* t)
{
  GcFinalizer* finalizeList = 0;
  PROTECT(t, finalizeList);

  GcCleaner* cleanList = 0;
  PROTECT(t, cleanList);

  while (true) {
    { ACQUIRE(t, t->m->stateLock);

      while (t->m->finalizeThread
             and roots(t)->objectsToFinalize() == 0
             and roots(t)->objectsToClean() == 0)
      {
        ENTER(t, Thread::IdleState);
        t->m->stateLock->wait(t->systemThread, 0);
      }

      if (t->m->finalizeThread == 0) {
        return;
      } else {
        finalizeList = roots(t)->objectsToFinalize();
        roots(t)->setObjectsToFinalize(t, 0);

        cleanList = roots(t)->objectsToClean();
        roots(t)->setObjectsToClean(t, 0);
      }
    }

    for (; finalizeList; finalizeList = finalizeList->queueNext()) {
      finalizeObject(t, finalizeList->queueTarget(), "finalize");
    }

    for (; cleanList; cleanList = cleanList->queueNext()) {
      finalizeObject(t, reinterpret_cast<object>(cleanList), "clean");
}
}
}
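// Decodes a UTF-8 byte sequence into a string object; decode errors are
// deliberately ignored by the no-op error handler.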
object
parseUtf8(Thread* t, const char* data, unsigned length)
{
class Client: public Stream::Client {
public:
Client(Thread* t): t(t) { }
virtual void handleError() {
if (false) abort(t);
}
private:
Thread* t;
} client(t);
Stream s(&client, reinterpret_cast<const uint8_t*>(data), length);
return ::parseUtf8(t, s, length);
}
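// Variant for null-terminated byte arrays: pure-ASCII input is returned
// as-is, while anything with the high bit set takes the slow path through a
// stream whose protector tracks the array, since the collector may move it
// mid-parse.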
object
parseUtf8(Thread* t, GcByteArray* array)
{
for (unsigned i = 0; i < array->length() - 1; ++i) {
if (array->body()[i] & 0x80) {
goto slow_path;
}
}
return reinterpret_cast<object>(array);
slow_path:
class Client: public Stream::Client {
public:
Client(Thread* t): t(t) { }
virtual void handleError() {
if (false) abort(t);
}
private:
Thread* t;
} client(t);
class MyStream: public AbstractStream {
public:
class MyProtector: public Thread::Protector {
public:
MyProtector(Thread* t, MyStream* s):
Protector(t), s(s)
{ }
virtual void visit(Heap::Visitor* v) {
v->visit(&(s->array));
}
MyStream* s;
};
MyStream(Thread* t, Client* client, GcByteArray* array):
AbstractStream(client, array->length() - 1),
array(array),
protector(t, this)
{ }
virtual void copy(uint8_t* dst, unsigned offset, unsigned size) {
memcpy(dst, &array->body()[offset], size);
}
GcByteArray* array;
MyProtector protector;
} s(t, &client, array);
return ::parseUtf8(t, s, array->length() - 1);
}
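// Returns the method 'target' frames up the current stack (defaulting to two
// frames when target is -1), optionally skipping Method.invoke frames so
// reflective calls don't obscure the real caller.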
GcMethod*
getCaller(Thread* t, unsigned target, bool skipMethodInvoke)
{
if (static_cast<int>(target) == -1) {
target = 2;
}
class Visitor: public Processor::StackVisitor {
public:
Visitor(Thread* t, unsigned target, bool skipMethodInvoke):
t(t), method(0), count(0), target(target),
skipMethodInvoke(skipMethodInvoke)
{ }
virtual bool visit(Processor::StackWalker* walker) {
if (skipMethodInvoke
and walker->method()->class_()
== type(t, GcJmethod::Type)
and strcmp(walker->method()->name()->body().begin(),
reinterpret_cast<const int8_t*>("invoke")) == 0) {
return true;
}
if (count == target) {
method = walker->method();
return false;
} else {
++ count;
return true;
}
}
Thread* t;
GcMethod* method;
unsigned count;
unsigned target;
bool skipMethodInvoke;
} v(t, target, skipMethodInvoke);
t->m->processor->walkStack(t, &v);
return v.method;
}
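// Parses a class from buffer and records it in the loader's map of loaded
// classes. The commented-out block below, kept for debugging, dumps the
// incoming bytes to /tmp for offline inspection.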
object
defineClass(Thread* t, GcClassLoader* loader, const uint8_t* buffer, unsigned length)
{
PROTECT(t, loader);
object c = reinterpret_cast<object>(parseClass(t, loader, buffer, length));
// char name[byteArrayLength(t, className(t, c))];
// memcpy(name, &byteArrayBody(t, className(t, c), 0),
// byteArrayLength(t, className(t, c)));
// replace('/', '-', name);
// const unsigned BufferSize = 1024;
// char path[BufferSize];
// snprintf(path, BufferSize, "/tmp/avian-define-class/%s.class", name);
// FILE* file = fopen(path, "wb");
// if (file) {
// fwrite(buffer, length, 1, file);
// fclose(file);
// }
PROTECT(t, c);
saveLoadedClass(t, loader, cast<GcClass>(t, c));
return c;
}
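// Recursively fills the inner dimensions of a multi-dimensional array: each
// element of the current dimension gets a freshly allocated array of the
// component class (the spec minus its leading '['), recursing until the last
// dimension, which stays zero-initialized.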
void
populateMultiArray(Thread* t, object array, int32_t* counts,
unsigned index, unsigned dimensions)
{
if (index + 1 == dimensions or counts[index] == 0) {
return;
}
PROTECT(t, array);
  GcByteArray* spec = objectClass(t, array)->name();
  PROTECT(t, spec);

  GcByteArray* elementSpec = makeByteArray(t, spec->length() - 1);
  memcpy(elementSpec->body().begin(),
         &spec->body()[1],
         spec->length() - 1);

  GcClass* class_ = resolveClass
    (t, objectClass(t, array)->loader(), elementSpec);
  PROTECT(t, class_);

  for (int32_t i = 0; i < counts[index]; ++i) {
    GcArray* a = makeArray
      (t, ceilingDivide
       (counts[index + 1] * class_->arrayElementSize(), BytesPerWord));
    a->length() = counts[index + 1];
    setObjectClass(t, reinterpret_cast<object>(a), class_);
    setField(t, array, ArrayBody + (i * BytesPerWord), reinterpret_cast<object>(a));

    populateMultiArray(t, reinterpret_cast<object>(a), counts, index + 1, dimensions);
}
}
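// Lazily creates the monitor guarding a thread's interrupted flag,
// double-checked under referenceLock; the load and store barriers pair the
// unlocked fast-path read with the publishing store.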
object
interruptLock(Thread* t, GcThread* thread)
{
  object lock = thread->interruptLock();

  loadMemoryBarrier();

  if (lock == 0) {
    PROTECT(t, thread);
    ACQUIRE(t, t->m->referenceLock);

    if (thread->interruptLock() == 0) {
      object head = reinterpret_cast<object>(makeMonitorNode(t, 0, 0));
      // named distinctly so it doesn't shadow the 'lock' read above
      GcMonitor* newLock = makeMonitor(t, 0, 0, 0, head, head, 0);

      storeStoreMemoryBarrier();

      thread->setInterruptLock(t, reinterpret_cast<object>(newLock));
    }
  }

  return thread->interruptLock();
}
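// Clears the current thread's interrupted flag under its interrupt lock.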
void
clearInterrupted(Thread* t)
{
monitorAcquire(t, cast<GcMonitor>(t, interruptLock(t, t->javaThread)));
t->javaThread->interrupted() = false;
monitorRelease(t, cast<GcMonitor>(t, interruptLock(t, t->javaThread)));
}
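// Sets thread's interrupted flag and, if the thread is currently live,
// interrupts its system thread so blocking waits wake up.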
void
threadInterrupt(Thread* t, GcThread* thread)
{
PROTECT(t, thread);
monitorAcquire(t, cast<GcMonitor>(t, interruptLock(t, thread)));
Thread* p = reinterpret_cast<Thread*>(thread->peer());
if (p) {
interrupt(t, p);
}
thread->interrupted() = true;
monitorRelease(t, cast<GcMonitor>(t, interruptLock(t, thread)));
}
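// Returns thread's interrupted flag under its interrupt lock, optionally
// clearing it within the same critical section.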
bool
threadIsInterrupted(Thread* t, GcThread* thread, bool clear)
{
PROTECT(t, thread);
monitorAcquire(t, cast<GcMonitor>(t, interruptLock(t, thread)));
bool v = thread->interrupted();
if (clear) {
thread->interrupted() = false;
}
monitorRelease(t, cast<GcMonitor>(t, interruptLock(t, thread)));
return v;
}
void
noop()
{ }
#include "type-constructors.cpp"
} // namespace vm
// for debugging
AVIAN_EXPORT void
vmfPrintTrace(Thread* t, FILE* out)
{
class Visitor: public Processor::StackVisitor {
public:
Visitor(Thread* t, FILE* out): t(t), out(out) { }
virtual bool visit(Processor::StackWalker* walker) {
const int8_t* class_ = walker->method()->class_()->name()->body().begin();
const int8_t* method = walker->method()->name()->body().begin();
int line = t->m->processor->lineNumber
(t, walker->method(), walker->ip());
fprintf(out, " at %s.%s ", class_, method);
switch (line) {
case NativeLine:
fprintf(out, "(native)\n");
break;
case UnknownLine:
fprintf(out, "(unknown line)\n");
break;
default:
fprintf(out, "(line %d)\n", line);
}
return true;
}
Thread* t;
FILE* out;
} v(t, out);
fprintf(out, "debug trace for thread %p\n", t);
t->m->processor->walkStack(t, &v);
fflush(out);
}
AVIAN_EXPORT void
vmPrintTrace(Thread* t)
{
vmfPrintTrace(t, stderr);
}
// also for debugging
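// Maps a source line to a bytecode address using m's line number table,
// returning the previous entry's ip when the exact line has no entry.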
AVIAN_EXPORT void*
vmAddressFromLine(GcMethod* m, unsigned line)
{
  GcCode* code = m->code();
  printf("code: %p\n", code);

  GcLineNumberTable* lnt = code->lineNumberTable();
  printf("lnt: %p\n", lnt);

  if (lnt) {
    unsigned last = 0;
    unsigned bottom = 0;
    unsigned top = lnt->length();
    for (unsigned i = bottom; i < top; ++i) {
      uint64_t ln = lnt->body()[i];

      if (lineNumberLine(ln) == line) {
        return reinterpret_cast<void*>(lineNumberIp(ln));
      } else if (lineNumberLine(ln) > line) {
        return reinterpret_cast<void*>(last);
      }

      last = lineNumberIp(ln);
    }
  }
return 0;
}