fix unintentionally retained finalizables and improve low mem performance

Objects which are eligible for finalization must be retained until
after their finalize methods are called. However, the VM must
determine the entire set of such objects before retaining any of
them; otherwise, retaining one object may cause others to become
reachable and thus be considered ineligible for finalization, even
though they are reachable only via other finalizable objects. The
end result of this mistake is that only a few of the finalizable
objects are recognized at each GC cycle, so it takes many such cycles
to find them all, and if new objects become finalizable at a faster
rate, the VM never catches up and eventually runs out of memory.

This patch fixes the above mistake and also includes tuning to
minimize the need for GC in low-memory situations.
Joel Dice 2013-03-07 20:17:05 -07:00
parent ca84dd26f1
commit 3c44cdc50b
4 changed files with 224 additions and 127 deletions
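
To make the mistake concrete, here is a minimal sketch of the two-pass
scan this commit introduces in postVisit (the fourth file below). The
Finalizer struct and the isReachable/retainForFinalization helpers are
hypothetical stand-ins for the VM's actual finalizer list and its
Heap::status/visitor machinery:

struct Finalizer {
  void* target;
  Finalizer* next;
};

// Hypothetical helpers standing in for Heap::status() and the visitor
// that retains an object graph.
bool isReachable(void* target);
void retainForFinalization(void* target);

void processFinalizers(Finalizer** list)
{
  // Pass 1: unlink every finalizer whose target is unreachable and
  // collect them on a private list.  Nothing is retained yet, so
  // retaining one target cannot make another one look reachable.
  Finalizer* unreachable = 0;
  for (Finalizer** p = list; *p;) {
    if (not isReachable((*p)->target)) {
      Finalizer* f = *p;
      *p = f->next;
      f->next = unreachable;
      unreachable = f;
    } else {
      p = &((*p)->next);
    }
  }

  // Pass 2: the set is now fixed; retain each queued target so its
  // finalize method can run after this GC cycle.
  for (Finalizer* f = unreachable; f; f = f->next) {
    retainForFinalization(f->target);
  }
}

Because pass 1 fixes the set of unreachable targets before pass 2
retains anything, retention can no longer hide other finalizable
objects behind a resurrected one.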

File 1 of 4: the Heap allocator interface

@@ -60,13 +60,15 @@ class Heap: public Allocator {
virtual void setClient(Client* client) = 0;
virtual void setImmortalHeap(uintptr_t* start, unsigned sizeInWords) = 0;
virtual unsigned limit() = 0;
virtual bool limitExceeded() = 0;
virtual void collect(CollectionType type, unsigned footprint) = 0;
virtual void* tryAllocateFixed(Allocator* allocator, unsigned sizeInWords,
bool objectMask, unsigned* totalInBytes) = 0;
virtual void* tryAllocateImmortalFixed(Allocator* allocator,
unsigned sizeInWords, bool objectMask,
unsigned* totalInBytes) = 0;
virtual bool limitExceeded(int pendingAllocation = 0) = 0;
virtual void collect(CollectionType type, unsigned footprint,
int pendingAllocation) = 0;
virtual unsigned fixedFootprint(unsigned sizeInWords, bool objectMask) = 0;
virtual void* allocateFixed(Allocator* allocator, unsigned sizeInWords,
bool objectMask) = 0;
virtual void* allocateImmortalFixed(Allocator* allocator,
unsigned sizeInWords,
bool objectMask) = 0;
virtual void mark(void* p, unsigned offset, unsigned count) = 0;
virtual void pad(void* p) = 0;
virtual void* follow(void* p) = 0;

File 2 of 4: VM internals header (debug constants, throw_, declarations)

@@ -104,6 +104,8 @@ const bool DebugStack = false;
const bool DebugMonitors = false;
const bool DebugReferences = false;
const bool AbortOnOutOfMemoryError = false;
const uintptr_t HashTakenMark = 1;
const uintptr_t ExtendedMark = 2;
const uintptr_t FixedMark = 3;
@@ -1694,7 +1696,7 @@ release(Thread* t, Reference* r)
}
void
collect(Thread* t, Heap::CollectionType type);
collect(Thread* t, Heap::CollectionType type, int pendingAllocation = 0);
void
shutDown(Thread* t);
@@ -2689,6 +2691,16 @@ makeThrowable(Thread* t, Machine::Type type, const char* format, ...)
void
popResources(Thread* t);
} // namespace vm
JNIEXPORT void
vmPrintTrace(vm::Thread* t);
namespace vm {
void
dumpHeap(Thread* t, FILE* out);
inline void NO_RETURN
throw_(Thread* t, object e)
{
@@ -2699,6 +2711,28 @@ throw_(Thread* t, object e)
t->exception = e;
if (objectClass(t, e) == type(t, Machine::OutOfMemoryErrorType)) {
#ifdef AVIAN_HEAPDUMP
if (not t->m->dumpedHeapOnOOM) {
t->m->dumpedHeapOnOOM = true;
const char* path = findProperty(t, "avian.heap.dump");
if (path) {
FILE* out = vm::fopen(path, "wb");
if (out) {
dumpHeap(t, out);
fclose(out);
}
}
}
#endif//AVIAN_HEAPDUMP
if (AbortOnOutOfMemoryError) {
fprintf(stderr, "OutOfMemoryError\n");
vmPrintTrace(t);
abort();
}
}
// printTrace(t, e);
popResources(t);
@@ -3832,9 +3866,6 @@ getCaller(Thread* t, unsigned target, bool skipMethodInvoke = false);
object
defineClass(Thread* t, object loader, const uint8_t* buffer, unsigned length);
void
dumpHeap(Thread* t, FILE* out);
inline object
methodClone(Thread* t, object method)
{
@@ -3919,9 +3950,6 @@ errorLog(Thread* t)
} // namespace vm
JNIEXPORT void
vmPrintTrace(vm::Thread* t);
JNIEXPORT void*
vmAddressFromLine(vm::Thread* t, vm::object m, unsigned line);

File 3 of 4: heap implementation (Segment, Context, MyHeap)

@@ -346,14 +346,41 @@ class Segment {
assert(context, desired >= minimum);
capacity_ = desired;
while (data == 0) {
if (static_cast<int64_t>(footprint(capacity_)) > available) {
data = 0;
} else {
data = static_cast<uintptr_t*>
(local::allocate
(context, (footprint(capacity_)) * BytesPerWord, false));
if (static_cast<int64_t>(footprint(capacity_)) > available) {
unsigned top = capacity_;
unsigned bottom = minimum;
unsigned target = available;
while (true) {
if (static_cast<int64_t>(footprint(capacity_)) > target) {
if (bottom == capacity_) {
break;
} else if (static_cast<int64_t>(footprint(capacity_ - 1))
<= target)
{
-- capacity_;
break;
}
top = capacity_;
capacity_ = avg(bottom, capacity_);
} else if (static_cast<int64_t>(footprint(capacity_)) < target) {
if (top == capacity_
or static_cast<int64_t>(footprint(capacity_ + 1)) >= target)
{
break;
}
bottom = capacity_;
capacity_ = avg(top, capacity_);
} else {
break;
}
}
}
while (data == 0) {
data = static_cast<uintptr_t*>
(local::allocate
(context, (footprint(capacity_)) * BytesPerWord, false));
if (data == 0) {
if (capacity_ > minimum) {
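
The loop above replaces the old behavior of simply refusing to
allocate when the desired capacity exceeded the available budget: the
VM now binary-searches for the largest capacity whose footprint still
fits, part of the low-memory tuning mentioned in the commit message.
A standalone sketch of the same search, assuming footprint() grows
monotonically with capacity (fitCapacity is a hypothetical name):

#include <cstdint>

// Largest capacity in [minimum, desired] whose footprint fits in
// `available`, assuming footprint() is monotonically increasing.
unsigned fitCapacity(unsigned minimum, unsigned desired, int64_t available,
                     int64_t (*footprint)(unsigned))
{
  unsigned bottom = minimum;
  unsigned top = desired;
  while (bottom < top) {
    // Round up so the loop always makes progress.
    unsigned middle = bottom + ((top - bottom + 1) / 2);
    if (footprint(middle) <= available) {
      bottom = middle; // middle fits; try larger
    } else {
      top = middle - 1; // middle is too big; try smaller
    }
  }
  return bottom; // may still exceed `available` if even minimum does
}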
@@ -638,6 +665,7 @@ class Context {
gen2Base(0),
incomingFootprint(0),
pendingAllocation(0),
tenureFootprint(0),
gen1Padding(0),
tenurePadding(0),
@@ -658,7 +686,9 @@
lastCollectionTime(system->now()),
totalCollectionTime(0),
totalTime(0)
totalTime(0),
limitWasExceeded(false)
{
if (not system->success(system->make(&lock))) {
system->abort();
@@ -709,6 +739,7 @@ class Context {
unsigned gen2Base;
unsigned incomingFootprint;
int pendingAllocation;
unsigned tenureFootprint;
unsigned gen1Padding;
unsigned tenurePadding;
@@ -730,6 +761,8 @@ class Context {
int64_t lastCollectionTime;
int64_t totalCollectionTime;
int64_t totalTime;
bool limitWasExceeded;
};
const char*
@@ -818,7 +851,9 @@ initNextGen2(Context* c)
(c, &(c->nextHeapMap), desired, minimum,
static_cast<int64_t>(c->limit / BytesPerWord)
- (static_cast<int64_t>(c->count / BytesPerWord)
- c->gen2.footprint(c->gen2.capacity())));
- c->gen2.footprint(c->gen2.capacity())
- c->gen1.footprint(c->gen1.capacity())
+ c->pendingAllocation));
if (Verbose2) {
fprintf(stderr, "init nextGen2 to %d bytes\n",
@@ -1655,16 +1690,41 @@ collect2(Context* c)
c->client->visitRoots(&v);
}
bool
limitExceeded(Context* c, int pendingAllocation)
{
unsigned count = c->count + pendingAllocation
- (c->gen2.remaining() * BytesPerWord);
if (Verbose) {
if (count > c->limit) {
if (not c->limitWasExceeded) {
c->limitWasExceeded = true;
fprintf(stderr, "heap limit %d exceeded: %d\n", c->limit, count);
}
} else if (c->limitWasExceeded) {
c->limitWasExceeded = false;
fprintf(stderr, "heap limit %d no longer exceeded: %d\n",
c->limit, count);
}
}
return count > c->limit;
}
void
collect(Context* c)
{
if (oversizedGen2(c)
if (limitExceeded(c, c->pendingAllocation)
or oversizedGen2(c)
or c->tenureFootprint + c->tenurePadding > c->gen2.remaining()
or c->fixieTenureFootprint + c->tenuredFixieFootprint
> c->tenuredFixieCeiling)
{
if (Verbose) {
if (oversizedGen2(c)) {
if (limitExceeded(c, c->pendingAllocation)) {
fprintf(stderr, "low memory causes ");
} else if (oversizedGen2(c)) {
fprintf(stderr, "oversized gen2 causes ");
} else if (c->tenureFootprint + c->tenurePadding > c->gen2.remaining())
{
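
The new limitExceeded counts the heap as full only after charging the
allocation about to be made (pendingAllocation) and crediting the
space gen2 could still hand out without growing (gen2.remaining()). A
worked example with hypothetical numbers:

#include <cstdint>

// Hypothetical numbers illustrating the accounting in limitExceeded.
bool wouldExceedLimit()
{
  int64_t limit    = 128 << 20; // heap limit: 128 MB
  int64_t count    = 126 << 20; // bytes currently allocated
  int64_t pending  =   4 << 20; // allocation being attempted
  int64_t gen2Free =   3 << 20; // gen2.remaining(), in bytes
  // 126 + 4 - 3 = 127 MB, still under the 128 MB limit: no
  // collection is forced yet.
  return count + pending - gen2Free > limit; // false
}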
@@ -1832,8 +1892,8 @@ class MyHeap: public Heap {
return c.limit;
}
virtual bool limitExceeded() {
return c.count > c.limit;
virtual bool limitExceeded(int pendingAllocation = 0) {
return local::limitExceeded(&c, pendingAllocation);
}
virtual void* tryAllocate(unsigned size) {
@@ -1848,50 +1908,45 @@ class MyHeap: public Heap {
free_(&c, p, size);
}
virtual void collect(CollectionType type, unsigned incomingFootprint) {
virtual void collect(CollectionType type, unsigned incomingFootprint,
int pendingAllocation)
{
c.mode = type;
c.incomingFootprint = incomingFootprint;
c.pendingAllocation = pendingAllocation;
local::collect(&c);
}
void* tryAllocateFixed(Allocator* allocator, unsigned sizeInWords,
bool objectMask, unsigned* totalInBytes,
Fixie** handle, bool immortal)
{
*totalInBytes = 0;
virtual unsigned fixedFootprint(unsigned sizeInWords, bool objectMask) {
return Fixie::totalSize(sizeInWords, objectMask);
}
if (limitExceeded()) {
return 0;
}
void* allocateFixed(Allocator* allocator, unsigned sizeInWords,
bool objectMask, Fixie** handle, bool immortal)
{
expect(&c, not limitExceeded());
unsigned total = Fixie::totalSize(sizeInWords, objectMask);
void* p = allocator->tryAllocate(total);
if (p == 0) {
return 0;
} else if (limitExceeded()) {
allocator->free(p, total);
return 0;
} else {
*totalInBytes = total;
return (new (p) Fixie(&c, sizeInWords, objectMask, handle, immortal))
->body();
}
void* p = allocator->allocate(total);
expect(&c, not limitExceeded());
return (new (p) Fixie(&c, sizeInWords, objectMask, handle, immortal))
->body();
}
virtual void* tryAllocateFixed(Allocator* allocator, unsigned sizeInWords,
bool objectMask, unsigned* totalInBytes)
virtual void* allocateFixed(Allocator* allocator, unsigned sizeInWords,
bool objectMask)
{
return tryAllocateFixed
(allocator, sizeInWords, objectMask, totalInBytes, &(c.fixies), false);
return allocateFixed
(allocator, sizeInWords, objectMask, &(c.fixies), false);
}
virtual void* tryAllocateImmortalFixed(Allocator* allocator,
unsigned sizeInWords, bool objectMask,
unsigned* totalInBytes)
virtual void* allocateImmortalFixed(Allocator* allocator,
unsigned sizeInWords, bool objectMask)
{
return tryAllocateFixed
(allocator, sizeInWords, objectMask, totalInBytes, 0, true);
return allocateFixed(allocator, sizeInWords, objectMask, 0, true);
}
bool needsMark(void* p) {
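
Taken together, these changes replace the try-and-return-null
convention (tryAllocateFixed filling in *totalInBytes) with a
precondition: the caller computes the allocation's footprint up front
via fixedFootprint, gives the collector a chance to make room, and
only then calls allocateFixed, which expect()s that the limit is not
exceeded. A sketch of the new call sequence, mirroring allocate3 in
the next file; the pared-down Heap interface and collectWithPending
are stand-ins for the real declarations:

// Subset of the Heap interface from the first file above;
// collectWithPending is a hypothetical stand-in for vm::collect.
class Allocator;

class Heap {
 public:
  virtual unsigned fixedFootprint(unsigned sizeInWords, bool objectMask) = 0;
  virtual bool limitExceeded(int pendingAllocation = 0) = 0;
  virtual void* allocateFixed(Allocator* allocator, unsigned sizeInWords,
                              bool objectMask) = 0;
};

void collectWithPending(int pendingAllocation); // hypothetical

void* allocateFixedObject(Heap* heap, Allocator* allocator,
                          unsigned sizeInWords, bool objectMask)
{
  // Check the limit as if the object were already allocated.
  unsigned pending = heap->fixedFootprint(sizeInWords, objectMask);
  if (heap->limitExceeded(pending)) {
    collectWithPending(pending); // let the GC make room first
  }
  if (heap->limitExceeded(pending)) {
    return 0; // caller throws OutOfMemoryError
  }
  // Precondition established: allocateFixed aborts instead of
  // returning null if the limit turns out to be exceeded anyway.
  return heap->allocateFixed(allocator, sizeInWords, objectMask);
}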

File 4 of 4: machine implementation (postVisit, doCollect, allocate3, collect)

@@ -537,13 +537,22 @@ postVisit(Thread* t, Heap::Visitor* v)
object firstNewTenuredFinalizer = 0;
object lastNewTenuredFinalizer = 0;
for (object* p = &(m->finalizers); *p;) {
v->visit(p);
{ object unreachable = 0;
for (object* p = &(m->finalizers); *p;) {
v->visit(p);
if (m->heap->status(finalizerTarget(t, *p)) == Heap::Unreachable) {
// target is unreachable - queue it up for finalization
finalizerTargetUnreachable(t, v, p);
} else {
if (m->heap->status(finalizerTarget(t, *p)) == Heap::Unreachable) {
object finalizer = *p;
*p = finalizerNext(t, finalizer);
finalizerNext(t, finalizer) = unreachable;
unreachable = finalizer;
} else {
p = &finalizerNext(t, *p);
}
}
for (object* p = &(m->finalizers); *p;) {
// target is reachable
v->visit(&finalizerTarget(t, *p));
@@ -563,6 +572,11 @@ postVisit(Thread* t, Heap::Visitor* v)
p = &finalizerNext(t, *p);
}
}
for (object* p = &unreachable; *p;) {
// target is unreachable - queue it up for finalization
finalizerTargetUnreachable(t, v, p);
}
}
object firstNewTenuredWeakReference = 0;
@@ -603,17 +617,31 @@ postVisit(Thread* t, Heap::Visitor* v)
}
if (major) {
for (object* p = &(m->tenuredFinalizers); *p;) {
v->visit(p);
{ object unreachable = 0;
for (object* p = &(m->tenuredFinalizers); *p;) {
v->visit(p);
if (m->heap->status(finalizerTarget(t, *p)) == Heap::Unreachable) {
// target is unreachable - queue it up for finalization
finalizerTargetUnreachable(t, v, p);
} else {
if (m->heap->status(finalizerTarget(t, *p)) == Heap::Unreachable) {
object finalizer = *p;
*p = finalizerNext(t, finalizer);
finalizerNext(t, finalizer) = unreachable;
unreachable = finalizer;
} else {
p = &finalizerNext(t, *p);
}
}
for (object* p = &(m->tenuredFinalizers); *p;) {
// target is reachable
v->visit(&finalizerTarget(t, *p));
p = &finalizerNext(t, *p);
}
for (object* p = &unreachable; *p;) {
// target is unreachable - queue it up for finalization
finalizerTargetUnreachable(t, v, p);
}
}
for (object* p = &(m->tenuredWeakReferences); *p;) {
@@ -2862,7 +2890,7 @@ class HeapClient: public Heap::Client {
};
void
doCollect(Thread* t, Heap::CollectionType type)
doCollect(Thread* t, Heap::CollectionType type, int pendingAllocation)
{
expect(t, not t->m->collecting);
@@ -2877,7 +2905,8 @@ doCollect(Thread* t, Heap::CollectionType type)
Machine* m = t->m;
m->unsafe = true;
m->heap->collect(type, footprint(m->rootThread));
m->heap->collect(type, footprint(m->rootThread), pendingAllocation
- (t->m->heapPoolIndex * ThreadHeapSizeInWords));
m->unsafe = false;
postCollect(m->rootThread);
@@ -3575,13 +3604,16 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
break;
}
if (t->heap == 0 or t->m->heap->limitExceeded()) {
int pendingAllocation = t->m->heap->fixedFootprint
(ceilingDivide(sizeInBytes, BytesPerWord), objectMask);
if (t->heap == 0 or t->m->heap->limitExceeded(pendingAllocation)) {
// fprintf(stderr, "gc");
// vmPrintTrace(t);
collect(t, Heap::MinorCollection);
collect(t, Heap::MinorCollection, pendingAllocation);
}
if (t->m->heap->limitExceeded()) {
if (t->m->heap->limitExceeded(pendingAllocation)) {
throw_(t, root(t, Machine::OutOfMemoryError));
}
} while (type == Machine::MovableAllocation
@@ -3594,45 +3626,57 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
}
case Machine::FixedAllocation: {
unsigned total;
object o = static_cast<object>
(t->m->heap->tryAllocateFixed
(allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask, &total));
(t->m->heap->allocateFixed
(allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask));
if (o) {
memset(o, 0, sizeInBytes);
memset(o, 0, sizeInBytes);
alias(o, 0) = FixedMark;
alias(o, 0) = FixedMark;
t->m->fixedFootprint += t->m->heap->fixedFootprint
(ceilingDivide(sizeInBytes, BytesPerWord), objectMask);
t->m->fixedFootprint += total;
return o;
} else {
throw_(t, root(t, Machine::OutOfMemoryError));
}
return o;
}
case Machine::ImmortalAllocation: {
unsigned total;
object o = static_cast<object>
(t->m->heap->tryAllocateImmortalFixed
(allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask, &total));
(t->m->heap->allocateImmortalFixed
(allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask));
if (o) {
memset(o, 0, sizeInBytes);
memset(o, 0, sizeInBytes);
alias(o, 0) = FixedMark;
alias(o, 0) = FixedMark;
return o;
} else {
throw_(t, root(t, Machine::OutOfMemoryError));
}
return o;
}
default: abort(t);
}
}
void
collect(Thread* t, Heap::CollectionType type, int pendingAllocation)
{
ENTER(t, Thread::ExclusiveState);
unsigned pending = pendingAllocation
- (t->m->heapPoolIndex * ThreadHeapSizeInWords);
if (t->m->heap->limitExceeded(pending)) {
type = Heap::MajorCollection;
}
doCollect(t, type, pendingAllocation);
if (t->m->heap->limitExceeded(pending)) {
// try once more, giving the heap a chance to squeeze everything
// into the smallest possible space:
doCollect(t, Heap::MajorCollection, pendingAllocation);
}
}
object
makeNewGeneral(Thread* t, object class_)
{
@@ -4678,38 +4722,6 @@ intern(Thread* t, object s)
}
}
void
collect(Thread* t, Heap::CollectionType type)
{
ENTER(t, Thread::ExclusiveState);
if (t->m->heap->limitExceeded()) {
type = Heap::MajorCollection;
}
doCollect(t, type);
if (t->m->heap->limitExceeded()) {
// try once more, giving the heap a chance to squeeze everything
// into the smallest possible space:
doCollect(t, Heap::MajorCollection);
}
#ifdef AVIAN_HEAPDUMP
if ((not t->m->dumpedHeapOnOOM) and t->m->heap->limitExceeded()) {
t->m->dumpedHeapOnOOM = true;
const char* path = findProperty(t, "avian.heap.dump");
if (path) {
FILE* out = vm::fopen(path, "wb");
if (out) {
dumpHeap(t, out);
fclose(out);
}
}
}
#endif//AVIAN_HEAPDUMP
}
void
walk(Thread* t, Heap::Walker* w, object o, unsigned start)
{