From f148fd06686f811cfc342bf460db729e4cd1be93 Mon Sep 17 00:00:00 2001
From: Joel Dice
Date: Wed, 22 Aug 2007 08:50:29 -0600
Subject: [PATCH] add Machine::heapPool, allowing threads to acquire
 replacement heaps up to a point before forcing a GC

---
 src/machine.cpp | 35 +++++++++++++++++++++++++++--------
 src/machine.h   | 11 ++++++++---
 2 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/src/machine.cpp b/src/machine.cpp
index 8ed1ef56f6..6571e82b40 100644
--- a/src/machine.cpp
+++ b/src/machine.cpp
@@ -132,8 +132,6 @@ void
 visitRoots(Thread* t, Heap::Visitor* v)
 {
   if (t->state != Thread::ZombieState) {
-    t->heapIndex = 0;
-
     v->visit(&(t->javaThread));
     v->visit(&(t->code));
     v->visit(&(t->exception));
@@ -362,11 +360,14 @@ void
 postCollect(Thread* t)
 {
 #ifdef VM_STRESS
-  t->vm->system->free(t->heap);
-  t->heap = static_cast<object*>
+  t->vm->system->free(t->defaultHeap);
+  t->defaultHeap = static_cast<object*>
     (t->vm->system->allocate(Thread::HeapSizeInBytes));
 #endif
 
+  t->heap = t->defaultHeap;
+  t->heapIndex = 0;
+
   if (t->large) {
     t->vm->system->free(t->large);
     t->large = 0;
@@ -1289,7 +1290,8 @@ Machine::Machine(System* system, Heap* heap, Finder* finder):
   finalizeQueue(0),
   weakReferences(0),
   tenuredWeakReferences(0),
-  unsafe(false)
+  unsafe(false),
+  heapPoolIndex(0)
 {
   populateJNITable(&jniEnvVTable);
 
@@ -1335,8 +1337,9 @@ Thread::Thread(Machine* m, object javaThread, Thread* parent):
   runnable(this)
 #ifdef VM_STRESS
   , stress(false),
-  heap(static_cast<object*>(m->system->allocate(HeapSizeInBytes)))
+  defaultHeap(static_cast<object*>(m->system->allocate(HeapSizeInBytes)))
 #endif // VM_STRESS
+  , heap(defaultHeap)
 {
   if (parent == 0) {
     assert(this, m->rootThread == 0);
@@ -1613,8 +1616,19 @@ allocate2(Thread* t, unsigned sizeInBytes)
   if (t->heapIndex + ceiling(sizeInBytes, BytesPerWord)
       >= Thread::HeapSizeInWords)
   {
-    ENTER(t, Thread::ExclusiveState);
-    collect(t, Heap::MinorCollection);
+    t->heap = 0;
+    if (t->vm->heapPoolIndex < Machine::HeapPoolSize) {
+      t->heap = static_cast<object*>
+        (t->vm->system->tryAllocate(Thread::HeapSizeInBytes));
+      if (t->heap) {
+        t->vm->heapPool[t->vm->heapPoolIndex++] = t->heap;
+      }
+    }
+
+    if (t->heap == 0) {
+      ENTER(t, Thread::ExclusiveState);
+      collect(t, Heap::MinorCollection);
+    }
   }
 
   if (sizeInBytes > Thread::HeapSizeInBytes) {
@@ -2535,6 +2549,11 @@ collect(Thread* t, Heap::CollectionType type)
   m->finalizeQueue = 0;
 
   killZombies(t, m->rootThread);
+
+  for (unsigned i = 0; i < m->heapPoolIndex; ++i) {
+    m->system->free(m->heapPool[i]);
+  }
+  m->heapPoolIndex = 0;
 }
 
 void
diff --git a/src/machine.h b/src/machine.h
index 60dc38461a..ea6035df1f 100644
--- a/src/machine.h
+++ b/src/machine.h
@@ -1098,6 +1098,8 @@ class Machine {
     dispose();
   }
 
+  static const unsigned HeapPoolSize = 16;
+
   void dispose();
 
   System* system;
@@ -1125,6 +1127,8 @@ class Machine {
   object tenuredWeakReferences;
   bool unsafe;
   JNIEnvVTable jniEnvVTable;
+  object* heapPool[HeapPoolSize];
+  unsigned heapPoolIndex;
 };
 
 object
@@ -1193,7 +1197,7 @@ class Thread {
     Thread* t;
   };
 
-  static const unsigned HeapSizeInBytes = 512 * 1024;
+  static const unsigned HeapSizeInBytes = 64 * 1024;
   static const unsigned StackSizeInBytes = 64 * 1024;
 
   static const unsigned HeapSizeInWords = HeapSizeInBytes / BytesPerWord;
@@ -1221,11 +1225,12 @@ class Thread {
   unsigned heapIndex;
   Protector* protector;
   Runnable runnable;
+  object* heap;
 #ifdef VM_STRESS
   bool stress;
-  object* heap;
+  object* defaultHeap;
 #else // not VM_STRESS
-  object heap[HeapSizeInWords];
+  object defaultHeap[HeapSizeInWords];
 #endif // not VM_STRESS
   uintptr_t stack[StackSizeInWords];
 };