mirror of https://github.com/corda/corda.git (synced 2025-01-16 09:50:11 +00:00)

commit a6e79cc417 (parent 459acc3419): snapshot

src/heap.cpp: 178 lines changed
@@ -36,8 +36,8 @@ class Segment {
 
         // printf("iterating from %p (index %d) to %p (index %d) "
         // "(%d of %d bytes) (scale: %d)\n",
-        // start, index, end, limit, (end - start) * sizeof(void*),
-        // map->segment->position * sizeof(void*), map->scale);
+        // start, index, end, limit, (end - start) * BytesPerWord,
+        // map->segment->position * BytesPerWord, map->scale);
       }
 
       bool hasMore() {
@@ -51,12 +51,12 @@ class Segment {
         for (; word <= wordLimit and (word < wordLimit or bit < bitLimit);
              ++word)
         {
-          uintptr_t* p = map->data + word;
+          uintptr_t* p = map->data() + word;
           if (*p) {
             for (; bit < BitsPerWord and (word < wordLimit or bit < bitLimit);
                  ++bit)
             {
-              if (map->data[word] & (static_cast<uintptr_t>(1) << bit)) {
+              if (map->data()[word] & (static_cast<uintptr_t>(1) << bit)) {
                 index = ::indexOf(word, bit);
                 // printf("hit at index %d\n", index);
                 return true;
@@ -74,8 +74,8 @@ class Segment {
       }
 
       void** next() {
-        assert(hasMore());
-        assert(map->segment);
+        assert(map->segment->context, hasMore());
+        assert(map->segment->context, map->segment);
 
         return reinterpret_cast<void**>(map->segment->data) +
           ((index++) * map->scale);
@@ -83,83 +83,66 @@ class Segment {
     };
 
     Segment* segment;
+    unsigned offset;
     unsigned bitsPerRecord;
     unsigned scale;
-    uintptr_t* data;
     Map* next;
     Map* child;
 
     void init() {
       init(0);
-      data = 0;
     }
 
-    void init(Segment* segment, unsigned bitsPerRecord = 1, unsigned scale = 1,
-              Map* next = 0, Map* child = 0)
+    void init(Segment* segment, unsigned offset, unsigned bitsPerRecord = 1,
+              unsigned scale = 1, Map* next = 0, Map* child = 0)
     {
       assert(bitsPerRecord);
       assert(scale);
       assert(powerOfTwo(scale));
 
       this->segment = segment;
+      this->offset = offset;
       this->bitsPerRecord = bitsPerRecord;
       this->scale = scale;
       this->next = next;
       this->child = child;
     }
 
-    void replaceWith(Map* m) {
-      assert(m);
-      assert(bitsPerRecord == m->bitsPerRecord);
-      assert(scale == m->scale);
-
-      free(data);
-
-      data = m->data;
-      m->data = 0;
-
-      if (next) next->replaceWith(m->next);
-      if (child) child->replaceWith(m->child);
+    uintptr_t* data() {
+      return segment->data + offset;
     }
 
     unsigned size(unsigned capacity) {
       unsigned result = pad
         (divide(divide(capacity, scale) * bitsPerRecord, 8));
-      assert(result);
+      assert(segment->context, result);
       return result;
     }
 
     unsigned size() {
-      assert(segment);
+      assert(segment->context, segment);
       return size(max(segment->capacity, 1));
     }
 
     unsigned indexOf(void* p) {
-      assert(segment);
-      assert(segment->position
+      assert(segment->context, segment);
+      assert(segment->context,
+             segment->position
              and p >= segment->data
              and p <= segment->data + segment->position);
-      assert(segment->data);
-      return ((static_cast<void**>(p) - reinterpret_cast<void**>(segment->data))
+      assert(segment->context, segment->data);
+      return ((static_cast<void**>(p)
+               - reinterpret_cast<void**>(segment->data))
               / scale) * bitsPerRecord;
     }
 
-    void update() {
-      data = static_cast<uintptr_t*>(realloc(data, size()));
-      assert(data);
+    void update(uintptr_t* segmentData) {
+      uintptr_t* p = segmentData + offset;
+      memcpy(p, data(), size(segment->position));
 
-      if (next) next->update();
-      if (child) child->update();
-    }
-
-    void reset(bool clear = true) {
-      free(data);
-      data = static_cast<uintptr_t*>(malloc(size()));
-      assert(data);
-      if (clear) memset(data, 0, size());
-
-      if (next) next->reset();
-      if (child) child->reset();
+      if (next) next->update(segmentData);
+      if (child) child->update(segmentData);
     }
 
     void clear() {
@@ -219,8 +202,7 @@ class Segment {
     }
 
     void dispose() {
-      free(data);
-      data = 0;
+      offset = 0;
       segment = 0;
       next = 0;
 
@@ -237,7 +219,7 @@ class Segment {
       return n;
     }
 
-    void setSegment(Segment* s) {
+    void setSegment(Segment* s, bool clear = true) {
       segment = s;
       if (next) next->setSegment(s);
       if (child) child->setSegment(s);
@@ -259,27 +241,46 @@ class Segment {
     return footprint(capacity);
   }
 
-  void init(unsigned capacityInBytes, Map* map = 0, bool clearMap = true) {
-    this->capacity = divide(capacityInBytes, BytesPerWord);
+  void init(Context* context, unsigned capacity, Map* map = 0,
+            bool clearMap = true)
+  {
+    this->context = context;
+    this->capacity = capacity;
     this->data = 0;
     this->position = 0;
     this->map = map;
 
     if (capacity) {
-      if (map) map->reset(clearMap);
-      this->data = static_cast<uintptr_t*>(malloc(capacity * BytesPerWord));
-      assert(this->data);
+      unsigned count = footprint(capacity);
+      this->data = static_cast<uintptr_t*>(system(context)->allocate(&count));
+
+      if (count != footprint(capacity)) {
+        abort(context);
+      }
+
+      if (map) {
+        map->setSegment(this, clearMap);
+      }
     }
   }
 
-  void* add(System* sys, void* p, unsigned size) {
+  void* allocate(unsigned size) {
+    assert(c, size);
+    assert(c, position + size <= capacity);
+    void* p = reinterpret_cast<void**>(data) + position;
+    position += size;
+
+    return p;
+  }
+
+  void* add(void* p, unsigned size) {
     void* target = allocate(size);
-    memcpy(target, p, size);
+    memcpy(target, p, size * BytesPerWord);
     return target;
   }
 
   unsigned remaining() {
-    return (capacity - position) * BytesPerWord;
+    return capacity - position;
   }
 
   void replaceWith(Segment* s) {
@@ -307,28 +308,30 @@ class Segment {
     }
   }
 
-  void grow(System* sys, unsigned extra) {
-    if (remainingBytes() < extra) {
-      unsigned minimumNeeded = s->usedBytes() + (extra * bytesPerWord);
+  void grow(unsigned extra) {
+    if (remaining() < extra) {
+      unsigned minimumNeeded = position + extra;
       unsigned count = minimumNeeded * 2;
 
-      void* p = sys->allocate(&count);
+      minimumNeeded = footprint(minimumNeeded);
+      count = footprint(count);
+
+      uintptr_t* p = static_cast<uintptr_t*>
+        (system(context)->allocate(&count));
 
       if (count >= minimumNeeded) {
-        memcpy(p, s->data, s->usedBytes());
-        s->data = p;
-        sys->free(s->data);
+        memcpy(p, data, position * BytesPerWord);
+        if (map) {
+          map->update(p);
+        }
+
+        data = p;
+        system(context)->free(data);
       } else {
-        abort(sys);
+        abort(context);
       }
     }
 
-    unsigned newCapacity = divide(newCapacityInBytes, BytesPerWord);
-    assert(newCapacity > capacity);
-    capacity = newCapacity;
-    data = static_cast<uintptr_t*>(realloc(data, capacity * BytesPerWord));
-    assert(data);
-    if (map) map->update();
   }
 
   bool contains(void* p) {
@@ -354,7 +357,7 @@ void
 initGen1(Context* c)
 {
   c->ageMap.init(&(c->gen1), log(Arena::TenureThreshold));
-  c->gen1.init(c->minimumGen1Size, &(c->ageMap), false);
+  c->gen1.init(c->minimumGen1Size / BytesPerWord, &(c->ageMap), false);
 }
 
 void
@@ -364,14 +367,14 @@ initGen2(Context* c)
   c->pageMap.init(&(c->gen2), 1, LikelyPageSize / BytesPerWord, 0,
                   &(c->pointerMap));
   c->heapMap.init(&(c->gen2), 1, c->pageMap.scale * 1024, 0, &(c->pageMap));
-  c->gen2.init(c->minimumGen2Size, &(c->heapMap));
+  c->gen2.init(c->minimumGen2Size / BytesPerWord, &(c->heapMap));
 }
 
 void
 initNextGen1(Context* c)
 {
-  unsigned size = max(c->minimumGen1Size,
-                      nextPowerOfTwo(c->gen1.position() * BytesPerWord));
+  unsigned size = max(c->minimumGen1Size / BytesPerWord,
+                      nextPowerOfTwo(c->gen1.position()));
   c->nextAgeMap.init(&(c->nextGen1), log(Arena::TenureThreshold));
   c->nextGen1.init(size, &(c->nextAgeMap), false);
 }
@@ -379,8 +382,8 @@ initNextGen1(Context* c)
 void
 initNextGen2(Context* c)
 {
-  unsigned size = max(c->minimumGen2Size,
-                      nextPowerOfTwo(c->gen2.position() * BytesPerWord));
+  unsigned size = max(c->minimumGen2Size / BytesPerWord,
+                      nextPowerOfTwo(c->gen2.position()));
   c->pointerMap.init(&(c->nextGen2));
   c->pageMap.init(&(c->nextGen2), 1, LikelyPageSize / BytesPerWord, 0,
                   &(c->pointerMap));
@@ -411,11 +414,8 @@ bitset(object o)
 object
 copyTo(Context* c, Segment* s, object o, unsigned size)
 {
-  assert(c, size >= BytesPerWord);
-  assert(c, size % BytesPerWord == 0);
-
-  if (s->remainingBytes() < size) {
-    s->grow(c->sys, size * bytesPerWord);
+  if (s->remaining() < size) {
+    s->grow(c->sys, size);
   }
 
   return static_cast<object>(s->add(o, size));
@@ -424,7 +424,7 @@ copyTo(Context* c, Segment* s, object o, unsigned size)
 object
 copy2(Context* c, object o)
 {
-  unsigned size = c->client->sizeOf(o);
+  unsigned size = c->client->sizeInWords(o);
 
   if (c->gen2.contains(o)) {
     assert(c, c->mode == MajorCollection
@@ -437,7 +437,7 @@ copy2(Context* c, object o)
   if (c->mode == MinorCollection) {
     if (c->gen2.front == 0) initGen2(a);
 
-    if (c->gen2.remainingBytes() >= size) {
+    if (c->gen2.remaining() >= size) {
       return copyTo(c, &(c->gen2), o, size);
     } else {
       c->mode = OverflowCollection;
@@ -676,7 +676,7 @@ collect(Context* c, void** p)
     virtual bool visit(unsigned offset) {
       bool needsVisit;
       object childCopy = update
-        (c, &cast<object>(copy, offset * sizeof(void*)), &needsVisit);
+        (c, &cast<object>(copy, offset * BytesPerWord), &needsVisit);
 
       ++ total;
 
@@ -698,7 +698,7 @@ collect(Context* c, void** p)
           bitsetSet(bitset, offset, true);
         }
       } else {
-        cast<object>(copy, offset * sizeof(void*)) = childCopy;
+        cast<object>(copy, offset * BytesPerWord) = childCopy;
       }
 
       return true;
@@ -722,8 +722,8 @@ collect(Context* c, void** p)
         parent = original;
       }
 
-      original = cast<object>(copy, walker.first * sizeof(void*));
-      cast<object>(copy, walker.first * sizeof(void*)) = follow(original);
+      original = cast<object>(copy, walker.first * BytesPerWord);
+      cast<object>(copy, walker.first * BytesPerWord) = follow(original);
       goto visit;
     } else {
       // ascend
@@ -775,8 +775,8 @@ collect(Context* c, void** p)
         parent = ::parent(original);
       }
 
-      original = cast<object>(copy, walker.next * sizeof(void*));
-      cast<object>(copy, walker.next * sizeof(void*)) = follow(original);
+      original = cast<object>(copy, walker.next * BytesPerWord);
+      cast<object>(copy, walker.next * BytesPerWord) = follow(original);
       goto visit;
     } else {
       return;
@@ -843,7 +843,7 @@ class ObjectSegmentIterator {
   object next() {
     assert(c, hasNext());
     object p = s->data + (index * BytesPerWord);
-    index += c->client->sizeOf(p) / BytesPerWord;
+    index += c->client->sizeInWords(p);
     return p;
   }
 