rename ceiling -> ceilingDivide
commit d26d8fdb9f
parent f9b3be0301
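This change renames the integer ceiling-division helper from ceiling to ceilingDivide, at its definition and at every call site shown below; the arithmetic itself, (n + d - 1) / d, is unchanged (a standalone sketch follows the @@ -350,7 hunk).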
@@ -2562,7 +2562,7 @@ class MyAssembler: public Assembler {
       arguments[i].size = va_arg(a, unsigned);
       arguments[i].type = static_cast<OperandType>(va_arg(a, int));
       arguments[i].operand = va_arg(a, Operand*);
-      footprint += ceiling(arguments[i].size, TargetBytesPerWord);
+      footprint += ceilingDivide(arguments[i].size, TargetBytesPerWord);
     }
     va_end(a);
 
@@ -2578,7 +2578,7 @@ class MyAssembler: public Assembler {
            pad(arguments[i].size, TargetBytesPerWord), RegisterOperand,
            &dst);
 
-        offset += ceiling(arguments[i].size, TargetBytesPerWord);
+        offset += ceilingDivide(arguments[i].size, TargetBytesPerWord);
       } else {
         Memory dst(StackRegister, offset * TargetBytesPerWord);
 
@@ -2586,7 +2586,7 @@ class MyAssembler: public Assembler {
           arguments[i].size, arguments[i].type, arguments[i].operand,
           pad(arguments[i].size, TargetBytesPerWord), MemoryOperand, &dst);
 
-        offset += ceiling(arguments[i].size, TargetBytesPerWord);
+        offset += ceilingDivide(arguments[i].size, TargetBytesPerWord);
       }
     }
   }
@@ -9,13 +9,13 @@ const unsigned NAME(BootHeapOffset) = 1 << (NAME(BootShift) + 1);
 inline unsigned
 LABEL(codeMapSize)(unsigned codeSize)
 {
-  return ceiling(codeSize, TargetBitsPerWord) * TargetBytesPerWord;
+  return ceilingDivide(codeSize, TargetBitsPerWord) * TargetBytesPerWord;
 }
 
 inline unsigned
 LABEL(heapMapSize)(unsigned heapSize)
 {
-  return ceiling(heapSize, TargetBitsPerWord * TargetBytesPerWord)
+  return ceilingDivide(heapSize, TargetBitsPerWord * TargetBytesPerWord)
     * TargetBytesPerWord;
 }
 
@@ -542,11 +542,11 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
     if (hashMapFind(t, typeMaps, c, objectHash, objectEqual) == 0) {
       object array = makeByteArray
         (t, TypeMap::sizeInBytes
-         (ceiling(classFixedSize(t, c), BytesPerWord), memberIndex));
+         (ceilingDivide(classFixedSize(t, c), BytesPerWord), memberIndex));
 
       TypeMap* map = new (&byteArrayBody(t, array, 0)) TypeMap
-        (ceiling(classFixedSize(t, c), BytesPerWord),
-         ceiling(targetMemberOffset, TargetBytesPerWord), memberIndex);
+        (ceilingDivide(classFixedSize(t, c), BytesPerWord),
+         ceilingDivide(targetMemberOffset, TargetBytesPerWord), memberIndex);
 
       for (unsigned i = 0; i < memberIndex; ++i) {
         Field* f = memberFields + i;
@@ -569,7 +569,7 @@ makeCodeImage(Thread* t, Zone* zone, BootImage* image, uint8_t* code,
 
       TypeMap* map = new (&byteArrayBody(t, array, 0)) TypeMap
         (singletonCount(t, classStaticTable(t, c)) + 2,
-         ceiling(targetStaticOffset, TargetBytesPerWord), staticIndex,
+         ceilingDivide(targetStaticOffset, TargetBytesPerWord), staticIndex,
          TypeMap::SingletonKind);
 
       for (unsigned i = 0; i < staticIndex; ++i) {
@@ -739,7 +739,7 @@ targetSize(Thread* t, object typeMaps, object p)
 
     if (map->targetArrayElementSizeInBytes) {
       return map->targetFixedSizeInWords
-        + ceiling(map->targetArrayElementSizeInBytes
+        + ceilingDivide(map->targetArrayElementSizeInBytes
                   * cast<uintptr_t>
                   (p, (map->buildFixedSizeInWords - 1) * BytesPerWord),
                   TargetBytesPerWord);
@@ -1069,7 +1069,7 @@ copy(Thread* t, object typeMaps, object referer, unsigned refererOffset,
 
       memset(dst, 0, TargetBytesPerWord);
 
-      unsigned length = ceiling(objectMaskCount(map), 32);
+      unsigned length = ceilingDivide(objectMaskCount(map), 32);
 
       target_uintptr_t targetLength = targetVW(length);
 
@@ -1164,7 +1164,7 @@ makeHeapImage(Thread* t, BootImage* image, target_uintptr_t* heap,
 
     target_uintptr_t* dst = heap + position + TargetFixieSizeInWords;
 
-    unsigned maskSize = ceiling(size, TargetBitsPerWord);
+    unsigned maskSize = ceilingDivide(size, TargetBitsPerWord);
 
     unsigned total = TargetFixieSizeInWords + size + maskSize;
 
@@ -1440,11 +1440,11 @@ writeBootImage2(Thread* t, OutputStream* bootimageOutput, OutputStream* codeOutp
 
     object array = makeByteArray
       (t, TypeMap::sizeInBytes
-       (ceiling(buildOffset, BytesPerWord), fixedFieldCount));
+       (ceilingDivide(buildOffset, BytesPerWord), fixedFieldCount));
 
     TypeMap* map = new (&byteArrayBody(t, array, 0)) TypeMap
-      (ceiling(buildOffset, BytesPerWord),
-       ceiling(targetOffset, TargetBytesPerWord),
+      (ceilingDivide(buildOffset, BytesPerWord),
+       ceilingDivide(targetOffset, TargetBytesPerWord),
        fixedFieldCount, TypeMap::NormalKind, buildArrayElementSize,
        targetArrayElementSize, arrayElementType);
 
@@ -350,7 +350,7 @@ padWord(uintptr_t n)
 }
 
 inline unsigned
-ceiling(unsigned n, unsigned d)
+ceilingDivide(unsigned n, unsigned d)
 {
   return (n + d - 1) / d;
 }
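For reference, the renamed helper divides and rounds up rather than truncating, which is how the call sites in this diff turn byte or bit counts into word counts. A minimal standalone sketch of its behaviour (the main() harness below is illustrative only and not part of the commit):

#include <cassert>

// Same arithmetic as the helper renamed above: divide n by d, rounding up.
inline unsigned
ceilingDivide(unsigned n, unsigned d)
{
  return (n + d - 1) / d;
}

int main()
{
  assert(ceilingDivide(13, 8) == 2); // 13 bytes need two 8-byte words
  assert(ceilingDivide(16, 8) == 2); // exact multiples are unchanged
  assert(ceilingDivide(0, 8) == 0);  // zero stays zero
  return 0;
}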
@@ -940,7 +940,7 @@ frameMapSizeInBits(MyThread* t, object method)
 unsigned
 frameMapSizeInWords(MyThread* t, object method)
 {
-  return ceiling(frameMapSizeInBits(t, method), BitsPerWord);
+  return ceilingDivide(frameMapSizeInBits(t, method), BitsPerWord);
 }
 
 uint16_t*
@@ -6928,7 +6928,7 @@ unsigned
 simpleFrameMapTableSize(MyThread* t, object method, object map)
 {
   int size = frameMapSizeInBits(t, method);
-  return ceiling(intArrayLength(t, map) * size, 32 + size);
+  return ceilingDivide(intArrayLength(t, map) * size, 32 + size);
 }
 
 uint8_t*
@@ -6965,7 +6965,7 @@ copyFrameMap(int32_t* dst, uintptr_t* src, unsigned mapSizeInBits,
 {
   if (DebugFrameMaps) {
     fprintf(stderr, " orig roots at ip %3d: ", p->ip);
-    printSet(src, ceiling(mapSizeInBits, BitsPerWord));
+    printSet(src, ceilingDivide(mapSizeInBits, BitsPerWord));
     print(subroutinePath);
     fprintf(stderr, "\n");
 
@@ -7068,7 +7068,7 @@ makeGeneralFrameMapTable(MyThread* t, Context* context, uint8_t* start,
   unsigned indexOffset = sizeof(FrameMapTableHeader);
   unsigned mapsOffset = indexOffset
     + (elementCount * sizeof(FrameMapTableIndexElement));
-  unsigned pathsOffset = mapsOffset + (ceiling(mapCount * mapSize, 32) * 4);
+  unsigned pathsOffset = mapsOffset + (ceilingDivide(mapCount * mapSize, 32) * 4);
 
   object table = makeByteArray(t, pathsOffset + pathFootprint);
 
@@ -7143,7 +7143,7 @@ makeGeneralFrameMapTable(MyThread* t, Context* context, uint8_t* start,
             sizeof(SubroutineTrace*), compareSubroutineTracePointers);
 
       for (unsigned i = 0; i < p->subroutineTraceCount; ++i) {
-        assert(t, mapsOffset + ceiling(nextMapIndex + mapSize, 32) * 4
+        assert(t, mapsOffset + ceilingDivide(nextMapIndex + mapSize, 32) * 4
                <= pathsOffset);
 
         copyFrameMap(reinterpret_cast<int32_t*>(body + mapsOffset),
@@ -7155,7 +7155,7 @@ makeGeneralFrameMapTable(MyThread* t, Context* context, uint8_t* start,
     } else {
       pathIndex = 0;
 
-      assert(t, mapsOffset + ceiling(nextMapIndex + mapSize, 32) * 4
+      assert(t, mapsOffset + ceilingDivide(nextMapIndex + mapSize, 32) * 4
              <= pathsOffset);
 
       copyFrameMap(reinterpret_cast<int32_t*>(body + mapsOffset), p->map,
@@ -7185,7 +7185,7 @@ makeSimpleFrameMapTable(MyThread* t, Context* context, uint8_t* start,
 {
   unsigned mapSize = frameMapSizeInBits(t, context->method);
   object table = makeIntArray
-    (t, elementCount + ceiling(elementCount * mapSize, 32));
+    (t, elementCount + ceilingDivide(elementCount * mapSize, 32));
 
   assert(t, intArrayLength(t, table) == elementCount
          + simpleFrameMapTableSize(t, context->method, table));
@@ -7196,7 +7196,7 @@ makeSimpleFrameMapTable(MyThread* t, Context* context, uint8_t* start,
     intArrayBody(t, table, i) = static_cast<intptr_t>(p->address->value())
       - reinterpret_cast<intptr_t>(start);
 
-    assert(t, elementCount + ceiling((i + 1) * mapSize, 32)
+    assert(t, elementCount + ceilingDivide((i + 1) * mapSize, 32)
            <= intArrayLength(t, table));
 
     if (mapSize) {
@@ -9925,7 +9925,7 @@ boot(MyThread* t, BootImage* image, uint8_t* code)
   uintptr_t* heapMap = reinterpret_cast<uintptr_t*>
     (padWord(reinterpret_cast<uintptr_t>(callTable + (image->callCount * 2))));
 
-  unsigned heapMapSizeInWords = ceiling
+  unsigned heapMapSizeInWords = ceilingDivide
     (heapMapSize(image->heapSize), BytesPerWord);
   uintptr_t* heap = heapMap + heapMapSizeInWords;
 
@@ -9934,7 +9934,7 @@ boot(MyThread* t, BootImage* image, uint8_t* code)
   t->heapImage = p->heapImage = heap;
 
   // fprintf(stderr, "heap from %p to %p\n",
-  //         heap, heap + ceiling(image->heapSize, BytesPerWord));
+  //         heap, heap + ceilingDivide(image->heapSize, BytesPerWord));
 
   t->codeImage = p->codeImage = code;
   p->codeImageSize = image->codeSize;
@@ -4417,11 +4417,11 @@ appendCombine(Context* c, TernaryOperation type,
     intptr_t handler = c->client->getThunk
       (type, firstSize, resultSize, &threadParameter);
 
-    unsigned stackSize = ceiling(secondSize, TargetBytesPerWord)
-      + ceiling(firstSize, TargetBytesPerWord);
+    unsigned stackSize = ceilingDivide(secondSize, TargetBytesPerWord)
+      + ceilingDivide(firstSize, TargetBytesPerWord);
 
-    local::push(c, ceiling(secondSize, TargetBytesPerWord), second);
-    local::push(c, ceiling(firstSize, TargetBytesPerWord), first);
+    local::push(c, ceilingDivide(secondSize, TargetBytesPerWord), second);
+    local::push(c, ceilingDivide(firstSize, TargetBytesPerWord), first);
 
     if (threadParameter) {
       ++ stackSize;
@@ -4543,7 +4543,7 @@ appendTranslate(Context* c, BinaryOperation type, unsigned firstSize,
   if (thunk) {
     Stack* oldStack = c->stack;
 
-    local::push(c, ceiling(firstSize, TargetBytesPerWord), first);
+    local::push(c, ceilingDivide(firstSize, TargetBytesPerWord), first);
 
     Stack* argumentStack = c->stack;
     c->stack = oldStack;
@@ -4553,7 +4553,7 @@ appendTranslate(Context* c, BinaryOperation type, unsigned firstSize,
       (c, ValueGeneral, constantSite
        (c, c->client->getThunk(type, firstSize, resultSize))),
       0, 0, result, resultSize, argumentStack,
-      ceiling(firstSize, TargetBytesPerWord), 0);
+      ceilingDivide(firstSize, TargetBytesPerWord), 0);
   } else {
     append(c, new(c->zone)
            TranslateEvent
@@ -4892,8 +4892,8 @@ appendBranch(Context* c, TernaryOperation type, unsigned size, Value* first,
 
     assert(c, not threadParameter);
 
-    local::push(c, ceiling(size, TargetBytesPerWord), second);
-    local::push(c, ceiling(size, TargetBytesPerWord), first);
+    local::push(c, ceilingDivide(size, TargetBytesPerWord), second);
+    local::push(c, ceilingDivide(size, TargetBytesPerWord), first);
 
     Stack* argumentStack = c->stack;
     c->stack = oldStack;
@@ -4902,7 +4902,7 @@ appendBranch(Context* c, TernaryOperation type, unsigned size, Value* first,
     appendCall
       (c, value
        (c, ValueGeneral, constantSite(c, handler)), 0, 0, result, 4,
-       argumentStack, ceiling(size, TargetBytesPerWord) * 2, 0);
+       argumentStack, ceilingDivide(size, TargetBytesPerWord) * 2, 0);
 
     appendBranch(c, thunkBranch(c, type), 4, value
                  (c, ValueGeneral, constantSite(c, static_cast<int64_t>(0))),
@@ -223,7 +223,7 @@ class Segment {
                      unsigned scale, unsigned bitsPerRecord)
   {
     unsigned result
-      = ceiling(ceiling(capacity, scale) * bitsPerRecord, BitsPerWord);
+      = ceilingDivide(ceilingDivide(capacity, scale) * bitsPerRecord, BitsPerWord);
     assert(c, result);
     return result;
   }
@@ -531,7 +531,7 @@ class Fixie {
   }
 
   static unsigned maskSize(unsigned size, bool hasMask) {
-    return hasMask * ceiling(size, BitsPerWord) * BytesPerWord;
+    return hasMask * ceilingDivide(size, BitsPerWord) * BytesPerWord;
   }
 
   static unsigned totalSize(unsigned size, bool hasMask) {
@@ -307,9 +307,9 @@ bool
 walk(Thread*, Heap::Walker* w, uint32_t* mask, unsigned fixedSize,
      unsigned arrayElementSize, unsigned arrayLength, unsigned start)
 {
-  unsigned fixedSizeInWords = ceiling(fixedSize, BytesPerWord);
+  unsigned fixedSizeInWords = ceilingDivide(fixedSize, BytesPerWord);
   unsigned arrayElementSizeInWords
-    = ceiling(arrayElementSize, BytesPerWord);
+    = ceilingDivide(arrayElementSize, BytesPerWord);
 
   for (unsigned i = start; i < fixedSizeInWords; ++i) {
     if (mask[i / 32] & (static_cast<uint32_t>(1) << (i % 32))) {
@@ -1286,7 +1286,7 @@ parseFieldTable(Thread* t, Stream& s, object class_, object pool)
   set(t, class_, ClassFieldTable, fieldTable);
 
   if (staticCount) {
-    unsigned footprint = ceiling(staticOffset - (BytesPerWord * 2),
+    unsigned footprint = ceilingDivide(staticOffset - (BytesPerWord * 2),
                                  BytesPerWord);
     object staticTable = makeSingletonOfSize(t, footprint);
 
@@ -1357,7 +1357,7 @@ parseFieldTable(Thread* t, Stream& s, object class_, object pool)
           classObjectMask(t, classSuper(t, class_)));
     } else {
       object mask = makeIntArray
-        (t, ceiling(classFixedSize(t, class_), 32 * BytesPerWord));
+        (t, ceilingDivide(classFixedSize(t, class_), 32 * BytesPerWord));
       intArrayBody(t, mask, 0) = 1;
 
       object superMask = 0;
@@ -1366,7 +1366,7 @@ parseFieldTable(Thread* t, Stream& s, object class_, object pool)
       if (superMask) {
         memcpy(&intArrayBody(t, mask, 0),
                &intArrayBody(t, superMask, 0),
-               ceiling(classFixedSize(t, classSuper(t, class_)),
+               ceilingDivide(classFixedSize(t, classSuper(t, class_)),
                        32 * BytesPerWord)
                * 4);
       }
@@ -3449,7 +3449,7 @@ allocate2(Thread* t, unsigned sizeInBytes, bool objectMask)
 {
   return allocate3
     (t, t->m->heap,
-     ceiling(sizeInBytes, BytesPerWord) > ThreadHeapSizeInWords ?
+     ceilingDivide(sizeInBytes, BytesPerWord) > ThreadHeapSizeInWords ?
      Machine::FixedAllocation : Machine::MovableAllocation,
      sizeInBytes, objectMask);
 }
@@ -3461,15 +3461,15 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
   expect(t, t->criticalLevel == 0);
 
   if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
-    expect(t, t->backupHeapIndex + ceiling(sizeInBytes, BytesPerWord)
+    expect(t, t->backupHeapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
            <= ThreadBackupHeapSizeInWords);
 
     object o = reinterpret_cast<object>(t->backupHeap + t->backupHeapIndex);
-    t->backupHeapIndex += ceiling(sizeInBytes, BytesPerWord);
+    t->backupHeapIndex += ceilingDivide(sizeInBytes, BytesPerWord);
     cast<object>(o, 0) = 0;
     return o;
   } else if (UNLIKELY(t->flags & Thread::TracingFlag)) {
-    expect(t, t->heapIndex + ceiling(sizeInBytes, BytesPerWord)
+    expect(t, t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
            <= ThreadHeapSizeInWords);
     return allocateSmall(t, sizeInBytes);
   }
@@ -3489,7 +3489,7 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
   do {
     switch (type) {
     case Machine::MovableAllocation:
-      if (t->heapIndex + ceiling(sizeInBytes, BytesPerWord)
+      if (t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
           > ThreadHeapSizeInWords)
       {
         t->heap = 0;
@@ -3531,7 +3531,7 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
       throw_(t, root(t, Machine::OutOfMemoryError));
     }
   } while (type == Machine::MovableAllocation
-           and t->heapIndex + ceiling(sizeInBytes, BytesPerWord)
+           and t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
            > ThreadHeapSizeInWords);
 
   switch (type) {
@@ -3543,7 +3543,7 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
     unsigned total;
     object o = static_cast<object>
       (t->m->heap->tryAllocateFixed
-       (allocator, ceiling(sizeInBytes, BytesPerWord), objectMask, &total));
+       (allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask, &total));
 
     if (o) {
       memset(o, 0, sizeInBytes);
@@ -3562,7 +3562,7 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
     unsigned total;
     object o = static_cast<object>
       (t->m->heap->tryAllocateImmortalFixed
-       (allocator, ceiling(sizeInBytes, BytesPerWord), objectMask, &total));
+       (allocator, ceilingDivide(sizeInBytes, BytesPerWord), objectMask, &total));
 
     if (o) {
       memset(o, 0, sizeInBytes);
@@ -5054,7 +5054,7 @@ populateMultiArray(Thread* t, object array, int32_t* counts,
 
   for (int32_t i = 0; i < counts[index]; ++i) {
     object a = makeArray
-      (t, ceiling
+      (t, ceilingDivide
        (counts[index + 1] * classArrayElementSize(t, class_), BytesPerWord));
     arrayLength(t, a) = counts[index + 1];
     setObjectClass(t, a, class_);
@@ -1818,7 +1818,7 @@ class FixedAllocator: public Allocator {
 inline bool
 ensure(Thread* t, unsigned sizeInBytes)
 {
-  if (t->heapIndex + ceiling(sizeInBytes, BytesPerWord)
+  if (t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
       > ThreadHeapSizeInWords)
   {
     if (sizeInBytes <= ThreadBackupHeapSizeInBytes) {
@@ -1845,11 +1845,11 @@ allocate3(Thread* t, Allocator* allocator, Machine::AllocationType type,
 inline object
 allocateSmall(Thread* t, unsigned sizeInBytes)
 {
-  assert(t, t->heapIndex + ceiling(sizeInBytes, BytesPerWord)
+  assert(t, t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
          <= ThreadHeapSizeInWords);
 
   object o = reinterpret_cast<object>(t->heap + t->heapIndex);
-  t->heapIndex += ceiling(sizeInBytes, BytesPerWord);
+  t->heapIndex += ceilingDivide(sizeInBytes, BytesPerWord);
   return o;
 }
 
@@ -1858,7 +1858,7 @@ allocate(Thread* t, unsigned sizeInBytes, bool objectMask)
 {
   stress(t);
 
-  if (UNLIKELY(t->heapIndex + ceiling(sizeInBytes, BytesPerWord)
+  if (UNLIKELY(t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
                > ThreadHeapSizeInWords
                or t->m->exclusive))
   {
@@ -2149,8 +2149,8 @@ baseSize(Thread* t, object o, object class_)
 {
   assert(t, classFixedSize(t, class_) >= BytesPerWord);
 
-  return ceiling(classFixedSize(t, class_), BytesPerWord)
-    + ceiling(classArrayElementSize(t, class_)
+  return ceilingDivide(classFixedSize(t, class_), BytesPerWord)
+    + ceilingDivide(classArrayElementSize(t, class_)
               * cast<uintptr_t>(o, classFixedSize(t, class_) - BytesPerWord),
               BytesPerWord);
 }
@@ -3349,7 +3349,7 @@ inline unsigned
 singletonMaskSize(unsigned count, unsigned bitsPerWord)
 {
   if (count) {
-    return ceiling(count + 2, bitsPerWord);
+    return ceilingDivide(count + 2, bitsPerWord);
   }
   return 0;
 }
@@ -3365,7 +3365,7 @@ singletonMaskSize(Thread* t, object singleton)
 {
   unsigned length = singletonLength(t, singleton);
   if (length) {
-    return ceiling(length + 2, BitsPerWord + 1);
+    return ceilingDivide(length + 2, BitsPerWord + 1);
   }
   return 0;
 }
@@ -3448,7 +3448,7 @@ singletonBit(Thread* t, object singleton, unsigned start, unsigned index)
 inline unsigned
 poolMaskSize(unsigned count, unsigned bitsPerWord)
 {
-  return ceiling(count, bitsPerWord);
+  return ceilingDivide(count, bitsPerWord);
 }
 
 inline unsigned
@@ -3460,7 +3460,7 @@ poolMaskSize(unsigned count)
 inline unsigned
 poolMaskSize(Thread* t, object pool)
 {
-  return ceiling(singletonCount(t, pool), BitsPerWord + 1);
+  return ceilingDivide(singletonCount(t, pool), BitsPerWord + 1);
 }
 
 inline unsigned
@@ -816,7 +816,7 @@ class MySystem: public System {
     // been unable to track it down so far. The workaround is to give
     // it 8 words more than it should need, where 8 is a number I just
     // made up and seems to work.
-    void* array[ceiling(sizeof(struct stat), sizeof(void*)) + 8];
+    void* array[ceilingDivide(sizeof(struct stat), sizeof(void*)) + 8];
     struct stat* s = reinterpret_cast<struct stat*>(array);
 #endif
 
@@ -2238,7 +2238,7 @@ class MyArchitecture: public Assembler::Architecture {
 
   virtual unsigned alignFrameSize(unsigned sizeInWords) {
     const unsigned alignment = StackAlignmentInWords;
-    return (ceiling(sizeInWords + FrameFooterSize, alignment) * alignment);
+    return (ceilingDivide(sizeInWords + FrameFooterSize, alignment) * alignment);
   }
 
   virtual void nextFrame(void* start, unsigned size, unsigned footprint,
@@ -2513,7 +2513,7 @@ class MyAssembler: public Assembler {
       arguments[i].size = va_arg(a, unsigned);
      arguments[i].type = static_cast<OperandType>(va_arg(a, int));
       arguments[i].operand = va_arg(a, Operand*);
-      footprint += ceiling(arguments[i].size, TargetBytesPerWord);
+      footprint += ceilingDivide(arguments[i].size, TargetBytesPerWord);
     }
     va_end(a);
 
@@ -2529,7 +2529,7 @@ class MyAssembler: public Assembler {
            pad(arguments[i].size, TargetBytesPerWord), RegisterOperand,
            &dst);
 
-        offset += ceiling(arguments[i].size, TargetBytesPerWord);
+        offset += ceilingDivide(arguments[i].size, TargetBytesPerWord);
       } else {
         Memory dst
           (ThreadRegister, (offset + FrameFooterSize) * TargetBytesPerWord);
@@ -2538,7 +2538,7 @@ class MyAssembler: public Assembler {
           arguments[i].size, arguments[i].type, arguments[i].operand,
           pad(arguments[i].size, TargetBytesPerWord), MemoryOperand, &dst);
 
-        offset += ceiling(arguments[i].size, TargetBytesPerWord);
+        offset += ceilingDivide(arguments[i].size, TargetBytesPerWord);
       }
     }
   }
@@ -3444,7 +3444,7 @@ class MyAssembler: public Assembler {
       RUNTIME_ARRAY_BODY(arguments)[i].type
         = static_cast<OperandType>(va_arg(a, int));
       RUNTIME_ARRAY_BODY(arguments)[i].operand = va_arg(a, Operand*);
-      footprint += ceiling
+      footprint += ceilingDivide
         (RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord);
     }
     va_end(a);
@@ -3471,7 +3471,7 @@ class MyAssembler: public Assembler {
           pad(RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord),
           MemoryOperand,
           &dst);
-        offset += ceiling
+        offset += ceilingDivide
           (RUNTIME_ARRAY_BODY(arguments)[i].size, TargetBytesPerWord);
       }
     }