ensure Thread::flags is always updated atomically

Since this field is sometimes updated from other threads, it is essential that we always update it atomically.

commit 32aefaf421 (parent e1074c026c)
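Before the hunks, a minimal sketch of the failure this commit closes (plain standard C++, not Avian source; the flag values mirror the enum introduced below). A plain "flags |= bit" compiles to a separate load, OR, and store, so two threads updating different bits of the same word can each write back a stale value and silently drop the other's bit; an atomic fetch-or makes the read-modify-write indivisible, which is what atomicOr/setFlag guarantee in the patch:

    // sketch.cpp -- illustration of the lost-update race, not VM code
    #include <atomic>
    #include <cassert>
    #include <thread>

    int main()
    {
      const unsigned WaitingFlag = 1 << 1;  // values mirror Thread's flags
      const unsigned DaemonFlag = 1 << 3;

      std::atomic<unsigned> flags(0);

      // Each fetch_or is one indivisible read-modify-write, like atomicOr.
      std::thread a([&] { flags.fetch_or(WaitingFlag); });
      std::thread b([&] { flags.fetch_or(DaemonFlag); });
      a.join();
      b.join();

      // Both bits always survive; with a plain "flags |= bit" on a shared
      // unsigned, one thread's store could overwrite the other's update.
      assert(flags.load() == (WaitingFlag | DaemonFlag));
      return 0;
    }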
@@ -1111,15 +1111,17 @@ class Thread {
     ExitState
   };
 
-  static const unsigned UseBackupHeapFlag = 1 << 0;
-  static const unsigned WaitingFlag = 1 << 1;
-  static const unsigned TracingFlag = 1 << 2;
-  static const unsigned DaemonFlag = 1 << 3;
-  static const unsigned StressFlag = 1 << 4;
-  static const unsigned ActiveFlag = 1 << 5;
-  static const unsigned SystemFlag = 1 << 6;
-  static const unsigned JoinFlag = 1 << 7;
-  static const unsigned TryNativeFlag = 1 << 8;
+  enum Flag {
+    UseBackupHeapFlag = 1 << 0,
+    WaitingFlag = 1 << 1,
+    TracingFlag = 1 << 2,
+    DaemonFlag = 1 << 3,
+    StressFlag = 1 << 4,
+    ActiveFlag = 1 << 5,
+    SystemFlag = 1 << 6,
+    JoinFlag = 1 << 7,
+    TryNativeFlag = 1 << 8
+  };
 
   class Protector {
   public:
@@ -1301,6 +1303,18 @@ class Thread {
   void exit();
   void dispose();
 
+  void setFlag(Flag flag) {
+    atomicOr(&flags, flag);
+  }
+
+  void clearFlag(Flag flag) {
+    atomicAnd(&flags, ~flag);
+  }
+
+  unsigned getFlags() {
+    return flags;
+  }
+
   JNIEnvVTable* vtable;
   Machine* m;
   Thread* parent;
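For reference, a minimal standard-C++ analogue of the new accessors, under the assumption that Avian's atomicOr/atomicAnd have fetch_or/fetch_and semantics (the VM's real primitives live in its platform support code, not in this diff):

    #include <atomic>

    struct FlagsSketch {
      enum Flag { WaitingFlag = 1 << 1, DaemonFlag = 1 << 3 };

      void setFlag(Flag flag) { flags.fetch_or(flag); }               // like atomicOr
      void clearFlag(Flag flag) { flags.fetch_and(~unsigned(flag)); } // like atomicAnd
      unsigned getFlags() { return flags.load(); }

    private:
      std::atomic<unsigned> flags{0};
    };

    int main()
    {
      FlagsSketch f;
      f.setFlag(FlagsSketch::DaemonFlag);
      f.clearFlag(FlagsSketch::DaemonFlag);
      return f.getFlags();  // 0: the bit was set, then cleared
    }

Note that getFlags() is a plain read: the accessors make each individual update atomic, but a read followed by a write remains two separate operations.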
@@ -1325,6 +1339,8 @@ class Thread {
   uintptr_t* heap;
   uintptr_t backupHeap[ThreadBackupHeapSizeInWords];
   unsigned backupHeapIndex;
+
+ private:
   unsigned flags;
 };
 
@@ -1496,9 +1512,9 @@ void shutDown(Thread* t);
 inline void stress(Thread* t)
 {
   if ((not t->m->unsafe)
-      and (t->flags & (Thread::StressFlag | Thread::TracingFlag)) == 0
+      and (t->getFlags() & (Thread::StressFlag | Thread::TracingFlag)) == 0
       and t->state != Thread::NoState and t->state != Thread::IdleState) {
-    atomicOr(&(t->flags), Thread::StressFlag);
+    t->setFlag(Thread::StressFlag);
 
 #ifdef VM_STRESS_MAJOR
     collect(t, Heap::MajorCollection);
@@ -1506,7 +1522,7 @@ inline void stress(Thread* t)
     collect(t, Heap::MinorCollection);
 #endif // not VM_STRESS_MAJOR
 
-    atomicAnd(&(t->flags), ~Thread::StressFlag);
+    t->clearFlag(Thread::StressFlag);
   }
 }
 
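One subtlety worth noting (an observation, not part of the patch): getFlags() followed by setFlag() is still a check-then-act sequence, not a single atomic step. That is safe in stress() because StressFlag and TracingFlag are only set and cleared by the owning thread itself; the atomic accessors exist so these updates cannot clobber bits that other threads do write concurrently, such as SystemFlag (acquireSystem) and WaitingFlag (monitorPollWait) in the hunks below.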
@@ -1581,9 +1597,9 @@ inline bool ensure(Thread* t, unsigned sizeInBytes)
   if (t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
       > ThreadHeapSizeInWords) {
     if (sizeInBytes <= ThreadBackupHeapSizeInBytes) {
-      expect(t, (t->flags & Thread::UseBackupHeapFlag) == 0);
+      expect(t, (t->getFlags() & Thread::UseBackupHeapFlag) == 0);
 
-      atomicOr(&(t->flags), Thread::UseBackupHeapFlag);
+      t->setFlag(Thread::UseBackupHeapFlag);
 
       return true;
     } else {
@@ -1790,7 +1806,7 @@ inline uint64_t runThread(Thread* t, uintptr_t*)
 
 inline bool startThread(Thread* t, Thread* p)
 {
-  p->flags |= Thread::JoinFlag;
+  p->setFlag(Thread::JoinFlag);
   return t->m->system->success(t->m->system->start(&(p->runnable)));
 }
 
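This hunk fixes one of the plainly non-atomic call sites: "p->flags |= Thread::JoinFlag" is a load, OR, and store on a field the newly started thread may touch concurrently. On platforms that only offer compare-and-swap, an atomicOr-style primitive can be built from it; a hypothetical sketch, not Avian's actual implementation:

    #include <atomic>

    // Hypothetical CAS loop with the same effect as atomicOr(p, v).
    inline void atomicOrSketch(std::atomic<unsigned>* p, unsigned v)
    {
      unsigned old = p->load();
      // On failure, compare_exchange_weak reloads "old" with the current
      // value, so the next iteration ORs v into fresh contents.
      while (not p->compare_exchange_weak(old, old | v)) {
      }
    }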
@@ -1865,7 +1881,7 @@ inline void registerDaemon(Thread* t)
 {
   ACQUIRE_RAW(t, t->m->stateLock);
 
-  atomicOr(&(t->flags), Thread::DaemonFlag);
+  t->setFlag(Thread::DaemonFlag);
 
   ++t->m->daemonCount;
 
@@ -2698,7 +2714,7 @@ inline bool acquireSystem(Thread* t, Thread* target)
   ACQUIRE_RAW(t, t->m->stateLock);
 
   if (t->state != Thread::JoinedState) {
-    atomicOr(&(target->flags), Thread::SystemFlag);
+    target->setFlag(Thread::SystemFlag);
     return true;
   } else {
     return false;
@@ -2711,7 +2727,7 @@ inline void releaseSystem(Thread* t, Thread* target)
 
   assertT(t, t->state != Thread::JoinedState);
 
-  atomicAnd(&(target->flags), ~Thread::SystemFlag);
+  target->clearFlag(Thread::SystemFlag);
 }
 
 inline bool atomicCompareAndSwapObject(Thread* t,
@@ -2875,10 +2891,10 @@ inline void monitorAppendWait(Thread* t, GcMonitor* monitor)
 {
   assertT(t, monitor->owner() == t);
 
-  expect(t, (t->flags & Thread::WaitingFlag) == 0);
+  expect(t, (t->getFlags() & Thread::WaitingFlag) == 0);
   expect(t, t->waitNext == 0);
 
-  atomicOr(&(t->flags), Thread::WaitingFlag);
+  t->setFlag(Thread::WaitingFlag);
 
   if (monitor->waitTail()) {
     static_cast<Thread*>(monitor->waitTail())->waitNext = t;
@@ -2909,7 +2925,7 @@ inline void monitorRemoveWait(Thread* t, GcMonitor* monitor)
       }
 
       t->waitNext = 0;
-      atomicAnd(&(t->flags), ~Thread::WaitingFlag);
+      t->clearFlag(Thread::WaitingFlag);
 
       return;
     } else {
@@ -2967,7 +2983,7 @@ inline bool monitorWait(Thread* t, GcMonitor* monitor, int64_t time)
 
   monitor->depth() = depth;
 
-  if (t->flags & Thread::WaitingFlag) {
+  if (t->getFlags() & Thread::WaitingFlag) {
     monitorRemoveWait(t, monitor);
   } else {
     expect(t, not monitorFindWait(t, monitor));
@@ -2986,7 +3002,7 @@ inline Thread* monitorPollWait(Thread* t UNUSED, GcMonitor* monitor)
 
   if (next) {
     monitor->waitHead() = next->waitNext;
-    atomicAnd(&(next->flags), ~Thread::WaitingFlag);
+    next->clearFlag(Thread::WaitingFlag);
     next->waitNext = 0;
     if (next == monitor->waitTail()) {
      monitor->waitTail() = 0;
@@ -3099,7 +3115,7 @@ inline void wait(Thread* t, object o, int64_t milliseconds)
     bool interrupted = monitorWait(t, m, milliseconds);
 
     if (interrupted) {
-      if (t->m->alive or (t->flags & Thread::DaemonFlag) == 0) {
+      if (t->m->alive or (t->getFlags() & Thread::DaemonFlag) == 0) {
         t->m->classpath->clearInterrupted(t);
         throwNew(t, GcInterruptedException::Type);
       } else {
@@ -317,8 +317,8 @@ extern "C" AVIAN_EXPORT int64_t JNICALL
   int64_t argument;
   memcpy(&argument, arguments + 2, 8);
 
-  t->flags |= Thread::TryNativeFlag;
-  THREAD_RESOURCE0(t, t->flags &= ~Thread::TryNativeFlag);
+  t->setFlag(Thread::TryNativeFlag);
+  THREAD_RESOURCE0(t, t->clearFlag(Thread::TryNativeFlag));
 
   return reinterpret_cast<int64_t (*)(int64_t)>(function)(argument);
 }
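THREAD_RESOURCE0 itself is not shown in this diff; assuming it registers a scope-exit action (which the signal-handler hunk further down relies on via popResources), the pattern is equivalent to a RAII guard. A sketch with simplified placeholder types:

    #include <atomic>
    #include <cstdint>

    enum Flag { TryNativeFlag = 1 << 8 };

    struct ThreadSketch {
      void setFlag(Flag f) { flags.fetch_or(f); }
      void clearFlag(Flag f) { flags.fetch_and(~unsigned(f)); }
      std::atomic<unsigned> flags{0};
    };

    // Clears the flag when the scope unwinds, normally or via an exception.
    struct ClearFlagOnExit {
      ClearFlagOnExit(ThreadSketch* t, Flag f) : t(t), f(f) {}
      ~ClearFlagOnExit() { t->clearFlag(f); }
      ThreadSketch* t;
      Flag f;
    };

    int64_t callNative(ThreadSketch* t, int64_t (*function)(int64_t), int64_t argument)
    {
      t->setFlag(TryNativeFlag);
      ClearFlagOnExit guard(t, TryNativeFlag);
      return function(argument);
    }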
@@ -391,7 +391,7 @@ class MyClasspath : public Classpath {
     }
 
     vm::acquire(t, t->javaThread);
-    t->flags &= ~Thread::ActiveFlag;
+    t->clearFlag(Thread::ActiveFlag);
     vm::notifyAll(t, t->javaThread);
     vm::release(t, t->javaThread);
   });
@@ -582,7 +582,7 @@ class MyClasspath : public Classpath {
 
     THREAD_RESOURCE0(t, {
       vm::acquire(t, t->javaThread);
-      t->flags &= ~Thread::ActiveFlag;
+      t->clearFlag(Thread::ActiveFlag);
       vm::notifyAll(t, t->javaThread);
       vm::release(t, t->javaThread);
 
@@ -3624,7 +3624,7 @@ extern "C" AVIAN_EXPORT jboolean JNICALL
   ENTER(t, Thread::ActiveState);
 
   Thread* p = reinterpret_cast<Thread*>(cast<GcThread>(t, *thread)->peer());
-  return p and (p->flags & Thread::ActiveFlag) != 0;
+  return p and (p->getFlags() & Thread::ActiveFlag) != 0;
 }
 
 extern "C" AVIAN_EXPORT void JNICALL EXPORT(JVM_SuspendThread)(Thread*, jobject)
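This is the cross-thread case the commit message refers to: JVM_IsThreadAlive reads the flags word of a different thread, while earlier hunks (acquireSystem, monitorPollWait) write a different thread's flags. With atomic updates on the writers, a reader may still observe a value that is immediately out of date, which is fine for a liveness poll; the point of the change is that concurrent writers can no longer erase each other's bits.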
@@ -2494,8 +2494,8 @@ unsigned traceSize(Thread* t)
 void NO_RETURN throwArithmetic(MyThread* t)
 {
   if (ensure(t, GcArithmeticException::FixedSize + traceSize(t))) {
-    atomicOr(&(t->flags), Thread::TracingFlag);
-    THREAD_RESOURCE0(t, atomicAnd(&(t->flags), ~Thread::TracingFlag));
+    t->setFlag(Thread::TracingFlag);
+    THREAD_RESOURCE0(t, t->clearFlag(Thread::TracingFlag));
 
     throwNew(t, GcArithmeticException::Type);
   } else {
@@ -2706,8 +2706,8 @@ uint64_t makeMultidimensionalArrayFromReference(MyThread* t,
 void NO_RETURN throwArrayIndexOutOfBounds(MyThread* t)
 {
   if (ensure(t, GcArrayIndexOutOfBoundsException::FixedSize + traceSize(t))) {
-    atomicOr(&(t->flags), Thread::TracingFlag);
-    THREAD_RESOURCE0(t, atomicAnd(&(t->flags), ~Thread::TracingFlag));
+    t->setFlag(Thread::TracingFlag);
+    THREAD_RESOURCE0(t, t->clearFlag(Thread::TracingFlag));
 
     throwNew(t, GcArrayIndexOutOfBoundsException::Type);
   } else {
@@ -3006,7 +3006,7 @@ void gcIfNecessary(MyThread* t)
 {
   stress(t);
 
-  if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
+  if (UNLIKELY(t->getFlags() & Thread::UseBackupHeapFlag)) {
     collect(t, Heap::MinorCollection);
   }
 }
@@ -8116,7 +8116,7 @@ object invoke(Thread* thread, GcMethod* method, ArgumentList* arguments)
   }
 
   if (t->exception) {
-    if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
+    if (UNLIKELY(t->getFlags() & Thread::UseBackupHeapFlag)) {
       collect(t, Heap::MinorCollection);
     }
 
@@ -8166,9 +8166,9 @@ class SignalHandler : public SignalRegistrar::Handler {
 
   void setException(MyThread* t) {
     if (ensure(t, pad(fixedSize) + traceSize(t))) {
-      atomicOr(&(t->flags), Thread::TracingFlag);
+      t->setFlag(Thread::TracingFlag);
       t->exception = makeThrowable(t, type);
-      atomicAnd(&(t->flags), ~Thread::TracingFlag);
+      t->clearFlag(Thread::TracingFlag);
     } else {
       // not enough memory available for a new exception and stack
       // trace -- use a preallocated instance instead
|
||||
{
|
||||
MyThread* t = static_cast<MyThread*>(m->localThread->get());
|
||||
if (t and t->state == Thread::ActiveState) {
|
||||
if (t->flags & Thread::TryNativeFlag) {
|
||||
if (t->getFlags() & Thread::TryNativeFlag) {
|
||||
setException(t);
|
||||
|
||||
popResources(t);
|
||||
@@ -8873,9 +8873,9 @@ class MyProcessor : public Processor {
     }
 
     if (ensure(t, traceSize(target))) {
-      atomicOr(&(t->flags), Thread::TracingFlag);
+      t->setFlag(Thread::TracingFlag);
       trace = makeTrace(t, target);
-      atomicAnd(&(t->flags), ~Thread::TracingFlag);
+      t->clearFlag(Thread::TracingFlag);
     }
   }
 
@@ -8887,7 +8887,7 @@ class MyProcessor : public Processor {
 
     t->m->system->visit(t->systemThread, target->systemThread, &visitor);
 
-    if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
+    if (UNLIKELY(t->getFlags() & Thread::UseBackupHeapFlag)) {
       PROTECT(t, visitor.trace);
 
       collect(t, Heap::MinorCollection);
@@ -47,8 +47,8 @@ void join(Thread* t, Thread* o)
 {
   if (t != o) {
     assertT(t, o->state != Thread::JoinedState);
-    assertT(t, (o->flags & Thread::SystemFlag) == 0);
-    if (o->flags & Thread::JoinFlag) {
+    assertT(t, (o->getFlags() & Thread::SystemFlag) == 0);
+    if (o->getFlags() & Thread::JoinFlag) {
       o->systemThread->join();
     }
     o->state = Thread::JoinedState;
@@ -170,7 +170,7 @@ void disposeNoRemove(Thread* m, Thread* o)
 
 void interruptDaemon(Thread* m, Thread* o)
 {
-  if (o->flags & Thread::DaemonFlag) {
+  if (o->getFlags() & Thread::DaemonFlag) {
     interrupt(m, o);
   }
 }
@@ -257,7 +257,7 @@ void killZombies(Thread* t, Thread* o)
     killZombies(t, child);
   }
 
-  if ((o->flags & Thread::SystemFlag) == 0) {
+  if ((o->getFlags() & Thread::SystemFlag) == 0) {
     switch (o->state) {
     case Thread::ZombieState:
       join(t, o);
@@ -689,10 +689,10 @@ void postCollect(Thread* t)
     t->heapIndex = 0;
   }
 
-  if (t->flags & Thread::UseBackupHeapFlag) {
+  if (t->getFlags() & Thread::UseBackupHeapFlag) {
     memset(t->backupHeap, 0, ThreadBackupHeapSizeInBytes);
 
-    t->flags &= ~Thread::UseBackupHeapFlag;
+    t->clearFlag(Thread::UseBackupHeapFlag);
     t->backupHeapIndex = 0;
   }
 
@@ -3441,9 +3441,9 @@ void doCollect(Thread* t, Heap::CollectionType type, int pendingAllocation)
   THREAD_RESOURCE0(t, t->m->collecting = false);
 
 #ifdef VM_STRESS
-  bool stress = (t->flags & Thread::StressFlag) != 0;
+  bool stress = (t->getFlags() & Thread::StressFlag) != 0;
   if (not stress)
-    atomicOr(&(t->flags), Thread::StressFlag);
+    t->setFlag(Thread::StressFlag);
 #endif
 
   Machine* m = t->m;
@@ -3474,7 +3474,7 @@ void doCollect(Thread* t, Heap::CollectionType type, int pendingAllocation)
 
 #ifdef VM_STRESS
   if (not stress)
-    atomicAnd(&(t->flags), ~Thread::StressFlag);
+    t->clearFlag(Thread::StressFlag);
 #endif
 
   GcFinalizer* finalizeQueue = t->m->finalizeQueue;
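The VM_STRESS pair above uses a save/restore idiom: remember whether the bit was already set, and only toggle it if this frame set it, so a nested collection does not clear a flag an outer frame still owns. A simplified sketch of the idiom (placeholder types, not Avian's code):

    #include <atomic>

    enum Flag { StressFlag = 1 << 4 };

    struct ThreadSketch {
      void setFlag(Flag f) { flags.fetch_or(f); }
      void clearFlag(Flag f) { flags.fetch_and(~unsigned(f)); }
      unsigned getFlags() { return flags.load(); }
      std::atomic<unsigned> flags{0};
    };

    void collectUnderStress(ThreadSketch* t, void (*body)(ThreadSketch*))
    {
      bool stress = (t->getFlags() & StressFlag) != 0;
      if (not stress)
        t->setFlag(StressFlag);   // matches the @@ -3441 hunk

      body(t);                    // the collection itself

      if (not stress)
        t->clearFlag(StressFlag); // matches the @@ -3474 hunk
    }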
@@ -4070,7 +4070,7 @@ void enter(Thread* t, Thread::State s)
       assertT(t, t->m->liveCount > 0);
       --t->m->liveCount;
 
-      if (t->flags & Thread::DaemonFlag) {
+      if (t->getFlags() & Thread::DaemonFlag) {
         --t->m->daemonCount;
       }
     }
@@ -4182,7 +4182,7 @@ object allocate3(Thread* t,
 {
   expect(t, t->criticalLevel == 0);
 
-  if (UNLIKELY(t->flags & Thread::UseBackupHeapFlag)) {
+  if (UNLIKELY(t->getFlags() & Thread::UseBackupHeapFlag)) {
     expect(t,
            t->backupHeapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
            <= ThreadBackupHeapSizeInWords);
@@ -4191,7 +4191,7 @@ object allocate3(Thread* t,
     t->backupHeapIndex += ceilingDivide(sizeInBytes, BytesPerWord);
     fieldAtOffset<object>(o, 0) = 0;
     return o;
-  } else if (UNLIKELY(t->flags & Thread::TracingFlag)) {
+  } else if (UNLIKELY(t->getFlags() & Thread::TracingFlag)) {
     expect(t,
            t->heapIndex + ceilingDivide(sizeInBytes, BytesPerWord)
            <= ThreadHeapSizeInWords);