Mirror of https://github.com/corda/corda.git, synced 2025-02-11 13:16:10 +00:00
Improve error reporting in case of failure of flaky tests (#2828)
* Improve error reporting in case of failure of flaky tests
  Also eliminate warnings and other minor changes.
* Address code review comments raised by @exFalso
  Also improve output for stack traces.
parent 9f80cfa249
commit a2d65f1a6b
@@ -15,6 +15,7 @@ import net.corda.nodeapi.RPCApi
 import net.corda.testing.core.SerializationEnvironmentRule
 import net.corda.testing.internal.testThreadFactory
 import net.corda.testing.node.internal.*
+import org.apache.activemq.artemis.api.config.ActiveMQDefaultConfiguration
 import org.apache.activemq.artemis.api.core.SimpleString
 import org.junit.After
 import org.junit.Assert.assertEquals
@@ -46,15 +47,15 @@ class RPCStabilityTests {
         override val protocolVersion = 0
     }
 
-    private fun waitUntilNumberOfThreadsStable(executorService: ScheduledExecutorService): Int {
-        val values = ConcurrentLinkedQueue<Int>()
+    private fun waitUntilNumberOfThreadsStable(executorService: ScheduledExecutorService): Map<Thread, List<StackTraceElement>> {
+        val values = ConcurrentLinkedQueue<Map<Thread, List<StackTraceElement>>>()
         return poll(executorService, "number of threads to become stable", 250.millis) {
-            values.add(Thread.activeCount())
+            values.add(Thread.getAllStackTraces().mapValues { it.value.toList() })
             if (values.size > 5) {
                 values.poll()
             }
             val first = values.peek()
-            if (values.size == 5 && values.all { it == first }) {
+            if (values.size == 5 && values.all { it.keys.size == first.keys.size }) {
                 first
             } else {
                 null
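The helper above no longer returns a bare thread count: it now returns a snapshot of every live thread together with its stack trace, so a failing assertion can show what was actually running. A minimal standalone sketch of what that snapshot contains (the threadSnapshot name and the printing loop are illustrative, not part of the commit):

fun threadSnapshot(): Map<Thread, List<StackTraceElement>> =
        Thread.getAllStackTraces().mapValues { it.value.toList() }

fun main() {
    // Print each live thread with the top of its stack, the kind of detail the
    // improved failure message relies on.
    threadSnapshot().forEach { (thread, stack) ->
        println("${thread.name} (${thread.state})")
        stack.take(3).forEach { frame -> println("    at $frame") }
    }
}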
@@ -64,30 +65,39 @@ class RPCStabilityTests {
 
     @Test
     fun `client and server dont leak threads`() {
-        val executor = Executors.newScheduledThreadPool(1)
         fun startAndStop() {
             rpcDriver {
                 val server = startRpcServer<RPCOps>(ops = DummyOps).get()
                 startRpcClient<RPCOps>(server.broker.hostAndPort!!).get()
             }
         }
-        repeat(5) {
-            startAndStop()
+        runBlockAndCheckThreads(::startAndStop)
+    }
+
+    private fun runBlockAndCheckThreads(block: () -> Unit) {
+        val executor = Executors.newScheduledThreadPool(1)
+
+        try {
+            // Warm-up so that all the thread pools & co. created
+            block()
+
+            val threadsBefore = waitUntilNumberOfThreadsStable(executor)
+            repeat(5) {
+                block()
+            }
+            val threadsAfter = waitUntilNumberOfThreadsStable(executor)
+            // This is a less than check because threads from other tests may be shutting down while this test is running.
+            // This is therefore a "best effort" check. When this test is run on its own this should be a strict equality.
+            // In case of failure we output the threads along with their stacktraces to get an idea what was running at a time.
+            assert(threadsBefore.keys.size >= threadsAfter.keys.size, { "threadsBefore: $threadsBefore\nthreadsAfter: $threadsAfter" })
+        } finally {
+            executor.shutdownNow()
         }
-        val numberOfThreadsBefore = waitUntilNumberOfThreadsStable(executor)
-        repeat(5) {
-            startAndStop()
-        }
-        val numberOfThreadsAfter = waitUntilNumberOfThreadsStable(executor)
-        // This is a less than check because threads from other tests may be shutting down while this test is running.
-        // This is therefore a "best effort" check. When this test is run on its own this should be a strict equality.
-        assertTrue(numberOfThreadsBefore >= numberOfThreadsAfter)
-        executor.shutdownNow()
     }
 
     @Test
     fun `client doesnt leak threads when it fails to start`() {
-        val executor = Executors.newScheduledThreadPool(1)
         fun startAndStop() {
             rpcDriver {
                 Try.on { startRpcClient<RPCOps>(NetworkHostAndPort("localhost", 9999)).get() }
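In essence, the new runBlockAndCheckThreads helper warms the block up once, snapshots the live threads, runs the block a few more times, snapshots again, and fails with both snapshots if the thread count grew. A simplified standalone sketch of that pattern, without JUnit and without the poll-until-stable step (the names here are illustrative):

fun checkForLeakedThreads(block: () -> Unit) {
    // Warm-up run so that lazily created thread pools are not counted as leaks.
    block()
    val threadsBefore = Thread.getAllStackTraces().mapValues { it.value.toList() }
    repeat(5) { block() }
    val threadsAfter = Thread.getAllStackTraces().mapValues { it.value.toList() }
    // ">=" rather than "==" because unrelated threads may still be shutting down.
    check(threadsBefore.keys.size >= threadsAfter.keys.size) {
        "threadsBefore: $threadsBefore\nthreadsAfter: $threadsAfter"
    }
}

fun main() {
    // Example block: a short-lived thread that has terminated before the check runs.
    checkForLeakedThreads {
        Thread { Thread.sleep(10) }.apply { start(); join() }
    }
    println("no leaked threads detected")
}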
@@ -100,20 +110,10 @@ class RPCStabilityTests {
                 }
             }
         }
-        repeat(5) {
-            startAndStop()
-        }
-        val numberOfThreadsBefore = waitUntilNumberOfThreadsStable(executor)
-        repeat(5) {
-            startAndStop()
-        }
-        val numberOfThreadsAfter = waitUntilNumberOfThreadsStable(executor)
-
-        assertTrue(numberOfThreadsBefore >= numberOfThreadsAfter)
-        executor.shutdownNow()
+        runBlockAndCheckThreads(::startAndStop)
     }
 
-    fun RpcBrokerHandle.getStats(): Map<String, Any> {
+    private fun RpcBrokerHandle.getStats(): Map<String, Any> {
         return serverControl.run {
             mapOf(
                     "connections" to listConnectionIDs().toSet(),
@@ -211,14 +211,14 @@ class RPCStabilityTests {
             val server = startRpcServer<LeakObservableOps>(ops = leakObservableOpsImpl)
             val proxy = startRpcClient<LeakObservableOps>(server.get().broker.hostAndPort!!).get()
             // Leak many observables
-            val N = 200
-            (1..N).map {
+            val count = 200
+            (1..count).map {
                 pool.fork { proxy.leakObservable(); Unit }
             }.transpose().getOrThrow()
             // In a loop force GC and check whether the server is notified
             while (true) {
                 System.gc()
-                if (leakObservableOpsImpl.leakedUnsubscribedCount.get() == N) break
+                if (leakObservableOpsImpl.leakedUnsubscribedCount.get() == count) break
                 Thread.sleep(100)
             }
         }
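The loop above keeps forcing garbage collection until the server has observed the expected number of unsubscriptions, rather than sleeping for a fixed time. A standalone illustration of the same force-GC-and-poll idea, using a WeakReference as the observable side effect (this example is not taken from the commit):

import java.lang.ref.WeakReference

fun main() {
    var payload: Any? = Any()
    val ref = WeakReference(payload)
    payload = null // drop the only strong reference
    // Keep prompting the collector and checking for the side effect, as the test
    // does with leakedUnsubscribedCount, instead of guessing a sleep duration.
    while (true) {
        System.gc()
        if (ref.get() == null) break
        Thread.sleep(100)
    }
    println("referent collected")
}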
@@ -361,11 +361,11 @@ class RPCStabilityTests {
             clients[0].destroyForcibly()
             pollUntilClientNumber(server, numberOfClients - 1)
             // Kill the rest
-            (1..numberOfClients - 1).forEach {
+            (1 until numberOfClients).forEach {
                 clients[it].destroyForcibly()
             }
             pollUntilClientNumber(server, 0)
-            // Now poll until the server detects the disconnects and unsubscribes from all obserables.
+            // Now poll until the server detects the disconnects and un-subscribes from all observables.
             pollUntilTrue("number of times subscribe() has been called") { trackSubscriberOpsImpl.subscriberCount.get() == 0 }.get()
         }
     }
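The range change in this hunk is purely idiomatic: 1 until numberOfClients denotes the same half-open range as 1..numberOfClients - 1. A quick check with an illustrative client count:

fun main() {
    val numberOfClients = 4 // illustrative value
    println((1..numberOfClients - 1).toList())  // [1, 2, 3]
    println((1 until numberOfClients).toList()) // [1, 2, 3]
    println((1..numberOfClients - 1).toList() == (1 until numberOfClients).toList()) // true
}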
@@ -391,7 +391,7 @@ class RPCStabilityTests {
             // Construct an RPC session manually so that we can hang in the message handler
             val myQueue = "${RPCApi.RPC_CLIENT_QUEUE_NAME_PREFIX}.test.${random63BitValue()}"
             val session = startArtemisSession(server.broker.hostAndPort!!)
-            session.createTemporaryQueue(myQueue, myQueue)
+            session.createTemporaryQueue(myQueue, ActiveMQDefaultConfiguration.getDefaultRoutingType(), myQueue)
             val consumer = session.createConsumer(myQueue, null, -1, -1, false)
             consumer.setMessageHandler {
                 Thread.sleep(50) // 5x slower than the server producer
@@ -428,7 +428,7 @@ class RPCStabilityTests {
             // Construct an RPC client session manually
             val myQueue = "${RPCApi.RPC_CLIENT_QUEUE_NAME_PREFIX}.test.${random63BitValue()}"
             val session = startArtemisSession(server.broker.hostAndPort!!)
-            session.createTemporaryQueue(myQueue, myQueue)
+            session.createTemporaryQueue(myQueue, ActiveMQDefaultConfiguration.getDefaultRoutingType(), myQueue)
             val consumer = session.createConsumer(myQueue, null, -1, -1, false)
             val replies = ArrayList<Any>()
             consumer.setMessageHandler {
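Both createTemporaryQueue call sites now pass an explicit routing type, using the Artemis client overload that takes one. A minimal sketch of the same call outside the test driver, assuming a broker reachable at tcp://localhost:61616 (the URL and queue name are illustrative, and the connection boilerplate is not part of the commit):

import org.apache.activemq.artemis.api.config.ActiveMQDefaultConfiguration
import org.apache.activemq.artemis.api.core.client.ActiveMQClient

fun main() {
    val locator = ActiveMQClient.createServerLocator("tcp://localhost:61616")
    val sessionFactory = locator.createSessionFactory()
    val session = sessionFactory.createSession()
    val myQueue = "example.temp.queue"
    // The routing type is now spelled out; the default one mirrors what the test passes.
    session.createTemporaryQueue(myQueue, ActiveMQDefaultConfiguration.getDefaultRoutingType(), myQueue)
    val consumer = session.createConsumer(myQueue)
    consumer.close()
    session.close()
    sessionFactory.close()
    locator.close()
}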