mirror of
https://github.com/corda/corda.git
synced 2025-06-17 22:58:19 +00:00
RPC: call close() on startup failure, add thread leak tests
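The pattern at the heart of this commit, stripped of the Corda specifics below: resources become nullable so that close() is safe to call at any point, and start() calls close() and rethrows if anything fails part-way through. A minimal sketch (Server and its executor field are illustrative names, not Corda APIs):

    import java.util.concurrent.ExecutorService
    import java.util.concurrent.Executors
    import java.util.concurrent.TimeUnit

    class Server {
        // Nullable rather than lateinit, so close() works even if start()
        // failed before this was assigned (mirrors the RPCServer changes below).
        private var executor: ExecutorService? = null

        fun start() {
            try {
                executor = Executors.newSingleThreadExecutor()
                // ... the rest of startup; anything thrown here ...
            } catch (exception: Throwable) {
                close()          // ... releases whatever was already allocated,
                throw exception  // then propagates, so no threads leak.
            }
        }

        fun close() {
            executor?.shutdownNow()
            executor?.awaitTermination(500, TimeUnit.MILLISECONDS)
        }
    }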
@@ -1,10 +1,11 @@
 package net.corda.node
 
 import com.google.common.base.Stopwatch
-import net.corda.node.driver.FalseNetworkMap
+import net.corda.node.driver.NetworkMapStartStrategy
 import net.corda.node.driver.driver
 import org.junit.Ignore
 import org.junit.Test
 import java.util.*
 import java.util.concurrent.TimeUnit
 
 @Ignore("Only use locally")
@@ -13,8 +14,8 @@ class NodeStartupPerformanceTests {
 
     // Measure the startup time of nodes. Note that this includes an RPC roundtrip, which causes e.g. Kryo initialisation.
     @Test
     fun `single node startup time`() {
-        driver(networkMapStrategy = FalseNetworkMap) {
-            startNetworkMapService().get()
+        driver(networkMapStartStrategy = NetworkMapStartStrategy.Dedicated(startAutomatically = false)) {
+            startDedicatedNetworkMapService().get()
             val times = ArrayList<Long>()
             for (i in 1 .. 10) {
                 val time = Stopwatch.createStarted().apply {
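For reference, the Guava Stopwatch idiom the test body uses (truncated above) measures elapsed time like this; a sketch, not the test's exact code:

    import com.google.common.base.Stopwatch
    import java.util.concurrent.TimeUnit

    val stopwatch = Stopwatch.createStarted()
    // ... start a node and wait for it to come up ...
    val elapsedMillis = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS)
    println("startup took $elapsedMillis ms")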
@@ -104,7 +104,7 @@ interface DriverDSLExposedInterface {
      * Starts a network map service node. Note that only a single one should ever be running, so you will probably want
      * to set networkMapStrategy to FalseNetworkMap in your [driver] call.
      */
-    fun startNetworkMapService(): ListenableFuture<Unit>
+    fun startDedicatedNetworkMapService(): ListenableFuture<Unit>
 
     fun waitForAllNodesToFinish()
@@ -168,6 +168,11 @@ sealed class PortAllocation {
     }
 }
 
+sealed class NetworkMapStartStrategy {
+    data class Dedicated(val startAutomatically: Boolean) : NetworkMapStartStrategy()
+    data class Nominated(val legalName: X500Name, val address: HostAndPort) : NetworkMapStartStrategy()
+}
+
 /**
  * [driver] allows one to start up nodes like this:
  *   driver {
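The sealed class replaces the open NetworkMapStrategy hierarchy deleted further down, so a `when` over it is exhaustive. Call sites select a strategy like this (the Dedicated form is taken from the test above; the Nominated arguments are illustrative):

    // Dedicated: the driver runs the network map in its own node.
    driver(networkMapStartStrategy = NetworkMapStartStrategy.Dedicated(startAutomatically = false)) {
        startDedicatedNetworkMapService().get()
        // ... start nodes ...
    }

    // Nominated: an ordinary named node doubles as the network map.
    // driver(networkMapStartStrategy = NetworkMapStartStrategy.Nominated(someLegalName, someHostAndPort)) { ... }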
@@ -201,7 +206,7 @@ fun <A> driver(
         debugPortAllocation: PortAllocation = PortAllocation.Incremental(5005),
         systemProperties: Map<String, String> = emptyMap(),
         useTestClock: Boolean = false,
-        networkMapStrategy: NetworkMapStrategy = DedicatedNetworkMap,
+        networkMapStartStrategy: NetworkMapStartStrategy = NetworkMapStartStrategy.Dedicated(startAutomatically = true),
         dsl: DriverDSLExposedInterface.() -> A
 ) = genericDriver(
         driverDsl = DriverDSL(
@@ -210,7 +215,7 @@ fun <A> driver(
                 systemProperties = systemProperties,
                 driverDirectory = driverDirectory.toAbsolutePath(),
                 useTestClock = useTestClock,
-                networkMapStrategy = networkMapStrategy,
+                networkMapStartStrategy = networkMapStartStrategy,
                 isDebug = isDebug
         ),
         coerce = { it },
@@ -412,13 +417,14 @@ class DriverDSL(
         val driverDirectory: Path,
         val useTestClock: Boolean,
         val isDebug: Boolean,
-        val networkMapStrategy: NetworkMapStrategy
+        val networkMapStartStrategy: NetworkMapStartStrategy
 ) : DriverDSLInternalInterface {
     private val dedicatedNetworkMapAddress = portAllocation.nextHostAndPort()
-    val executorService: ListeningScheduledExecutorService = MoreExecutors.listeningDecorator(
-            Executors.newScheduledThreadPool(2, ThreadFactoryBuilder().setNameFormat("driver-pool-thread-%d").build())
-    )
-    override val shutdownManager = ShutdownManager(executorService)
+    private val dedicatedNetworkMapLegalName = DUMMY_MAP.name
+    var _executorService: ListeningScheduledExecutorService? = null
+    val executorService get() = _executorService!!
+    var _shutdownManager: ShutdownManager? = null
+    override val shutdownManager get() = _shutdownManager!!
 
     class State {
         val processes = ArrayList<ListenableFuture<Process>>()
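DriverDSL here trades eager initialisation for the nullable-backing-field pattern: the public property stays non-null for callers, but allocation is deferred to start() so shutdown() can tell whether there is anything to clean up. A minimal sketch of the idiom (class and field names illustrative):

    import java.util.concurrent.ExecutorService
    import java.util.concurrent.Executors

    class Lifecycle {
        private var _executorService: ExecutorService? = null
        val executorService get() = _executorService!!  // fails fast if used before start()

        fun start() {
            _executorService = Executors.newSingleThreadExecutor()
        }

        fun shutdown() {
            _executorService?.shutdownNow()  // safe no-op if start() never ran
        }
    }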
@@ -449,8 +455,11 @@ class DriverDSL(
     }
 
     override fun shutdown() {
-        shutdownManager.shutdown()
-        executorService.shutdown()
+        _shutdownManager?.shutdown()
+        _executorService?.apply {
+            shutdownNow()
+            require(awaitTermination(1, TimeUnit.SECONDS))
+        }
     }
 
     private fun establishRpc(nodeAddress: HostAndPort, sslConfig: SSLConfiguration): ListenableFuture<CordaRPCOps> {
@@ -467,6 +476,12 @@ class DriverDSL(
         }
     }
 
+    // TODO move to companion
+    private fun toServiceConfig(address: HostAndPort, legalName: X500Name) = mapOf(
+            "address" to address.toString(),
+            "legalName" to legalName.toString()
+    )
+
     override fun startNode(
             providedName: X500Name?,
             advertisedServices: Set<ServiceInfo>,
@@ -487,7 +502,17 @@ class DriverDSL(
                 "rpcAddress" to rpcAddress.toString(),
                 "webAddress" to webAddress.toString(),
                 "extraAdvertisedServiceIds" to advertisedServices.map { it.toString() },
-                "networkMapService" to networkMapStrategy.serviceConfig(dedicatedNetworkMapAddress, name, p2pAddress),
+                "networkMapService" to when (networkMapStartStrategy) {
+                    is NetworkMapStartStrategy.Dedicated -> toServiceConfig(dedicatedNetworkMapAddress, dedicatedNetworkMapLegalName)
+                    is NetworkMapStartStrategy.Nominated -> networkMapStartStrategy.run {
+                        if (name != legalName) {
+                            toServiceConfig(address, legalName)
+                        } else {
+                            p2pAddress == address || throw IllegalArgumentException("Passed-in address $address of nominated network map $legalName is wrong, it should be: $p2pAddress")
+                            null
+                        }
+                    }
+                },
                 "useTestClock" to useTestClock,
                 "rpcUsers" to rpcUsers.map {
                     mapOf(
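The `p2pAddress == address || throw ...` line type-checks because a Kotlin throw expression has type Nothing, a subtype of every type including Boolean, so `||` accepts it as its right operand; it reads as an inline assertion:

    // "assert the condition, else fail" in one expression:
    fun checkAddress(expected: String, actual: String) {
        actual == expected || throw IllegalArgumentException("Expected $expected but got $actual")
    }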
@@ -574,21 +599,24 @@ class DriverDSL(
     }
 
     override fun start() {
-        if (networkMapStrategy.startDedicated) {
-            startNetworkMapService()
+        _executorService = MoreExecutors.listeningDecorator(
+                Executors.newScheduledThreadPool(2, ThreadFactoryBuilder().setNameFormat("driver-pool-thread-%d").build())
+        )
+        _shutdownManager = ShutdownManager(executorService)
+        if (networkMapStartStrategy is NetworkMapStartStrategy.Dedicated && networkMapStartStrategy.startAutomatically) {
+            startDedicatedNetworkMapService()
         }
     }
 
-    override fun startNetworkMapService(): ListenableFuture<Unit> {
+    override fun startDedicatedNetworkMapService(): ListenableFuture<Unit> {
         val debugPort = if (isDebug) debugPortAllocation.nextPort() else null
         val apiAddress = portAllocation.nextHostAndPort().toString()
-        val networkMapLegalName = networkMapStrategy.legalName
-        val baseDirectory = driverDirectory / networkMapLegalName.commonName
+        val baseDirectory = driverDirectory / dedicatedNetworkMapLegalName.commonName
         val config = ConfigHelper.loadConfig(
                 baseDirectory = baseDirectory,
                 allowMissingConfig = true,
                 configOverrides = mapOf(
-                        "myLegalName" to networkMapLegalName.toString(),
+                        "myLegalName" to dedicatedNetworkMapLegalName.toString(),
                         // TODO: remove the webAddress as NMS doesn't need to run a web server. This will cause all
                         // node port numbers to be shifted, so all demos and docs need to be updated accordingly.
                         "webAddress" to apiAddress,
@@ -684,3 +712,4 @@ fun writeConfig(path: Path, filename: String, config: Config) {
     path.toFile().mkdirs()
     File("$path/$filename").writeText(config.root().render(ConfigRenderOptions.defaults()))
 }
+
@@ -1,47 +0,0 @@
-package net.corda.node.driver
-
-import com.google.common.net.HostAndPort
-import net.corda.core.utilities.DUMMY_MAP
-import org.bouncycastle.asn1.x500.X500Name
-
-/**
- * Instruct the driver how to set up the network map, if at all.
- * @see FalseNetworkMap
- * @see DedicatedNetworkMap
- * @see NominatedNetworkMap
- */
-abstract class NetworkMapStrategy(internal val startDedicated: Boolean, internal val legalName: X500Name) {
-    internal abstract fun serviceConfig(dedicatedAddress: HostAndPort, nodeName: X500Name, p2pAddress: HostAndPort): Map<String, String>?
-}
-
-private fun toServiceConfig(address: HostAndPort, legalName: X500Name) = mapOf(
-        "address" to address.toString(),
-        "legalName" to legalName.toString()
-)
-
-abstract class AbstractDedicatedNetworkMap(start: Boolean) : NetworkMapStrategy(start, DUMMY_MAP.name) {
-    override fun serviceConfig(dedicatedAddress: HostAndPort, nodeName: X500Name, p2pAddress: HostAndPort) = toServiceConfig(dedicatedAddress, legalName)
-}
-
-/**
- * Do not start a network map.
- */
-object FalseNetworkMap : AbstractDedicatedNetworkMap(false)
-
-/**
- * Start a dedicated node to host the network map.
- */
-object DedicatedNetworkMap : AbstractDedicatedNetworkMap(true)
-
-/**
- * As in gradle-based demos, nominate a node to host the network map, so that there is one fewer node in total than in the [DedicatedNetworkMap] case.
- * Will fail if the port you pass in does not match the P2P port the driver assigns to the named node.
- */
-class NominatedNetworkMap(legalName: X500Name, private val address: HostAndPort) : NetworkMapStrategy(false, legalName) {
-    override fun serviceConfig(dedicatedAddress: HostAndPort, nodeName: X500Name, p2pAddress: HostAndPort) = if (nodeName != legalName) {
-        toServiceConfig(address, legalName)
-    } else {
-        p2pAddress == address || throw IllegalArgumentException("Passed-in address $address of nominated network map $legalName is wrong, it should be: $p2pAddress")
-        null
-    }
-}
@@ -13,7 +13,6 @@ import com.google.common.collect.Multimaps
 import com.google.common.collect.SetMultimap
 import com.google.common.util.concurrent.ThreadFactoryBuilder
 import net.corda.core.ErrorOr
-import net.corda.core.crypto.commonName
 import net.corda.core.messaging.RPCOps
 import net.corda.core.random63BitValue
 import net.corda.core.seconds
@@ -42,10 +41,8 @@ import rx.Subscriber
 import rx.Subscription
 import java.lang.reflect.InvocationTargetException
 import java.time.Duration
-import java.util.concurrent.ExecutorService
-import java.util.concurrent.Executors
-import java.util.concurrent.ScheduledFuture
-import java.util.concurrent.TimeUnit
+import java.util.*
+import java.util.concurrent.*
 
 data class RPCServerConfiguration(
         /** The number of threads to use for handling RPC requests */
@@ -101,22 +98,11 @@ class RPCServer(
     // A mapping from client addresses to IDs of associated Observables
     private val clientAddressToObservables = Multimaps.synchronizedSetMultimap(HashMultimap.create<SimpleString, RPCApi.ObservableId>())
     // The scheduled reaper handle.
-    private lateinit var reaperScheduledFuture: ScheduledFuture<*>
+    private var reaperScheduledFuture: ScheduledFuture<*>? = null
 
-    private val observationSendExecutor = Executors.newFixedThreadPool(
-            1,
-            ThreadFactoryBuilder().setNameFormat("rpc-observation-sender-%d").build()
-    )
-
-    private val rpcExecutor = Executors.newScheduledThreadPool(
-            rpcConfiguration.rpcThreadPoolSize,
-            ThreadFactoryBuilder().setNameFormat("rpc-server-handler-pool-%d").build()
-    )
-
-    private val reaperExecutor = Executors.newScheduledThreadPool(
-            1,
-            ThreadFactoryBuilder().setNameFormat("rpc-server-reaper-%d").build()
-    )
+    private var observationSendExecutor: ExecutorService? = null
+    private var rpcExecutor: ScheduledExecutorService? = null
+    private var reaperExecutor: ScheduledExecutorService? = null
 
     private val sessionAndConsumers = ArrayList<ArtemisConsumer>(rpcConfiguration.consumerPoolSize)
     private val sessionAndProducerPool = LazyStickyPool(rpcConfiguration.producerPoolBound) {
@@ -125,8 +111,8 @@ class RPCServer(
         session.start()
         ArtemisProducer(sessionFactory, session, session.createProducer())
     }
-    private lateinit var clientBindingRemovalConsumer: ClientConsumer
-    private lateinit var serverControl: ActiveMQServerControl
+    private var clientBindingRemovalConsumer: ClientConsumer? = null
+    private var serverControl: ActiveMQServerControl? = null
 
     private fun createObservableSubscriptionMap(): ObservableSubscriptionMap {
         val onObservableRemove = RemovalListener<RPCApi.ObservableId, ObservableSubscription> {
@@ -137,39 +123,55 @@ class RPCServer(
     }
 
     fun start(activeMqServerControl: ActiveMQServerControl) {
-        log.info("Starting RPC server with configuration $rpcConfiguration")
-        reaperScheduledFuture = reaperExecutor.scheduleAtFixedRate(
-                this::reapSubscriptions,
-                rpcConfiguration.reapInterval.toMillis(),
-                rpcConfiguration.reapInterval.toMillis(),
-                TimeUnit.MILLISECONDS
-        )
-        val sessions = ArrayList<ClientSession>()
-        for (i in 1 .. rpcConfiguration.consumerPoolSize) {
-            val sessionFactory = serverLocator.createSessionFactory()
-            val session = sessionFactory.createSession(rpcServerUsername, rpcServerPassword, false, true, true, false, DEFAULT_ACK_BATCH_SIZE)
-            val consumer = session.createConsumer(RPCApi.RPC_SERVER_QUEUE_NAME)
-            consumer.setMessageHandler(this@RPCServer::clientArtemisMessageHandler)
-            sessionAndConsumers.add(ArtemisConsumer(sessionFactory, session, consumer))
-            sessions.add(session)
-        }
-        clientBindingRemovalConsumer = sessionAndConsumers[0].session.createConsumer(RPCApi.RPC_CLIENT_BINDING_REMOVALS)
-        clientBindingRemovalConsumer.setMessageHandler(this::bindingRemovalArtemisMessageHandler)
-        serverControl = activeMqServerControl
-        lifeCycle.transition(State.UNSTARTED, State.STARTED)
-        // We delay the consumer session start because Artemis starts delivering messages immediately, so we need to be
-        // fully initialised.
-        sessions.forEach {
-            it.start()
+        try {
+            lifeCycle.requireState(State.UNSTARTED)
+            log.info("Starting RPC server with configuration $rpcConfiguration")
+            observationSendExecutor = Executors.newFixedThreadPool(
+                    1,
+                    ThreadFactoryBuilder().setNameFormat("rpc-observation-sender-%d").build()
+            )
+            rpcExecutor = Executors.newScheduledThreadPool(
+                    rpcConfiguration.rpcThreadPoolSize,
+                    ThreadFactoryBuilder().setNameFormat("rpc-server-handler-pool-%d").build()
+            )
+            reaperExecutor = Executors.newScheduledThreadPool(
+                    1,
+                    ThreadFactoryBuilder().setNameFormat("rpc-server-reaper-%d").build()
+            )
+            reaperScheduledFuture = reaperExecutor!!.scheduleAtFixedRate(
+                    this::reapSubscriptions,
+                    rpcConfiguration.reapInterval.toMillis(),
+                    rpcConfiguration.reapInterval.toMillis(),
+                    TimeUnit.MILLISECONDS
+            )
+            val sessions = ArrayList<ClientSession>()
+            for (i in 1 .. rpcConfiguration.consumerPoolSize) {
+                val sessionFactory = serverLocator.createSessionFactory()
+                val session = sessionFactory.createSession(rpcServerUsername, rpcServerPassword, false, true, true, false, DEFAULT_ACK_BATCH_SIZE)
+                val consumer = session.createConsumer(RPCApi.RPC_SERVER_QUEUE_NAME)
+                consumer.setMessageHandler(this@RPCServer::clientArtemisMessageHandler)
+                sessionAndConsumers.add(ArtemisConsumer(sessionFactory, session, consumer))
+                sessions.add(session)
+            }
+            clientBindingRemovalConsumer = sessionAndConsumers[0].session.createConsumer(RPCApi.RPC_CLIENT_BINDING_REMOVALS)
+            clientBindingRemovalConsumer!!.setMessageHandler(this::bindingRemovalArtemisMessageHandler)
+            serverControl = activeMqServerControl
+            lifeCycle.transition(State.UNSTARTED, State.STARTED)
+            // We delay the consumer session start because Artemis starts delivering messages immediately, so we need to be
+            // fully initialised.
+            sessions.forEach {
+                it.start()
+            }
+        } catch (exception: Throwable) {
+            close()
+            throw exception
         }
     }
 
     fun close() {
-        reaperScheduledFuture.cancel(false)
-        rpcExecutor.shutdownNow()
-        reaperExecutor.shutdownNow()
-        rpcExecutor.awaitTermination(500, TimeUnit.MILLISECONDS)
-        reaperExecutor.awaitTermination(500, TimeUnit.MILLISECONDS)
+        reaperScheduledFuture?.cancel(false)
+        rpcExecutor?.shutdownNow()
+        reaperExecutor?.shutdownNow()
         sessionAndConsumers.forEach {
             it.consumer.close()
             it.session.close()
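The commit title also mentions thread leak tests, which are not part of this excerpt. A hypothetical sketch of what such a test can check, assuming the pool name prefixes configured above:

    import org.junit.Test

    class RPCThreadLeakTest {
        @Test
        fun `rpc server close leaves no pool threads behind`() {
            // ... start an RPCServer and then close() it here ...
            val leaked = Thread.getAllStackTraces().keys.map { it.name }.filter {
                it.startsWith("rpc-server-handler-pool") ||
                        it.startsWith("rpc-server-reaper") ||
                        it.startsWith("rpc-observation-sender")
            }
            require(leaked.isEmpty()) { "Leaked threads: $leaked" }
        }
    }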
@@ -182,7 +184,7 @@ class RPCServer(
             it.session.close()
             it.sessionFactory.close()
         }
-        lifeCycle.transition(State.STARTED, State.FINISHED)
+        lifeCycle.transition(State.FINISHED)
     }
 
     private fun bindingRemovalArtemisMessageHandler(artemisMessage: ClientMessage) {
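Dropping the STARTED precondition matters for the new error path: start() now calls close() from the UNSTARTED state when it fails part-way, so close() can no longer assert that it only ever runs after a successful start. A hypothetical sketch of the two transition overloads this implies (Corda's actual LifeCycle helper is not shown in this diff):

    class LifeCycle<S>(private var state: S) {
        fun requireState(expected: S) = require(state == expected) { "Expected $expected, was $state" }
        // Two-argument form: assert the current state, then move.
        fun transition(from: S, to: S) { requireState(from); state = to }
        // One-argument form: move unconditionally, callable from any state,
        // which is what close() needs when start() failed early.
        fun transition(to: S) { state = to }
    }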
@@ -211,7 +213,7 @@ class RPCServer(
         val rpcContext = RpcContext(
                 currentUser = getUser(artemisMessage)
         )
-        rpcExecutor.submit {
+        rpcExecutor!!.submit {
             val result = ErrorOr.catch {
                 try {
                     CURRENT_RPC_CONTEXT.set(rpcContext)
@@ -239,9 +241,9 @@ class RPCServer(
                     observableMap,
                     clientAddressToObservables,
                     clientToServer.clientAddress,
-                    serverControl,
+                    serverControl!!,
                     sessionAndProducerPool,
-                    observationSendExecutor,
+                    observationSendExecutor!!,
                     kryoPool
             )
             observableContext.sendMessage(reply)