Mirror of https://github.com/corda/corda.git, synced 2025-06-17 22:58:19 +00:00
BFT fixes (#844)
* Fix BFT config that only worked with clusters of size 4
* Shutdown BFT properly so that tests can run back to back in theory
* Wait for initial connection between client and all replicas before touching it
* Add test for non-trivial BFT cluster
* Shutdown NodeBasedTest nodes in parallel, as BFT shutdown time is non-trivial
* Overlapping ports check no longer assumes all on localhost
* Fix overlapping ports test to actually check the messages
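The cluster sizes in these changes follow the standard BFT bound n >= 3f + 1. As a quick reference, here is a minimal Kotlin sketch of that arithmetic: `maxFaultyReplicas` mirrors the helper visible in the BFTSMaRtConfig hunk below, while the bodies of `minCorrectReplicas` and `minClusterSize` (which the new test only imports) are assumptions based on the usual 2f + 1 and 3f + 1 formulas.

```kotlin
// Minimal sketch of the BFT sizing arithmetic behind "clusters of size 4" and the
// 1-faulty/2-faulty tests. maxFaultyReplicas matches the diff; the other two bodies
// are assumptions based on the standard 3f + 1 bound.
fun maxFaultyReplicas(clusterSize: Int) = (clusterSize - 1) / 3       // f
fun minCorrectReplicas(clusterSize: Int) = (2 * clusterSize + 3) / 3  // 2f + 1
fun minClusterSize(faultyReplicas: Int) = 3 * faultyReplicas + 1      // n = 3f + 1

fun main() {
    check(maxFaultyReplicas(4) == 1)                         // a 4-node cluster tolerates 1 fault
    check(minClusterSize(1) == 4 && minClusterSize(2) == 7)  // tolerating 2 faults needs 7 nodes
    check(minCorrectReplicas(4) == 3 && minCorrectReplicas(7) == 5)
}
```
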
@@ -1,14 +1,14 @@
package net.corda.node.services

import net.corda.core.contracts.DummyContract
import net.corda.core.contracts.StateAndRef
import net.corda.core.contracts.StateRef
import net.corda.core.contracts.TransactionType
import com.google.common.util.concurrent.Futures
import com.google.common.util.concurrent.ListenableFuture
import net.corda.core.*
import net.corda.core.contracts.*
import net.corda.core.crypto.CompositeKey
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.appendToCommonName
import net.corda.core.getOrThrow
import net.corda.core.identity.Party
import net.corda.core.node.services.ServiceInfo
import net.corda.core.node.services.ServiceType
import net.corda.core.utilities.ALICE
import net.corda.core.utilities.DUMMY_CA
import net.corda.core.utilities.DUMMY_NOTARY
@@ -16,69 +16,107 @@ import net.corda.flows.NotaryError
import net.corda.flows.NotaryException
import net.corda.flows.NotaryFlow
import net.corda.node.internal.AbstractNode
import net.corda.node.internal.Node
import net.corda.node.services.transactions.BFTNonValidatingNotaryService
import net.corda.node.services.transactions.minClusterSize
import net.corda.node.services.transactions.minCorrectReplicas
import net.corda.node.utilities.ServiceIdentityGenerator
import net.corda.node.utilities.transaction
import net.corda.testing.node.NodeBasedTest
import org.bouncycastle.asn1.x500.X500Name
import org.junit.Test
import java.util.*
import kotlin.test.assertEquals
import kotlin.test.assertFailsWith
import java.nio.file.Files
import kotlin.test.*

class BFTNotaryServiceTests : NodeBasedTest() {
@Test
fun `detect double spend`() {
val clusterName = X500Name("CN=BFT,O=R3,OU=corda,L=Zurich,C=CH")
startBFTNotaryCluster(clusterName, 4, BFTNonValidatingNotaryService.type)
val alice = startNode(ALICE.name).getOrThrow()
val notaryParty = alice.netMapCache.getNotary(clusterName)!!
val inputState = issueState(alice, notaryParty)
val firstTxBuilder = TransactionType.General.Builder(notaryParty).withItems(inputState)
val firstSpendTx = alice.services.signInitialTransaction(firstTxBuilder)
alice.services.startFlow(NotaryFlow.Client(firstSpendTx)).resultFuture.getOrThrow()
val secondSpendBuilder = TransactionType.General.Builder(notaryParty).withItems(inputState).also {
it.addOutputState(DummyContract.SingleOwnerState(0, alice.info.legalIdentity))
}
val secondSpendTx = alice.services.signInitialTransaction(secondSpendBuilder)
val secondSpend = alice.services.startFlow(NotaryFlow.Client(secondSpendTx))
val ex = assertFailsWith(NotaryException::class) {
secondSpend.resultFuture.getOrThrow()
}
val error = ex.error as NotaryError.Conflict
assertEquals(error.txId, secondSpendTx.id)
companion object {
private val clusterName = X500Name("CN=BFT,O=R3,OU=corda,L=Zurich,C=CH")
private val serviceType = BFTNonValidatingNotaryService.type
}

private fun issueState(node: AbstractNode, notary: Party) = node.run {
database.transaction {
val builder = DummyContract.generateInitial(Random().nextInt(), notary, info.legalIdentity.ref(0))
val stx = services.signInitialTransaction(builder)
services.recordTransactions(listOf(stx))
StateAndRef(builder.outputStates().first(), StateRef(stx.id, 0))
}
}

private fun startBFTNotaryCluster(clusterName: X500Name,
clusterSize: Int,
serviceType: ServiceType) {
require(clusterSize > 0)
val replicaNames = (0 until clusterSize).map { DUMMY_NOTARY.name.appendToCommonName(" $it") }
ServiceIdentityGenerator.generateToDisk(
private fun bftNotaryCluster(clusterSize: Int): ListenableFuture<Party> {
Files.deleteIfExists("config" / "currentView") // XXX: Make config object warn if this exists?
val replicaIds = (0 until clusterSize)
val replicaNames = replicaIds.map { DUMMY_NOTARY.name.appendToCommonName(" $it") }
val party = ServiceIdentityGenerator.generateToDisk(
replicaNames.map { baseDirectory(it) },
DUMMY_CA,
serviceType.id,
clusterName,
minCorrectReplicas(clusterSize))
val serviceInfo = ServiceInfo(serviceType, clusterName)
val notaryClusterAddresses = (0 until clusterSize).map { "localhost:${11000 + it * 10}" }
(0 until clusterSize).forEach {
clusterName).party
val advertisedServices = setOf(ServiceInfo(serviceType, clusterName))
val config = mapOf("notaryClusterAddresses" to replicaIds.map { "localhost:${11000 + it * 10}" })
return Futures.allAsList(replicaIds.map {
startNode(
replicaNames[it],
advertisedServices = setOf(serviceInfo),
configOverrides = mapOf("bftReplicaId" to it, "notaryClusterAddresses" to notaryClusterAddresses)
).getOrThrow()
advertisedServices = advertisedServices,
configOverrides = mapOf("bftReplicaId" to it) + config
)
}).map { party }
}

@Test
fun `detect double spend 1 faulty`() {
detectDoubleSpend(1)
}

@Test
fun `detect double spend 2 faulty`() {
detectDoubleSpend(2)
}

private fun detectDoubleSpend(faultyReplicas: Int) {
val clusterSize = minClusterSize(faultyReplicas)
val aliceFuture = startNode(ALICE.name)
val notary = bftNotaryCluster(clusterSize).getOrThrow()
aliceFuture.getOrThrow().run {
val issueTx = signInitialTransaction(notary) {
addOutputState(DummyContract.SingleOwnerState(owner = info.legalIdentity))
}
database.transaction {
services.recordTransactions(issueTx)
}
val spendTxs = (1..10).map {
signInitialTransaction(notary, true) {
addInputState(issueTx.tx.outRef<ContractState>(0))
}
}
assertEquals(spendTxs.size, spendTxs.map { it.id }.distinct().size)
val flows = spendTxs.map { NotaryFlow.Client(it) }
val stateMachines = flows.map { services.startFlow(it) }
val results = stateMachines.map { ErrorOr.catch { it.resultFuture.getOrThrow() } }
val successfulIndex = results.mapIndexedNotNull { index, result ->
if (result.error == null) {
val signers = result.getOrThrow().map { it.by }
assertEquals(minCorrectReplicas(clusterSize), signers.size)
signers.forEach {
assertTrue(it in (notary.owningKey as CompositeKey).leafKeys)
}
index
} else {
null
}
}.single()
spendTxs.zip(results).forEach { (tx, result) ->
if (result.error != null) {
val error = (result.error as NotaryException).error as NotaryError.Conflict
assertEquals(tx.id, error.txId)
val (stateRef, consumingTx) = error.conflict.verified().stateHistory.entries.single()
assertEquals(StateRef(issueTx.id, 0), stateRef)
assertEquals(spendTxs[successfulIndex].id, consumingTx.id)
assertEquals(0, consumingTx.inputIndex)
assertEquals(info.legalIdentity, consumingTx.requestingParty)
}
}
}
}
}

private fun AbstractNode.signInitialTransaction(
notary: Party,
makeUnique: Boolean = false,
block: TransactionType.General.Builder.() -> Any?
) = services.signInitialTransaction(TransactionType.General.Builder(notary).apply {
block()
if (makeUnique) {
addAttachment(SecureHash.randomSHA256())
}
})

@@ -1,7 +1,8 @@
package net.corda.node

import net.corda.core.DeclaredField
import net.corda.core.DeclaredField.Companion.declaredField
import net.corda.node.internal.Node
import java.lang.reflect.Field
import java.lang.reflect.Method
import java.lang.reflect.Proxy

@@ -11,7 +12,7 @@ internal object SerialFilter {
private val undecided: Any
private val rejected: Any
private val serialFilterLock: Any
private val serialFilterField: Field
private val serialFilterField: DeclaredField<Any>

init {
// ObjectInputFilter and friends are in java.io in Java 9 but sun.misc in backports:
@@ -31,8 +32,8 @@ internal object SerialFilter {
undecided = statusEnum.getField("UNDECIDED").get(null)
rejected = statusEnum.getField("REJECTED").get(null)
val configClass = Class.forName("${filterInterface.name}\$Config")
serialFilterLock = configClass.getDeclaredField("serialFilterLock").also { it.isAccessible = true }.get(null)
serialFilterField = configClass.getDeclaredField("serialFilter").also { it.isAccessible = true }
serialFilterLock = declaredField<Any>(configClass, "serialFilterLock").value
serialFilterField = declaredField(configClass, "serialFilter")
}

internal fun install(acceptClass: (Class<*>) -> Boolean) {
@@ -46,7 +47,7 @@
}
// Can't simply use the setter as in non-trampoline mode Capsule has inited the filter in premain:
synchronized(serialFilterLock) {
serialFilterField.set(null, filter)
serialFilterField.value = filter
}
}

@@ -669,8 +669,10 @@ abstract class AbstractNode(open val configuration: NodeConfiguration,
BFTNonValidatingNotaryService.type -> with(configuration as FullNodeConfiguration) {
val replicaId = bftReplicaId ?: throw IllegalArgumentException("bftReplicaId value must be specified in the configuration")
BFTSMaRtConfig(notaryClusterAddresses).use { config ->
val client = BFTSMaRt.Client(config, replicaId).also { tokenizableServices += it } // (Ab)use replicaId for clientId.
BFTNonValidatingNotaryService(config, services, timeWindowChecker, replicaId, database, client)
BFTNonValidatingNotaryService(config, services, timeWindowChecker, replicaId, database).also {
tokenizableServices += it.client
runOnStop += it::dispose
}
}
}
else -> {

@@ -6,6 +6,7 @@ import co.paralleluniverse.fibers.Suspendable
import co.paralleluniverse.strands.Strand
import com.google.common.util.concurrent.ListenableFuture
import com.google.common.util.concurrent.SettableFuture
import net.corda.core.DeclaredField.Companion.declaredField
import net.corda.core.ErrorOr
import net.corda.core.abbreviate
import net.corda.core.crypto.SecureHash
@@ -40,11 +41,7 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
override val flowInitiator: FlowInitiator) : Fiber<Unit>(id.toString(), scheduler), FlowStateMachine<R> {
companion object {
// Used to work around a small limitation in Quasar.
private val QUASAR_UNBLOCKER = run {
val field = Fiber::class.java.getDeclaredField("SERIALIZER_BLOCKER")
field.isAccessible = true
field.get(null)
}
private val QUASAR_UNBLOCKER = declaredField<Any>(Fiber::class, "SERIALIZER_BLOCKER").value

/**
* Return the current [FlowStateMachineImpl] or null if executing outside of one.

@@ -1,8 +1,10 @@
package net.corda.node.services.transactions

import co.paralleluniverse.fibers.Suspendable
import com.google.common.util.concurrent.SettableFuture
import net.corda.core.crypto.DigitalSignature
import net.corda.core.flows.FlowLogic
import net.corda.core.getOrThrow
import net.corda.core.identity.Party
import net.corda.core.node.services.TimeWindowChecker
import net.corda.core.serialization.deserialize
@@ -14,7 +16,6 @@ import net.corda.core.utilities.unwrap
import net.corda.flows.NotaryException
import net.corda.node.services.api.ServiceHubInternal
import org.jetbrains.exposed.sql.Database
import java.nio.file.Path
import kotlin.concurrent.thread

/**
@@ -25,18 +26,27 @@ import kotlin.concurrent.thread
class BFTNonValidatingNotaryService(config: BFTSMaRtConfig,
services: ServiceHubInternal,
timeWindowChecker: TimeWindowChecker,
serverId: Int,
db: Database,
private val client: BFTSMaRt.Client) : NotaryService {
replicaId: Int,
db: Database) : NotaryService {
val client = BFTSMaRt.Client(config, replicaId) // (Ab)use replicaId for clientId.
private val replicaHolder = SettableFuture.create<Replica>()

init {
// Replica startup must be in parallel with other replicas, otherwise the constructor may not return:
val configHandle = config.handle()
thread(name = "BFTSmartServer-$serverId", isDaemon = true) {
thread(name = "BFT SMaRt replica $replicaId init", isDaemon = true) {
configHandle.use {
Server(configHandle.path, serverId, db, "bft_smart_notary_committed_states", services, timeWindowChecker)
replicaHolder.set(Replica(it, replicaId, db, "bft_smart_notary_committed_states", services, timeWindowChecker))
log.info("BFT SMaRt replica $replicaId is running.")
}
}
}

fun dispose() {
replicaHolder.getOrThrow().dispose()
client.dispose()
}

companion object {
val type = SimpleNotaryService.type.getSubType("bft")
private val log = loggerFor<BFTNonValidatingNotaryService>()
@@ -67,12 +77,12 @@ class BFTNonValidatingNotaryService(config: BFTSMaRtConfig,
}
}

private class Server(configHome: Path,
id: Int,
db: Database,
tableName: String,
services: ServiceHubInternal,
timeWindowChecker: TimeWindowChecker) : BFTSMaRt.Server(configHome, id, db, tableName, services, timeWindowChecker) {
private class Replica(config: BFTSMaRtConfig,
replicaId: Int,
db: Database,
tableName: String,
services: ServiceHubInternal,
timeWindowChecker: TimeWindowChecker) : BFTSMaRt.Replica(config, replicaId, db, tableName, services, timeWindowChecker) {

override fun executeCommand(command: ByteArray): ByteArray {
val request = command.deserialize<BFTSMaRt.CommitRequest>()

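The service wiring above builds the replica on a daemon thread and parks it in a SettableFuture so that dispose() can block until startup has actually completed. Here is a generic sketch of that startup/shutdown pattern, assuming Guava on the classpath; the class and method names below are illustrative and not taken from the diff.

```kotlin
import com.google.common.util.concurrent.SettableFuture
import kotlin.concurrent.thread

// A stand-in for a resource that is slow to construct, like a BFT replica.
class SlowResource(val id: Int) {
    fun dispose() = println("resource $id disposed")
}

class Holder(id: Int) {
    private val holder = SettableFuture.create<SlowResource>()

    init {
        // Startup must not block the constructor (replicas only make progress together):
        thread(name = "init $id", isDaemon = true) {
            holder.set(SlowResource(id))
        }
    }

    fun dispose() {
        holder.get().dispose() // Blocks until startup has finished, then tears down.
    }
}

fun main() {
    Holder(0).dispose()
}
```
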
@@ -1,12 +1,17 @@
package net.corda.node.services.transactions

import bftsmart.communication.ServerCommunicationSystem
import bftsmart.communication.client.netty.NettyClientServerCommunicationSystemClientSide
import bftsmart.communication.client.netty.NettyClientServerSession
import bftsmart.tom.MessageContext
import bftsmart.tom.ServiceProxy
import bftsmart.tom.ServiceReplica
import bftsmart.tom.core.TOMLayer
import bftsmart.tom.core.messages.TOMMessage
import bftsmart.tom.server.defaultservices.DefaultRecoverable
import bftsmart.tom.server.defaultservices.DefaultReplier
import bftsmart.tom.util.Extractor
import net.corda.core.DeclaredField.Companion.declaredField
import net.corda.core.contracts.StateRef
import net.corda.core.contracts.TimeWindow
import net.corda.core.crypto.DigitalSignature
@@ -20,6 +25,7 @@ import net.corda.core.serialization.CordaSerializable
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.toTypedArray
import net.corda.core.transactions.FilteredTransaction
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.debug
@@ -28,7 +34,7 @@ import net.corda.flows.NotaryError
import net.corda.flows.NotaryException
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.transactions.BFTSMaRt.Client
import net.corda.node.services.transactions.BFTSMaRt.Server
import net.corda.node.services.transactions.BFTSMaRt.Replica
import net.corda.node.utilities.JDBCHashMap
import net.corda.node.utilities.transaction
import org.jetbrains.exposed.sql.Database
@@ -37,10 +43,9 @@ import java.util.*

/**
* Implements a replicated transaction commit log based on the [BFT-SMaRt](https://github.com/bft-smart/library)
* consensus algorithm. Every replica in the cluster is running a [Server] maintaining the state, and a[Client] is used
* to to relay state modification requests to all [Server]s.
* consensus algorithm. Every replica in the cluster is running a [Replica] maintaining the state, and a [Client] is used
* to relay state modification requests to all [Replica]s.
*/
// TODO: Write bft-smart host config file based on Corda node configuration.
// TODO: Define and document the configuration of the bft-smart cluster.
// TODO: Potentially update the bft-smart API for our use case or rebuild client and server from lower level building
// blocks bft-smart provides.
@@ -49,18 +54,18 @@ import java.util.*
// consensus about membership changes). Nodes that join the cluster for the first time or re-join can go through
// a "recovering" state and request missing data from their peers.
object BFTSMaRt {
/** Sent from [Client] to [Server]. */
/** Sent from [Client] to [Replica]. */
@CordaSerializable
data class CommitRequest(val tx: Any, val callerIdentity: Party)

/** Sent from [Server] to [Client]. */
/** Sent from [Replica] to [Client]. */
@CordaSerializable
sealed class ReplicaResponse {
data class Error(val error: NotaryError) : ReplicaResponse()
data class Signature(val txSignature: DigitalSignature) : ReplicaResponse()
}

/** An aggregate response from all replica ([Server]) replies sent from [Client] back to the calling application. */
/** An aggregate response from all replica ([Replica]) replies sent from [Client] back to the calling application. */
@CordaSerializable
sealed class ClusterResponse {
data class Error(val error: NotaryError) : ClusterResponse()
@@ -68,15 +73,26 @@ object BFTSMaRt {
}

class Client(config: BFTSMaRtConfig, private val clientId: Int) : SingletonSerializeAsToken() {
private val configHandle = config.handle()

companion object {
private val log = loggerFor<Client>()
}

/** A proxy for communicating with the BFT cluster */
private val proxy: ServiceProxy by lazy {
configHandle.use { buildProxy(it.path) }
private val proxy = ServiceProxy(clientId, config.path.toString(), buildResponseComparator(), buildExtractor())
private val sessionTable = (proxy.communicationSystem as NettyClientServerCommunicationSystemClientSide).declaredField<Map<Int, NettyClientServerSession>>("sessionTable").value

fun dispose() {
proxy.close() // XXX: Does this do enough?
}

private fun awaitClientConnectionToCluster() {
// TODO: Hopefully we only need to wait for the client's initial connection to the cluster, and this method can be moved to some startup code.
while (true) {
val inactive = sessionTable.entries.mapNotNull { if (it.value.channel.isActive) null else it.key }
if (inactive.isEmpty()) break
log.info("Client-replica channels not yet active: $clientId to $inactive")
Thread.sleep((inactive.size * 100).toLong())
}
}

/**
@@ -85,16 +101,10 @@ object BFTSMaRt {
*/
fun commitTransaction(transaction: Any, otherSide: Party): ClusterResponse {
require(transaction is FilteredTransaction || transaction is SignedTransaction) { "Unsupported transaction type: ${transaction.javaClass.name}" }
val request = CommitRequest(transaction, otherSide)
val responseBytes = proxy.invokeOrdered(request.serialize().bytes)
val response = responseBytes.deserialize<ClusterResponse>()
return response
}

private fun buildProxy(configHome: Path): ServiceProxy {
val comparator = buildResponseComparator()
val extractor = buildExtractor()
return ServiceProxy(clientId, configHome.toString(), comparator, extractor)
awaitClientConnectionToCluster()
val requestBytes = CommitRequest(transaction, otherSide).serialize().bytes
val responseBytes = proxy.invokeOrdered(requestBytes)
return responseBytes.deserialize<ClusterResponse>()
}

/** A comparator to check if replies from two replicas are the same. */
@@ -136,29 +146,46 @@ object BFTSMaRt {
}
}

/** ServiceReplica doesn't have any kind of shutdown method, so we add one in this subclass. */
private class CordaServiceReplica(replicaId: Int, configHome: Path, owner: DefaultRecoverable) : ServiceReplica(replicaId, configHome.toString(), owner, owner, null, DefaultReplier()) {
private val tomLayerField = declaredField<TOMLayer>(ServiceReplica::class, "tomLayer")
private val csField = declaredField<ServerCommunicationSystem>(ServiceReplica::class, "cs")
fun dispose() {
// Half of what restart does:
val tomLayer = tomLayerField.value
tomLayer.shutdown() // Non-blocking.
val cs = csField.value
cs.join()
cs.serversConn.join()
tomLayer.join()
tomLayer.deliveryThread.join()
// TODO: At the cluster level, join all Sender/Receiver threads.
}
}

/**
* Maintains the commit log and executes commit commands received from the [Client].
*
* The validation logic can be specified by implementing the [executeCommand] method.
*/
@Suppress("LeakingThis")
abstract class Server(configHome: Path,
val replicaId: Int,
val db: Database,
tableName: String,
val services: ServiceHubInternal,
val timeWindowChecker: TimeWindowChecker) : DefaultRecoverable() {
abstract class Replica(config: BFTSMaRtConfig,
replicaId: Int,
private val db: Database,
tableName: String,
private val services: ServiceHubInternal,
private val timeWindowChecker: TimeWindowChecker) : DefaultRecoverable() {
companion object {
private val log = loggerFor<Server>()
private val log = loggerFor<Replica>()
}

// TODO: Use Requery with proper DB schema instead of JDBCHashMap.
// Must be initialised before ServiceReplica is started
val commitLog = db.transaction { JDBCHashMap<StateRef, UniquenessProvider.ConsumingTx>(tableName) }
private val commitLog = db.transaction { JDBCHashMap<StateRef, UniquenessProvider.ConsumingTx>(tableName) }
@Suppress("LeakingThis")
private val replica = CordaServiceReplica(replicaId, config.path, this)

init {
// TODO: Looks like this statement is blocking. Investigate the bft-smart node startup.
ServiceReplica(replicaId, configHome.toString(), this, this, null, DefaultReplier())
fun dispose() {
replica.dispose()
}

override fun appExecuteUnordered(command: ByteArray, msgCtx: MessageContext): ByteArray? {
@@ -166,10 +193,7 @@ object BFTSMaRt {
}

override fun appExecuteBatch(command: Array<ByteArray>, mcs: Array<MessageContext>): Array<ByteArray?> {
val replies = command.zip(mcs) { c, _ ->
executeCommand(c)
}
return replies.toTypedArray()
return Arrays.stream(command).map(this::executeCommand).toTypedArray()
}

/**

@@ -12,27 +12,27 @@ import java.nio.file.Files
* Each instance of this class creates such a configHome, accessible via [path].
* The files are deleted on [close] typically via [use], see [PathManager] for details.
*/
class BFTSMaRtConfig(replicaAddresses: List<HostAndPort>) : PathManager<BFTSMaRtConfig>(Files.createTempDirectory("bft-smart-config")) {
class BFTSMaRtConfig(private val replicaAddresses: List<HostAndPort>, debug: Boolean = false) : PathManager<BFTSMaRtConfig>(Files.createTempDirectory("bft-smart-config")) {
companion object {
internal val portIsClaimedFormat = "Port %s is claimed by another replica: %s"
}

init {
val claimedPorts = mutableSetOf<Int>()
replicaAddresses.map { it.port }.forEach { base ->
val claimedPorts = mutableSetOf<HostAndPort>()
val n = replicaAddresses.size
(0 until n).forEach { replicaId ->
// Each replica claims the configured port and the next one:
(0..1).map { base + it }.forEach { port ->
replicaPorts(replicaId).forEach { port ->
claimedPorts.add(port) || throw IllegalArgumentException(portIsClaimedFormat.format(port, claimedPorts))
}
}
configWriter("hosts.config") {
replicaAddresses.forEachIndexed { index, address ->
// The documentation strongly recommends IP addresses:
println("${index} ${InetAddress.getByName(address.host).hostAddress} ${address.port}")
println("$index ${InetAddress.getByName(address.host).hostAddress} ${address.port}")
}
}
val n = replicaAddresses.size
val systemConfig = String.format(javaClass.getResource("system.config.printf").readText(), n, maxFaultyReplicas(n))
val systemConfig = String.format(javaClass.getResource("system.config.printf").readText(), n, maxFaultyReplicas(n), if (debug) 1 else 0, (0 until n).joinToString(","))
configWriter("system.config") {
print(systemConfig)
}
@@ -46,6 +46,11 @@ class BFTSMaRtConfig(replicaAddresses: List<HostAndPort>) : PathManager<BFTSMaRt
}
}
}

private fun replicaPorts(replicaId: Int): List<HostAndPort> {
val base = replicaAddresses[replicaId]
return (0..1).map { HostAndPort.fromParts(base.host, base.port + it) }
}
}

fun maxFaultyReplicas(clusterSize: Int) = (clusterSize - 1) / 3

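The overlapping-ports check above now records full host-and-port pairs rather than bare port numbers, so replicas on different hosts may legitimately share a port number. The following self-contained sketch reproduces that check with a plain data class standing in for Guava's HostAndPort; the names here are illustrative.

```kotlin
// Sketch of the per-replica port-claim check from the diff. Each replica claims its
// configured port and the next one, keyed by host:port rather than by port alone.
data class Address(val host: String, val port: Int) {
    override fun toString() = "$host:$port"
}

fun replicaPorts(replicaAddresses: List<Address>, replicaId: Int): List<Address> {
    val base = replicaAddresses[replicaId]
    return (0..1).map { Address(base.host, base.port + it) }
}

fun checkNoOverlap(replicaAddresses: List<Address>) {
    val claimed = mutableSetOf<Address>()
    replicaAddresses.indices.forEach { replicaId ->
        replicaPorts(replicaAddresses, replicaId).forEach { port ->
            require(claimed.add(port)) { "Port $port is claimed by another replica: $claimed" }
        }
    }
}

fun main() {
    checkNoOverlap(listOf(Address("localhost", 11000), Address("localhost", 11002))) // Non-overlapping.
    checkNoOverlap(listOf(Address("hostA", 11000), Address("hostB", 11000)))         // Same port, different hosts: fine.
    try {
        checkNoOverlap(listOf(Address("localhost", 11000), Address("localhost", 11001)))
    } catch (e: IllegalArgumentException) {
        println(e.message) // Port localhost:11001 is claimed by another replica: [localhost:11000, localhost:11001]
    }
}
```
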
@@ -32,9 +32,8 @@ object ServiceIdentityGenerator {
serviceCa: CertificateAndKeyPair,
serviceId: String,
serviceName: X500Name,
threshold: Int = 1) {
threshold: Int = 1): PartyAndCertificate {
log.trace { "Generating a group identity \"serviceName\" for nodes: ${dirs.joinToString()}" }

val keyPairs = (1..dirs.size).map { generateKeyPair() }
val notaryKey = CompositeKey.Builder().addKeys(keyPairs.map { it.public }).build(threshold)
// TODO: This doesn't work until we have composite keys in X.509 certificates, so we make up a certificate that nothing checks
@@ -42,15 +41,16 @@ object ServiceIdentityGenerator {
// serviceCa.keyPair, serviceName, notaryKey)
val notaryCert = X509Utilities.createSelfSignedCACertificate(serviceName, generateKeyPair())
val notaryCertPath = X509Utilities.createCertificatePath(serviceCa.certificate, notaryCert, revocationEnabled = false)
val notaryParty = PartyAndCertificate(serviceName, notaryKey, notaryCert, notaryCertPath).serialize()

val notaryParty = PartyAndCertificate(serviceName, notaryKey, notaryCert, notaryCertPath)
val notaryPartyBytes = notaryParty.serialize()
val privateKeyFile = "$serviceId-private-key"
val publicKeyFile = "$serviceId-public"
keyPairs.zip(dirs) { keyPair, dir ->
Files.createDirectories(dir)
val privateKeyFile = "$serviceId-private-key"
val publicKeyFile = "$serviceId-public"
notaryParty.writeToFile(dir.resolve(publicKeyFile))
notaryPartyBytes.writeToFile(dir.resolve(publicKeyFile))
// Use storageKryo as our whitelist is not available in the gradle build environment:
keyPair.serialize(storageKryo()).writeToFile(dir.resolve(privateKeyFile))
}
return notaryParty
}
}

@@ -65,7 +65,7 @@ system.communication.useSignatures = 0
system.communication.useMACs = 1

#Set to 1 if SMaRt should use the standard output to display debug messages, set to 0 if otherwise
system.debug = 0
system.debug = %s

#Print information about the replica when it is shutdown
system.shutdownhook = true
@@ -109,7 +109,7 @@ system.totalordermulticast.sync_ckp = false

#Replicas ID for the initial view, separated by a comma.
# The number of replicas in this parameter should be equal to that specified in 'system.servers.num'
system.initial.view = 0,1,2,3
system.initial.view = %s

#The ID of the trust third party (TTP)
system.ttp.id = 7002

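The `%s` placeholders above are filled by the `String.format` call shown in the BFTSMaRtConfig hunk, together with the cluster size and fault bound. A small sketch of that rendering follows; the placeholder order and the `system.servers.*` keys earlier in the template are assumptions, since only part of the resource file appears in this diff.

```kotlin
// Hypothetical rendering of a cut-down system.config.printf, mirroring the argument
// order of the String.format call in BFTSMaRtConfig.
fun renderSystemConfig(template: String, n: Int, debug: Boolean): String =
    String.format(
        template,
        n,                            // system.servers.num (assumed placeholder)
        (n - 1) / 3,                  // system.servers.f, i.e. maxFaultyReplicas(n) (assumed)
        if (debug) 1 else 0,          // system.debug
        (0 until n).joinToString(",") // system.initial.view
    )

fun main() {
    val template = """
        system.servers.num = %s
        system.servers.f = %s
        system.debug = %s
        system.initial.view = %s
    """.trimIndent()
    // system.initial.view becomes "0,1,2,3,4,5,6" instead of the hard-coded "0,1,2,3".
    println(renderSystemConfig(template, n = 7, debug = false))
}
```
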
@@ -2,9 +2,9 @@ package net.corda.node.services.transactions

import com.google.common.net.HostAndPort
import net.corda.node.services.transactions.BFTSMaRtConfig.Companion.portIsClaimedFormat
import org.assertj.core.api.Assertions.assertThatThrownBy
import org.junit.Test
import kotlin.test.assertEquals
import kotlin.test.assertFailsWith

class BFTSMaRtConfigTests {
@Test
@@ -29,12 +29,12 @@ class BFTSMaRtConfigTests {
@Test
fun `overlapping port ranges are rejected`() {
fun addresses(vararg ports: Int) = ports.map { HostAndPort.fromParts("localhost", it) }
assertFailsWith(IllegalArgumentException::class, portIsClaimedFormat.format(11001, setOf(11000, 11001))) {
BFTSMaRtConfig(addresses(11000, 11001)).use {}
}
assertFailsWith(IllegalArgumentException::class, portIsClaimedFormat.format(11001, setOf(11001, 11002))) {
BFTSMaRtConfig(addresses(11001, 11000)).use {}
}
assertThatThrownBy { BFTSMaRtConfig(addresses(11000, 11001)).use {} }
.isInstanceOf(IllegalArgumentException::class.java)
.hasMessage(portIsClaimedFormat.format("localhost:11001", setOf("localhost:11000", "localhost:11001")))
assertThatThrownBy { BFTSMaRtConfig(addresses(11001, 11000)).use {} }
.isInstanceOf(IllegalArgumentException::class.java)
.hasMessage(portIsClaimedFormat.format("localhost:11001", setOf("localhost:11001", "localhost:11002", "localhost:11000")))
BFTSMaRtConfig(addresses(11000, 11002)).use {} // Non-overlapping.
}
}