Mirror of https://github.com/corda/corda.git (synced 2025-01-14 16:59:52 +00:00)

Commit 45d6bca071: Merge branch 'master' into os-merge-f88542f
@@ -15,6 +15,8 @@ import com.nhaarman.mockito_kotlin.whenever
import net.corda.bridge.internal.BridgeInstance
import net.corda.bridge.services.api.BridgeMode
import net.corda.core.internal.div
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.serialize
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.node.services.config.CertChainPolicyConfig
import net.corda.node.services.config.EnterpriseConfiguration
@@ -22,11 +24,17 @@ import net.corda.node.services.config.MutualExclusionConfiguration
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.messaging.ArtemisMessagingServer
import net.corda.nodeapi.internal.ArtemisMessagingClient
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.BRIDGE_CONTROL
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.BRIDGE_NOTIFY
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2P_PREFIX
import net.corda.nodeapi.internal.bridging.BridgeControl
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.DUMMY_BANK_A_NAME
import net.corda.testing.core.MAX_MESSAGE_SIZE
import net.corda.testing.core.SerializationEnvironmentRule
import net.corda.testing.internal.rigorousMock
import org.apache.activemq.artemis.api.core.RoutingType
import org.apache.activemq.artemis.api.core.SimpleString
import org.junit.Assert.assertEquals
import org.junit.Assert.assertNull
import org.junit.Rule
@@ -57,6 +65,7 @@ class BridgeIntegrationTest {
        config.createBridgeKeyStores(DUMMY_BANK_A_NAME)
        val (artemisServer, artemisClient) = createArtemis()
        try {
+           installBridgeControlResponder(artemisClient)
            val bridge = BridgeInstance(config, BridgeVersionInfo(1, "1.1", "Dummy", "Test"))
            val stateFollower = bridge.activeChange.toBlocking().iterator
            assertEquals(false, stateFollower.next())
@@ -75,7 +84,6 @@ class BridgeIntegrationTest {
        }
    }

    @Test
    fun `Load bridge (float inner) and float outer and stand them up`() {
        val bridgeFolder = tempFolder.root.toPath()
@@ -94,6 +102,7 @@ class BridgeIntegrationTest {
        assertEquals(NetworkHostAndPort("0.0.0.0", 10005), floatConfig.inboundConfig!!.listeningAddress)
        val (artemisServer, artemisClient) = createArtemis()
        try {
+           installBridgeControlResponder(artemisClient)
            val bridge = BridgeInstance(bridgeConfig, BridgeVersionInfo(1, "1.1", "Dummy", "Test"))
            val bridgeStateFollower = bridge.activeChange.toBlocking().iterator
            val float = BridgeInstance(floatConfig, BridgeVersionInfo(1, "1.1", "Dummy", "Test"))
@@ -142,4 +151,20 @@ class BridgeIntegrationTest {
        artemisClient.start()
        return Pair(artemisServer, artemisClient)
    }

+   private fun installBridgeControlResponder(artemisClient: ArtemisMessagingClient) {
+       val artemis = artemisClient.started!!
+       val inboxAddress = SimpleString("${P2P_PREFIX}Test")
+       artemis.session.createQueue(inboxAddress, RoutingType.ANYCAST, inboxAddress, true)
+       artemis.session.createQueue(BRIDGE_NOTIFY, RoutingType.ANYCAST, BRIDGE_NOTIFY, false)
+       val controlConsumer = artemis.session.createConsumer(BRIDGE_NOTIFY)
+       controlConsumer.setMessageHandler { msg ->
+           val bridgeControl = BridgeControl.NodeToBridgeSnapshot("Test", listOf(inboxAddress.toString()), emptyList())
+           val controlPacket = bridgeControl.serialize(context = SerializationDefaults.P2P_CONTEXT).bytes
+           val artemisMessage = artemis.session.createMessage(false)
+           artemisMessage.writeBodyBufferBytes(controlPacket)
+           artemis.producer.send(BRIDGE_CONTROL, artemisMessage)
+           msg.acknowledge()
+       }
+   }
}
@@ -22,17 +22,15 @@ import net.corda.core.internal.concurrent.openFuture
import net.corda.core.internal.div
import net.corda.core.internal.exists
import net.corda.core.internal.readObject
import net.corda.core.node.NetworkParameters
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.internal.SerializationEnvironmentImpl
import net.corda.core.serialization.internal.effectiveSerializationEnv
import net.corda.core.serialization.internal.nodeSerializationEnv
import net.corda.core.utilities.contextLogger
import net.corda.nodeapi.internal.ShutdownHook
import net.corda.nodeapi.internal.addShutdownHook
import net.corda.nodeapi.internal.crypto.X509Utilities
import net.corda.nodeapi.internal.network.NETWORK_PARAMS_FILE_NAME
import net.corda.nodeapi.internal.network.SignedNetworkParameters
import net.corda.nodeapi.internal.network.verifiedNetworkMapCert
import net.corda.nodeapi.internal.serialization.AMQP_P2P_CONTEXT
import net.corda.nodeapi.internal.serialization.AMQP_STORAGE_CONTEXT
import net.corda.nodeapi.internal.serialization.SerializationFactoryImpl
@@ -50,7 +48,7 @@ class BridgeInstance(val conf: BridgeConfiguration,
    private val shutdown = AtomicBoolean(false)
    private var shutdownHook: ShutdownHook? = null

-   private lateinit var networkParameters: NetworkParameters
+   private var maxMessageSize: Int = -1
    private lateinit var bridgeAuditService: BridgeAuditService
    private var bridgeSupervisorService: BridgeSupervisorService? = null
    private var floatSupervisorService: FloatSupervisorService? = null
@@ -112,17 +110,15 @@ class BridgeInstance(val conf: BridgeConfiguration,
    val onExit: CordaFuture<BridgeInstance> get() = _exitFuture

    private fun retrieveNetworkParameters() {
-       val trustRoot = conf.loadTrustStore().getCertificate(X509Utilities.CORDA_ROOT_CA)
        val networkParamsFile = conf.baseDirectory / NETWORK_PARAMS_FILE_NAME
        require(networkParamsFile.exists()) { "No network-parameters file found." }
-       networkParameters = networkParamsFile.readObject<SignedNetworkParameters>().verifiedNetworkMapCert(trustRoot)
-       log.info("Loaded network parameters: $networkParameters")
-       check(networkParameters.minimumPlatformVersion <= versionInfo.platformVersion) {
-           "Node's platform version is lower than network's required minimumPlatformVersion"
-       }
+       val networkParameters = networkParamsFile.readObject<SignedNetworkParameters>().raw.deserialize()
+       maxMessageSize = networkParameters.maxMessageSize
+       log.info("Loaded maxMessageSize from network-parameters file: $maxMessageSize")
    }

    private fun createServices() {
+       require(maxMessageSize > 0) { "maxMessageSize not initialised" }
        bridgeAuditService = LoggingBridgeAuditService(conf)
        when (conf.bridgeMode) {
            // In the SenderReceiver mode the inbound and outbound message paths are run from within a single bridge process.
@@ -131,8 +127,8 @@ class BridgeInstance(val conf: BridgeConfiguration,
            // The process also runs a TLS/AMQP 1.0 server socket, which can receive connections and messages from peers,
            // validate the messages and then forward the packets to the Artemis inbox queue of the node.
            BridgeMode.SenderReceiver -> {
-               floatSupervisorService = FloatSupervisorServiceImpl(conf, networkParameters.maxMessageSize, bridgeAuditService)
-               bridgeSupervisorService = BridgeSupervisorServiceImpl(conf, networkParameters.maxMessageSize, bridgeAuditService, floatSupervisorService!!.amqpListenerService)
+               floatSupervisorService = FloatSupervisorServiceImpl(conf, maxMessageSize, bridgeAuditService)
+               bridgeSupervisorService = BridgeSupervisorServiceImpl(conf, maxMessageSize, bridgeAuditService, floatSupervisorService!!.amqpListenerService)
            }
            // In the FloatInner mode the process runs the full outbound message path as in the SenderReceiver mode, but the inbound path is split.
            // This 'Float Inner/Bridge Controller' process runs the more trusted portion of the inbound path.
@@ -141,7 +137,7 @@ class BridgeInstance(val conf: BridgeConfiguration,
            // node inboxes, before transferring the message to Artemis. Potentially it might carry out deeper checks of received packets.
            // However, the 'Float Inner' is not directly exposed to the internet or peers, and does not host the TLS/AMQP 1.0 server socket.
            BridgeMode.FloatInner -> {
-               bridgeSupervisorService = BridgeSupervisorServiceImpl(conf, networkParameters.maxMessageSize, bridgeAuditService, null)
+               bridgeSupervisorService = BridgeSupervisorServiceImpl(conf, maxMessageSize, bridgeAuditService, null)
            }
            // In the FloatOuter mode this process runs a minimal AMQP proxy that is designed to run in a DMZ zone.
            // The process holds the minimum data necessary to act as the TLS/AMQP 1.0 receiver socket and tries
@@ -156,7 +152,7 @@ class BridgeInstance(val conf: BridgeConfiguration,
            // holding potentially sensitive information and are then forwarded across the control tunnel to the 'Float Inner' process for more
            // complete validation checks.
            BridgeMode.FloatOuter -> {
-               floatSupervisorService = FloatSupervisorServiceImpl(conf, networkParameters.maxMessageSize, bridgeAuditService)
+               floatSupervisorService = FloatSupervisorServiceImpl(conf, maxMessageSize, bridgeAuditService)
            }
        }
        statusFollower = ServiceStateCombiner(listOf(bridgeAuditService, floatSupervisorService, bridgeSupervisorService).filterNotNull())
@@ -86,6 +86,12 @@ class SimpleMessageFilterService(val conf: BridgeConfiguration,
    override fun sendMessageToLocalBroker(inboundMessage: ReceivedMessage) {
+       try {
+           validateMessage(inboundMessage)
+       } catch (ex: Exception) {
+           auditService.packetDropEvent(inboundMessage, "Packet Failed validation checks: " + ex.message)
+           inboundMessage.complete(true) // consume the bad message, so that it isn't redelivered forever.
+           return
+       }
        try {
            val session = inboundSession
            val producer = inboundProducer
            if (session == null || producer == null) {
@@ -102,8 +108,8 @@ class SimpleMessageFilterService(val conf: BridgeConfiguration,
            producer.send(SimpleString(inboundMessage.topic), artemisMessage, { _ -> inboundMessage.complete(true) })
            auditService.packetAcceptedEvent(inboundMessage)
        } catch (ex: Exception) {
-           auditService.packetDropEvent(inboundMessage, "Packet Failed validation checks: " + ex.message)
-           inboundMessage.complete(false)
+           log.error("Error trying to forward message", ex)
+           inboundMessage.complete(false) // delivery failure. NAK back to source and await re-delivery attempts
        }
    }
}
@@ -153,18 +153,18 @@ class FloatControlListenerService(val conf: BridgeConfiguration,
    private fun onControlMessage(receivedMessage: ReceivedMessage) {
        if (!receivedMessage.checkTunnelControlTopic()) {
            auditService.packetDropEvent(receivedMessage, "Invalid control topic packet received on topic ${receivedMessage.topic}!!")
-           receivedMessage.complete(false)
+           receivedMessage.complete(true)
            return
        }
        val controlMessage = try {
            if (CordaX500Name.parse(receivedMessage.sourceLegalName) != floatClientName) {
                auditService.packetDropEvent(receivedMessage, "Invalid control source legal name!!")
-               receivedMessage.complete(false)
+               receivedMessage.complete(true)
                return
            }
            receivedMessage.payload.deserialize<TunnelControlMessage>()
        } catch (ex: Exception) {
-           receivedMessage.complete(false)
+           receivedMessage.complete(true)
            return
        }
        lock.withLock {
@@ -206,12 +206,12 @@ class FloatControlListenerService(val conf: BridgeConfiguration,
            }
        }
        if (amqpControl == null) {
-           message.complete(false)
+           message.complete(true) // consume message so it isn't resent forever
            return
        }
        if (!message.topic.startsWith(P2P_PREFIX)) {
            auditService.packetDropEvent(message, "Message topic is not a valid peer namespace ${message.topic}")
-           message.complete(false)
+           message.complete(true) // consume message so it isn't resent forever
            return
        }
        val appProperties = message.applicationProperties.map { Pair(it.key!!.toString(), it.value) }.toList()
@@ -22,6 +22,7 @@ import net.corda.core.serialization.deserialize
import net.corda.core.serialization.serialize
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.contextLogger
+import net.corda.core.utilities.debug
import net.corda.nodeapi.internal.config.SSLConfiguration
import net.corda.nodeapi.internal.protonwrapper.messages.MessageStatus
import net.corda.nodeapi.internal.protonwrapper.messages.ReceivedMessage
@@ -172,17 +173,17 @@ class TunnelingBridgeReceiverService(val conf: BridgeConfiguration,
    private fun onFloatMessage(receivedMessage: ReceivedMessage) {
        if (!receivedMessage.checkTunnelDataTopic()) {
            auditService.packetDropEvent(receivedMessage, "Invalid float inbound topic received ${receivedMessage.topic}!!")
-           receivedMessage.complete(false)
+           receivedMessage.complete(true)
            return
        }
        val innerMessage = try {
            receivedMessage.payload.deserialize<FloatDataPacket>()
        } catch (ex: Exception) {
            auditService.packetDropEvent(receivedMessage, "Unable to decode Float Control message")
-           receivedMessage.complete(false)
+           receivedMessage.complete(true)
            return
        }
-       log.info("Received $innerMessage")
+       log.debug { "Received message from ${innerMessage.sourceLegalName}" }
        val onwardMessage = object : ReceivedMessage {
            override val topic: String = innerMessage.topic
            override val applicationProperties: Map<Any?, Any?> = innerMessage.originalHeaders.toMap()
@@ -32,6 +32,7 @@ class DirectBridgeSenderService(val conf: BridgeConfiguration,
    private val statusFollower: ServiceStateCombiner
    private var statusSubscriber: Subscription? = null
    private var connectionSubscriber: Subscription? = null
+   private var listenerActiveSubscriber: Subscription? = null
    private var bridgeControlListener: BridgeControlListener = BridgeControlListener(conf, conf.outboundConfig!!.socksProxyConfig, { ForwardingArtemisMessageClient(artemisConnectionService) })

    init {
@@ -56,10 +57,15 @@ class DirectBridgeSenderService(val conf: BridgeConfiguration,
    override fun start() {
        statusSubscriber = statusFollower.activeChange.subscribe { ready ->
            if (ready) {
+               listenerActiveSubscriber = bridgeControlListener.activeChange.subscribe {
+                   stateHelper.active = it
+               }
                bridgeControlListener.start()
-               stateHelper.active = true
+               auditService.statusChangeEvent("Waiting for activation by at least one bridge control inbox registration")
            } else {
                stateHelper.active = false
+               listenerActiveSubscriber?.unsubscribe()
+               listenerActiveSubscriber = null
                bridgeControlListener.stop()
            }
        }
@@ -67,6 +73,8 @@ class DirectBridgeSenderService(val conf: BridgeConfiguration,

    override fun stop() {
        stateHelper.active = false
+       listenerActiveSubscriber?.unsubscribe()
+       listenerActiveSubscriber = null
        bridgeControlListener.stop()
        connectionSubscriber?.unsubscribe()
        connectionSubscriber = null
@@ -76,7 +76,7 @@ class FilterServiceTest {
        filterService.start()
        // Not ready so packet dropped
        val fakeMessage = rigorousMock<ReceivedMessage>().also {
-           doNothing().whenever(it).complete(false) // NAK'd
+           doNothing().whenever(it).complete(true) // ACK was called
        }
        filterService.sendMessageToLocalBroker(fakeMessage)
        assertEquals(TestAuditService.AuditEvent.PACKET_DROP, auditFollower.next()) // Dropped as not ready
@@ -154,7 +154,7 @@ class FilterServiceTest {

        // empty legal name
        val badMessage1 = rigorousMock<ReceivedMessage>().also {
-           doNothing().whenever(it).complete(false) // NAK was called
+           doNothing().whenever(it).complete(true) // ACK was called
            doReturn("").whenever(it).sourceLegalName
            doReturn(inboxTopic).whenever(it).topic
            doReturn(ByteArray(1)).whenever(it).payload
@@ -165,7 +165,7 @@ class FilterServiceTest {
        verify(dummyProducer, times(0)).send(ArgumentMatchers.any(), eq(dummyMessage), ArgumentMatchers.any()) // not sent
        // bad legal name
        val badMessage2 = rigorousMock<ReceivedMessage>().also {
-           doNothing().whenever(it).complete(false) // NAK was called
+           doNothing().whenever(it).complete(true) // ACK was called
            doReturn("CN=Test").whenever(it).sourceLegalName
            doReturn(inboxTopic).whenever(it).topic
            doReturn(ByteArray(1)).whenever(it).payload
@@ -176,7 +176,7 @@ class FilterServiceTest {
        verify(dummyProducer, times(0)).send(ArgumentMatchers.any(), eq(dummyMessage), ArgumentMatchers.any()) // not sent
        // empty payload
        val badMessage3 = rigorousMock<ReceivedMessage>().also {
-           doNothing().whenever(it).complete(false) // NAK was called
+           doNothing().whenever(it).complete(true) // ACK was called
            doReturn(DUMMY_BANK_B_NAME.toString()).whenever(it).sourceLegalName
            doReturn(inboxTopic).whenever(it).topic
            doReturn(ByteArray(0)).whenever(it).payload
@@ -187,7 +187,7 @@ class FilterServiceTest {
        verify(dummyProducer, times(0)).send(ArgumentMatchers.any(), eq(dummyMessage), ArgumentMatchers.any()) // not sent
        // bad topic
        val badMessage4 = rigorousMock<ReceivedMessage>().also {
-           doNothing().whenever(it).complete(false) // NAK was called
+           doNothing().whenever(it).complete(true) // ACK was called
            doReturn(DUMMY_BANK_B_NAME.toString()).whenever(it).sourceLegalName
            doReturn("bridge.control").whenever(it).topic
            doReturn(ByteArray(1)).whenever(it).payload
@@ -198,7 +198,7 @@ class FilterServiceTest {
        verify(dummyProducer, times(0)).send(ArgumentMatchers.any(), eq(dummyMessage), ArgumentMatchers.any()) // not sent
        // Non-whitelist header
        val badMessage5 = rigorousMock<ReceivedMessage>().also {
-           doNothing().whenever(it).complete(false) // NAK was called
+           doNothing().whenever(it).complete(true) // ACK was called
            doReturn(DUMMY_BANK_B_NAME.toString()).whenever(it).sourceLegalName
            doReturn(inboxTopic).whenever(it).topic
            doReturn(ByteArray(1)).whenever(it).payload
@@ -146,164 +146,3 @@ which is then referenced within a custom flow:
    :end-before: DOCEND TopupIssuer

For examples on testing ``@CordaService`` implementations, see the oracle example :doc:`here <oracles>`

.. _database_migration_ref:

Database Migration
==================

As a database migration tool, we use the open source library `Liquibase <http://www.liquibase.org/>`_.

Migration is enabled by specifying true in the ``database.runMigration`` node configuration setting (default behaviour is false).
When enabled, the database state is checked, and updated, during node startup.

The default behaviour (``database.runMigration=false``) is to just check the database state, and fail if it is not up to date. To bring the database to the correct state we provide an advanced migration tool. See below for details.

For example, if migration is enabled, then after deploying a new version of the code that contains database migrations (see the example below for a possible scenario), they are executed at that point (during startup).

Possible database changes range from schema changes to data changes. (The database changes are grouped together in `changesets`; see the example below.)

About Liquibase
---------------

Liquibase will create a table called ``DATABASECHANGELOG``, which stores information about each executed change (timestamp, description, user, md5 hash so it can't be changed, etc.).
This table will be used every time a migration command is run to determine what changesets need to be applied.
Changesets should never be modified once they have been executed. Any correction should be applied in a new changeset.
We can also "tag" the database at each release to make rollback easier.

Database changes are maintained in several xml files per ``MappedSchema``, so that only migrations corresponding to the node’s configured schemas are run.
The migration file(s) for all ``MappedSchemas`` are dynamically included in the global changelog, as long as they are present on the classpath and are either explicitly declared in the ``MappedSchema`` implementation, or follow a naming convention based on the ``MappedSchema`` name.
(The migration tool that we provide can generate liquibase files with the correct name for a schema.)

Our convention is to maintain a "master" changelog file per ``MappedSchema`` which will include "version" changelogs.
Following our versioning convention, and using the node-info schema as an example: if there are any database changes for release 12, the changes will be added to a new file called ``node-info.changelog-v12.xml``, which has to be included in ``node-info.changelog-master.xml``.


Example:
--------

Let's suppose that at some point, at version 12, there is a need to add a new column, ``contentSize``, to the ``DBAttachment`` entity.

This means we have to:

- In the source code, add the ``contentSize`` property and map it to a new column.
- Create the column in the ``node_attachments`` table.
- Run an update to set the size of all existing attachments, so as not to break the code that uses the new property.

.. code-block:: kotlin

    class DBAttachment(
            ...
            @Column(name = "content")
            @Lob
            var content: ByteArray,

            // newly added column
            @Column(name = "content_size")
            var contentSize: Int,
            ...
    )

The ``DBAttachment`` entity is included in the ``NodeServicesV1`` schema, so we create a file ``node-services.changelog-v12.xml`` with this changeset:

.. code-block:: xml

    <changeSet author="R3.Corda" id="add content_size column">
        <addColumn tableName="node_attachments">
            <column name="content_size" type="INT"/>
        </addColumn>
        <update tableName="node_attachments">
            <column name="content_size" valueComputed="length(content)"/>
        </update>
        <rollback>
            <dropColumn tableName="node_attachments" columnName="content_size"/>
        </rollback>
    </changeSet>

And include it in `node-services.changelog-master.xml`:

.. code-block:: xml

    <databaseChangeLog>
        <!--the original schema-->
        <include file="migration/node-services.changelog-init.xml"/>

        <!--migrations from previous releases-->
        <include file="migration/node-services.changelog-v4.xml"/>
        <include file="migration/node-services.changelog-v7.xml"/>

        <!--added now-->
        <include file="migration/node-services.changelog-v12.xml"/>
    </databaseChangeLog>


By adding the rollback script, we give users the option to revert to an older version of the software.

An easy way to manage the db version is to tag it on every release (or on every release that has migrations); see
`tagDatabase <http://www.liquibase.org/documentation/changes/tag_database.html>`_.


Usage:
------

Configurations:

- To enable migration at startup, set:
  - ``database.runMigration = true`` // false by default.

Migration tool:
---------------

The migration tool will be distributed as a standalone jar file, with the following options:

.. table::

    ==================================== =======================================================================
    Option                               Description
    ==================================== =======================================================================
    --help                               Print help message
    --mode                               Either 'NODE' or 'DOORMAN'. By default 'NODE'
    --base-directory                     The node or doorman directory
    --config-file                        The name of the config file. By default 'node.conf' for a simple node and 'network-management.conf' for a doorman.
    --doorman-jar-path                   The path to the doorman fat jar
    --create-migration-sql-for-cordapp   Create migration files for a CorDapp. You can specify the fully qualified name of the `MappedSchema` class. If not specified it will generate the migration for all schemas that don't have migrations. The output directory is the base-directory, where a `migration` folder is created.
    --dry-run                            Output the database migration to the specified output file. The output directory is the base-directory. You can specify a file name or 'CONSOLE' if you want to send the output to the console.
    --execute-migration                  This option will run the db migration on the configured database
    --release-lock                       Releases whatever locks are on the database change log table, in case shutdown failed.
    ==================================== =======================================================================

It is intended to be used by R3 Corda node administrators.
Currently it has these features:

- it allows running the migration on the database (`--execute-migration`)
- offers the option to inspect the actual sql statements that will be run as part of the current migration (`--dry-run`)
- can be used to release the migration lock (`--release-lock`)
- when a CorDapp released by the open source community is ready to be deployed on a production node, using this tool it can be "upgraded" (`--create-migration-sql-for-cordapp`). See below for details.

CorDapps:
---------

CorDapp developers who decide to store contract state in custom entities can create migration files for the ``MappedSchema`` they define.

There are 2 ways of associating a migration file with a schema:

1) By overriding ``val migrationResource: String`` and pointing to a file that needs to be in the classpath
2) By putting a file on the classpath in a `migration` package whose name is the hyphenated name of the schema. (All supported file extensions will be appended to the name)

CorDapp developers can use any of the supported formats (xml, sql, json, yaml) for the migration files they create.

In case CorDapp developers distribute their CorDapps with migration files, these will be automatically applied when the CorDapp is deployed on an R3 Corda node.
If they are deployed on a standard ("Open source") Corda node, then the migration will be ignored, and the database tables will be generated by Hibernate.

In case CorDapp developers don't distribute a CorDapp with migration files, then the organisation that decides to deploy this CorDapp on an R3 Corda ("Enterprise Blockchain") node has the responsibility to manage the database.

The following options are available:

1) In case the organisation is running a demo or trial node on the default H2 database, then the CorDapp will just work when deployed, by relying on the migration tool provided by Hibernate, which is not intended for production.
2) In case the organisation is running a production node (with live data) on an enterprise database, then they will have to manage the database migration for the CorDapp.

These are the steps to do this:

- deploy the CorDapp on your node (copy the jar into the `cordapps` folder)
- find out the name of the ``MappedSchema`` containing the new contract state entities and hyphenate it. For example: ``net.corda.finance.schemas.CommercialPaperSchemaV1``
- call the migration tool: ``java -jar migration-tool.jar --base-directory path_to_node --create-migration-sql-for-cordapp net.corda.finance.schemas.CommercialPaperSchemaV1``
- this will generate a file called ``commercial-paper-schema-v1.changelog-master.sql`` in a folder called ``migration`` in the `base-directory`
- in case you don't specify the actual ``MappedSchema`` name, the tool will generate one sql file for each schema defined in the CorDapp
- inspect the file(s) to make sure they are correct
- create a jar with the `migration` folder (by convention it could be named: originalCorDappName-migration.jar), and deploy this jar together with the CorDapp
- to make sure that the new migration will be used, run the migration tool in `dry-run` mode and inspect the output file
docs/source/database-migration.rst (new file, 306 lines)
@@ -0,0 +1,306 @@
.. highlight:: kotlin
.. raw:: html

    <script type="text/javascript" src="_static/jquery.js"></script>
    <script type="text/javascript" src="_static/codesets.js"></script>


Database Concerns for R3 Corda Nodes
====================================

Corda, the platform, and the installed third-party CorDapps store their data in a relational database (see :doc:`api-persistence`).

When Corda is first installed, or when a new CorDapp is installed, the associated tables, indexes, foreign keys, etc. must be created.

Similarly, when Corda is upgraded, or when a new version of a CorDapp is installed, their database schemas may have changed,
but the existing data needs to be preserved or changed accordingly.

Corda supports multiple database management systems, so CorDapp developers need to keep this database portability requirement in mind when writing and testing their code.

To address these concerns, R3 Corda provides a mechanism that makes it straightforward to migrate from the old schemas to the new ones whilst preserving data.
It does this by integrating a specialised database migration library.

R3 Corda also makes it easy to "lift" a CorDapp that does not handle database migration itself (e.g. where the CorDapp developers did not include db migration scripts).

This document is addressed to Node Administrators and CorDapp developers:

* Node Administrators need to understand how to manage the underlying database.
* CorDapp Developers need to understand how to write migration scripts.


Database Migration
==================

"Database migrations" (or schema migrations) in this document refers to the evolution of the database schema, or of the actual data,
that a Corda node uses when new releases of Corda or of CorDapps are installed.

At a high level, this means that the Corda binaries ship with scripts that cover everything from the creation of the schema
for the initial install to changes on subsequent versions.

A Corda node runs on top of a database that contains internal node tables, vault tables and CorDapp tables.
The database migration framework handles all of these in the same way, as evolutions of schema and data.

As a database migration framework, we use the open source library `Liquibase <http://www.liquibase.org/>`_.

.. note::
    This advanced feature is only provided in R3 Corda ("Enterprise Blockchain").
    Whenever an upgraded version of Corda or a new version of a CorDapp is shipped that requires a different database schema to its predecessor,
    it is the responsibility of the party shipping the code (R3 in the case of Corda; the app developer in the case of a CorDapp) to also provide the migration scripts.
    Once such a change has been applied to the actual database, this fact is recorded in the database by the database migration library (see below),
    hence providing a mechanism to determine the 'version' of any given schema.


About Liquibase
---------------

Liquibase is a generic framework for implementing an automated, version-based database migration workflow, and it supports a large number of databases.

It works by maintaining a list of applied change sets.

A changeset can be something very simple, like adding a new column to a table.
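For illustration only, a minimal changeset of that kind might look like the following sketch in the XML format (the author, id, table and column names here are invented, not taken from Corda's own changelogs):

.. code-block:: xml

    <changeSet author="example.author" id="add-example-column">
        <addColumn tableName="example_table">
            <column name="example_column" type="INT"/>
        </addColumn>
    </changeSet>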
Liquibase stores each executed changeset, with columns like id, author, timestamp, description, md5 hash, etc., in a table called ``DATABASECHANGELOG``.

This changelog table will be read every time a migration command is run, to determine what change sets need to be executed.

It represents the "version" of the database (the sum of the executed change sets at any point).

Change sets are scripts written in a supported format (xml, yml, sql), and should never be modified once they have been executed. Any necessary correction should be applied in a new changeset.

For documentation around liquibase see: `The Official website <http://www.liquibase.org>`_ and `Tutorial <https://www.thoughts-on-java.org/database-migration-with-liquibase-getting-started>`_.
(Understanding how liquibase works is highly recommended for understanding how database migrations work in Corda.)

Integration with the Corda node
===============================

Operational
-----------
By default, a node will *not* attempt to execute database migration scripts at startup (even when a new version has been deployed), but will check the database "version" (see above),
and halt if the database is not in sync with the node, to avoid data corruption.

To bring the database to the correct state we provide an advanced migration tool (see below).

Running the migration at startup automatically can be configured by specifying true in the ``database.runMigration`` node configuration setting (default behaviour is false).
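As a sketch, assuming the standard HOCON syntax of ``node.conf`` (the exact layout of your configuration file may differ per deployment), enabling this would look like::

    database {
        runMigration = true
    }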
We recommend Node administrators leave the default behaviour in production, and use the migration tool to have better control (see below).

It is safe to run at startup if you have implemented the usual best practices for database management (e.g. running a backup before installing a new version, etc.).


Migration scripts structure
---------------------------
Corda provides migration scripts in an XML format for its internal node and vault tables.
CorDapps should provide migration scripts for the tables they manage.

In Corda, ``MappedSchemas`` (see :doc:`api-persistence`) manage JPA Entities and thus the corresponding database tables.
So ``MappedSchemas`` are the natural place to point to the changelog file(s) that contain the changesets for those tables.

Nodes can configure which ``MappedSchemas`` are included, which means only the required tables are created.

To follow standard best practices, our convention for structuring the changelogs is to have a "master" changelog file per ``MappedSchema`` that will only include release changelogs (see the example below).

Example:

As a hypothetical scenario, let's suppose that at some point (maybe for security reasons) the ``owner`` column of the ``PersistentCashState`` entity needs to be stored as a hash instead of the X500 name of the owning party.

This means, as a CorDapp developer, we have to take these generic steps:

1. In the ``PersistentCashState`` entity we need to replace

.. code-block:: kotlin

    @Column(name = "owner_name")
    var owner: AbstractParty,

with:

.. code-block:: kotlin

    @Column(name = "owner_name_hash", length = MAX_HASH_HEX_SIZE)
    var ownerHash: String,

2. Add an ``owner_key_hash`` column to the ``contract_cash_states`` table. (Each JPA Entity usually defines a table name in a ``@Table`` annotation.)

3. Run an update to set the ``owner_key_hash`` to the hash of the ``owner_name``. This is needed to convert the existing data to the new (hashed) format.

4. Delete the ``owner_name`` column.

Steps 2, 3 and 4 can be expressed very easily like this:

.. code-block:: xml

    <changeSet author="R3.Corda" id="replace owner_name with owner_hash">
        <addColumn tableName="contract_cash_states">
            <column name="owner_name_hash" type="nvarchar(130)"/>
        </addColumn>
        <update tableName="contract_cash_states">
            <column name="owner_name_hash" valueComputed="hash(owner_name)"/>
        </update>
        <dropColumn tableName="contract_cash_states" columnName="owner_name"/>
    </changeSet>

The ``PersistentCashState`` entity is included in the ``CashSchemaV1`` schema, so based on the above-mentioned convention we create a file ``cash.changelog-v2.xml`` with the above changeset and include it in `cash.changelog-master.xml`:

.. code-block:: kotlin

    @CordaSerializable
    object CashSchemaV1 : MappedSchema(
            schemaFamily = CashSchema.javaClass, version = 1, mappedTypes = listOf(PersistentCashState::class.java)) {

        override val migrationResource = "cash.changelog-master"


.. code-block:: xml

    <databaseChangeLog>
        <!--the original schema-->
        <include file="migration/cash.changelog-init.xml"/>

        <!--added now-->
        <include file="migration/cash.changelog-v2.xml"/>
    </databaseChangeLog>


As we can see in this example, database migrations can "destroy" data, so it is therefore good practice to back up the database before executing the migration scripts.


Migration tool:
===============

The advanced database migration tool is distributed as a standalone jar file named ``db-migration-tool-${corda_version}.jar``.

It is intended to be used by R3 Corda node administrators.

Currently it has these features:

1. It allows running the migration on the database (`--execute-migration`).
2. Offers the option to inspect the actual sql statements that will be run as part of the current migration (`--dry-run`).
3. Sometimes, when a node or the migration tool crashes while running migrations, Liquibase will not release the lock.
   This can happen during some long database operation, or when an admin kills the process.
   (This cannot happen during normal operation of a node, only during the migration process.)
   See: `the DATABASECHANGELOGLOCK documentation <http://www.liquibase.org/documentation/databasechangeloglock_table.html>`_.
   The tool provides a "release-lock" command that will forcibly unlock the db migration.
4. When a CorDapp that does not handle database migrations itself is ready to be deployed on an R3 Corda production node,
   the CorDapp can be "lifted" using this tool (`--create-migration-sql-for-cordapp`).
   The reason this is needed is that those CorDapps don't handle this enterprise-level concern themselves.
   See below for details.
5. The tool is also intended to be used to run database migrations on "Doorman" instances. There is no difference from running against a normal node.


It has the following command line options:

.. table::

    ==================================== =======================================================================
    Option                               Description
    ==================================== =======================================================================
    --help                               Print help message
    --mode                               Either 'NODE' or 'DOORMAN'. By default 'NODE'
    --base-directory (*)                 The node or doorman directory
    --config-file                        The name of the config file. By default 'node.conf' for a simple node and 'network-management.conf' for a doorman.
    --doorman-jar-path                   The path to the doorman fat jar
    --create-migration-sql-for-cordapp   Create migration files for a CorDapp. You can specify the fully qualified name of the `MappedSchema` class. If not specified it will generate the migration for all schemas that don't have migrations. The output directory is the base-directory, where a `migration` folder is created.
    --dry-run                            Output the database migration to the specified output file. The output directory is the base-directory. You can specify a file name or 'CONSOLE' if you want to send the output to the console.
    --execute-migration                  This option will run the db migration on the configured database. This is the only command that will actually write to the database.
    --release-lock                       Releases whatever locks are on the database change log table, in case shutdown failed.
    ==================================== =======================================================================

For example:

``java -jar db-migration-tool-R3.CORDA-3.0-DP3-RC01.jar --base-directory /path/to/node --execute-migration``


How-To:
=======

Node Administrator installing Corda for the first time
------------------------------------------------------
- run the normal installation steps
- using the db migration tool, attempt a dry-run to inspect the output sql:
  ``--base-directory /path/to/node --dry-run``
- the output sql from the above command can be executed directly on the database, or this command can be run:
  ``--base-directory /path/to/node --execute-migration``
- at this point the Corda node can be started successfully


Node Administrator installing a new version of Corda
----------------------------------------------------
- deploy the new version of Corda
- attempt to start the node. If there are db migrations in the new release, the node will exit and will show how many changes are needed
- the same steps as above can then be executed: dry-run and/or execute-migration


Node Administrator installing a new CorDapp
-------------------------------------------
- deploy the new CorDapp to the node
- same steps as above


Node Administrator installing a new version of a CorDapp
--------------------------------------------------------
- replace the old CorDapp with the new version of the CorDapp
- same steps as above


Node Administrator installing a CorDapp developed by the OS community
---------------------------------------------------------------------
The Corda (OS) project does not have support for database migrations, as this is an Enterprise feature.
So CorDapps contributed by the OS community will not have this concern addressed by their original developers.
To help R3 Corda users, we offer support in the migration tool for "lifting" a CorDapp.

These are the steps:

- deploy the CorDapp on your node (copy the jar into the `cordapps` folder)
- find out the name of the ``MappedSchema`` containing the new contract state entities
- call the migration tool: ``--base-directory /path/to/node --create-migration-sql-for-cordapp com.example.MyMappedSchema``
- this will generate a file called ``my-mapped-schema.changelog-master.sql`` in a folder called ``migration`` in the `base-directory`
- in case you don't specify the actual ``MappedSchema`` name, the tool will generate one sql file for each schema defined in the CorDapp
- inspect the file(s) to make sure they are correct. This is a standard sql file with some liquibase metadata as comments
- create a jar with the `migration` folder (by convention it could be named: originalCorDappName-migration.jar), and deploy this jar together with the CorDapp
- to make sure that the new migration will be used, run the migration tool in `dry-run` mode and inspect the output file


Node Administrator deploying a new version of a CorDapp developed by the OS community
--------------------------------------------------------------------------------------
This is a slightly more complicated scenario.

The Node Administrator will have to understand the changes (if any) that happened in the latest version.

If there are changes that require schema changes, the Node Administrator will have to write and test those.

The way to do that is to create a new changeset in the existing changelog for that CorDapp (generated as above).

See the `Liquibase Sql Format <http://www.liquibase.org/documentation/sql_format.html>`_ documentation.
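As a sketch only, a new changeset appended to such a generated formatted-SQL changelog might look like this (the changeset id, author, table and column names are hypothetical, and the generated file typically already starts with the ``--liquibase formatted sql`` header):

.. code-block:: sql

    --changeset example.author:my-cordapp-v2-add-column
    ALTER TABLE my_cordapp_states ADD my_new_column INT;
    --rollback ALTER TABLE my_cordapp_states DROP COLUMN my_new_column;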
CorDapp developer developing a new CorDapp
------------------------------------------

CorDapp developers who decide to store contract state in custom entities can create migration files for the ``MappedSchema`` they define.

There are 2 ways of associating a migration file with a schema (a sketch of the first approach follows below):

1) By overriding ``val migrationResource: String`` and pointing to a file that needs to be in the classpath
2) By putting a file on the classpath in a `migration` package whose name is the hyphenated name of the schema. (All supported file extensions will be appended to the name)
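As a minimal sketch of the first approach (the schema, entity and resource names are invented for illustration, and a JPA entity ``MyAppState`` is assumed to be defined elsewhere; the ``migrationResource`` override mirrors the ``CashSchemaV1`` example shown earlier):

.. code-block:: kotlin

    object MyAppSchema

    object MyAppSchemaV1 : MappedSchema(
            schemaFamily = MyAppSchema.javaClass, version = 1,
            mappedTypes = listOf(MyAppState::class.java)) {

        // Approach 1: point to "my-app.changelog-master" on the classpath;
        // any of the supported extensions (.xml, .sql, .json, .yaml) will be tried.
        override val migrationResource = "my-app.changelog-master"
    }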
CorDapp developers can use any of the supported formats (xml, sql, json, yaml) for the migration files they create.

In case CorDapp developers distribute their CorDapps with migration files, these will be automatically applied when the CorDapp is deployed on an R3 Corda node.
If they are deployed on a standard ("Open source") Corda node, then the migration will be ignored, and the database tables will be generated by Hibernate.

In case CorDapp developers don't distribute a CorDapp with migration files, then the organisation that decides to deploy this CorDapp on an R3 Corda ("Enterprise Blockchain") node has the responsibility to manage the database.

During development or a demo on the default H2 database, the CorDapp will just work when deployed even if there are no migration scripts, by relying on the primitive migration tool provided by Hibernate, which is not intended for production.

A very important aspect to remember is that the CorDapp will have to work on all supported Corda databases.
It is the responsibility of the developers to test the migration scripts and the CorDapp against all the databases.
In the future we will provide additional tooling to assist with this aspect.


CorDapp developer developing a new version of an existing CorDapp
-----------------------------------------------------------------
Depending on the changes to the ``PersistentEntities``, a changelog will have to be created as per the liquibase documentation and the example above.


Troubleshooting
---------------
When seeing behaviour similar to `this <https://stackoverflow.com/questions/15528795/liquibase-lock-reasons>`_,
you can run:

``--base-directory /path/to/node --release-lock``
@@ -34,6 +34,7 @@ We look forward to seeing what you can do with Corda!

    quickstart-index.rst
    key-concepts.rst
+   operations-guide.rst
    building-a-cordapp-index.rst
    corda-nodes-index.rst
    corda-networks-index.rst

docs/source/operations-guide.rst (new file, 4 lines)
@@ -0,0 +1,4 @@
Operations Guide
================

* :doc:`Database Migration <database-migration>`
@@ -60,25 +60,27 @@ class PersistentCertificateSigningRequestStorage(private val database: CordaPers
    }

    override fun saveRequest(request: PKCS10CertificationRequest): String {
-       val requestId = SecureHash.randomSHA256().toString()
-       database.transaction(TransactionIsolationLevel.SERIALIZABLE) {
-           val legalNameOrRejectMessage = try {
-               validateRequestAndParseLegalName(request)
+       return database.transaction(TransactionIsolationLevel.SERIALIZABLE) {
+           val (legalName, exception) = try {
+               val legalName = parseLegalName(request)
+               // Return existing request ID, if request already exists for the same request.
+               validateRequest(legalName, request)?.let { return@transaction it }
+               Pair(legalName, null)
            } catch (e: RequestValidationException) {
-               e.rejectMessage
+               Pair(e.legalName, e)
            }

            val requestEntity = CertificateSigningRequestEntity(
-                   requestId = requestId,
-                   legalName = legalNameOrRejectMessage as? CordaX500Name,
+                   requestId = SecureHash.randomSHA256().toString(),
+                   legalName = legalName,
                    publicKeyHash = toSupportedPublicKey(request.subjectPublicKeyInfo).hash,
                    request = request,
-                   remark = legalNameOrRejectMessage as? String,
+                   remark = exception?.rejectMessage,
                    modifiedBy = CertificateSigningRequestStorage.DOORMAN_SIGNATURE,
-                   status = if (legalNameOrRejectMessage is CordaX500Name) RequestStatus.NEW else RequestStatus.REJECTED
+                   status = if (exception == null) RequestStatus.NEW else RequestStatus.REJECTED
            )
-           session.save(requestEntity)
+           session.save(requestEntity) as String
        }
-       return requestId
    }

    private fun DatabaseTransaction.findRequest(requestId: String,
@@ -159,41 +161,56 @@ class PersistentCertificateSigningRequestStorage(private val database: CordaPers
        }
    }

-   private fun DatabaseTransaction.validateRequestAndParseLegalName(request: PKCS10CertificationRequest): CordaX500Name {
-       // It's important that we always use the toString() output of CordaX500Name as it standardises the string format
-       // to make querying possible.
-       val legalName = try {
+   private fun parseLegalName(request: PKCS10CertificationRequest): CordaX500Name {
+       return try {
            CordaX500Name.build(X500Principal(request.subject.encoded))
        } catch (e: IllegalArgumentException) {
-           throw RequestValidationException(request.subject.toString(), "Name validation failed: ${e.message}")
-       }
-       return when {
-           // Check if requested role is valid.
-           request.getCertRole() !in allowedCertRoles -> throw RequestValidationException(legalName.toString(), "Requested certificate role ${request.getCertRole()} is not allowed.")
-           // TODO consider scenario: There is a CSR that is signed but the certificate itself has expired or was revoked
-           // Also, at the moment we assume that once the CSR is approved it cannot be rejected.
-           // What if we approved something by mistake.
-           nonRejectedRequestExists(CertificateSigningRequestEntity::legalName.name, legalName) -> throw RequestValidationException(legalName.toString(), "Duplicate legal name")
-           //TODO Consider following scenario: There is a CSR that is signed but the certificate itself has expired or was revoked
-           nonRejectedRequestExists(CertificateSigningRequestEntity::publicKeyHash.name, toSupportedPublicKey(request.subjectPublicKeyInfo).hash) -> throw RequestValidationException(legalName.toString(), "Duplicate public key")
-           else -> legalName
+           throw RequestValidationException(null, request.subject.toString(), rejectMessage = "Name validation failed: ${e.message}")
        }
    }

    /**
-    * Check if "non-rejected" request exists with provided column and value.
+    * Validate certificate signing request, returns request ID if same request already exists.
     */
-   private fun DatabaseTransaction.nonRejectedRequestExists(columnName: String, value: Any): Boolean {
+   private fun DatabaseTransaction.validateRequest(legalName: CordaX500Name, request: PKCS10CertificationRequest): String? {
+       // Check if the same request exists and returns the request id.
+       val existingRequestByPubKeyHash = nonRejectedRequest(CertificateSigningRequestEntity::publicKeyHash.name, toSupportedPublicKey(request.subjectPublicKeyInfo).hash)
+
+       existingRequestByPubKeyHash?.let {
+           // Compare subject, attribute.
+           // We cannot compare the request directly because it contains nonce.
+           if (it.request.subject == request.subject && it.request.attributes.asList() == request.attributes.asList()) {
+               return it.requestId
+           } else {
+               //TODO Consider following scenario: There is a CSR that is signed but the certificate itself has expired or was revoked
+               throw RequestValidationException(legalName, rejectMessage = "Duplicate public key")
+           }
+       }
+       // Check if requested role is valid.
+       if (request.getCertRole() !in allowedCertRoles)
+           throw RequestValidationException(legalName, rejectMessage = "Requested certificate role ${request.getCertRole()} is not allowed.")
+       // TODO consider scenario: There is a CSR that is signed but the certificate itself has expired or was revoked
+       // Also, at the moment we assume that once the CSR is approved it cannot be rejected.
+       // What if we approved something by mistake.
+       if (nonRejectedRequest(CertificateSigningRequestEntity::legalName.name, legalName) != null) throw RequestValidationException(legalName, rejectMessage = "Duplicate legal name")
+
+       return null
+   }
+
+   /**
+    * Retrieve "non-rejected" request which matches provided column and value predicate.
+    */
+   private fun <T : Any> DatabaseTransaction.nonRejectedRequest(columnName: String, value: T): CertificateSigningRequestEntity? {
        val query = session.criteriaBuilder.run {
            val criteriaQuery = createQuery(CertificateSigningRequestEntity::class.java)
            criteriaQuery.from(CertificateSigningRequestEntity::class.java).run {
-               val valueQuery = equal(get<CordaX500Name>(columnName), value)
+               val valueQuery = equal(get<T>(columnName), value)
                val statusQuery = notEqual(get<RequestStatus>(CertificateSigningRequestEntity::status.name), RequestStatus.REJECTED)
                criteriaQuery.where(and(valueQuery, statusQuery))
            }
        }
-       return session.createQuery(query).setMaxResults(1).resultList.isNotEmpty()
+       return session.createQuery(query).setMaxResults(1).uniqueResult()
    }

-   private class RequestValidationException(subjectName: String, val rejectMessage: String) : Exception("Validation failed for $subjectName. $rejectMessage.")
+   private data class RequestValidationException(val legalName: CordaX500Name?, val subjectName: String = legalName.toString(), val rejectMessage: String) : Exception("Validation failed for $subjectName. $rejectMessage.")
}
@ -34,10 +34,11 @@ import java.security.cert.CertPath
class PersistentNodeInfoStorage(private val database: CordaPersistence) : NodeInfoStorage {
    override fun putNodeInfo(nodeInfoAndSigned: NodeInfoAndSigned): SecureHash {
        val (nodeInfo, signedNodeInfo) = nodeInfoAndSigned
        val nodeCaCert = nodeInfo.legalIdentitiesAndCerts[0].certPath.x509Certificates.find { CertRole.extract(it) == NODE_CA }
        nodeCaCert ?: throw IllegalArgumentException("Missing Node CA")
        val nodeInfoHash = signedNodeInfo.raw.hash

        // Extract identities issued by the intermediate CAs (doorman).
        val registeredIdentities = nodeInfo.legalIdentitiesAndCerts.map { it.certPath.x509Certificates.single { CertRole.extract(it) in setOf(CertRole.SERVICE_IDENTITY, NODE_CA) } }

        database.transaction {
            val count = session.createQuery(
                    "select count(*) from ${NodeInfoEntity::class.java.name} where nodeInfoHash = :nodeInfoHash and isCurrent = true", java.lang.Long::class.java)
@ -50,13 +51,21 @@ class PersistentNodeInfoStorage(private val database: CordaPersistence) : NodeIn
            }

            // TODO Move these checks out of the data access layer
            val request = requireNotNull(getSignedRequestByPublicHash(nodeCaCert.publicKey.encoded.sha256())) {
                "Node-info not registered with us"
            }
            request.certificateData?.certificateStatus.let {
                require(it == CertificateStatus.VALID) { "Certificate is no longer valid: $it" }
            // For each identity known by the doorman, validate it against its CSR.
            val requests = registeredIdentities.map {
                val request = requireNotNull(getSignedRequestByPublicHash(it.publicKey.hash)) {
                    "Node-info not registered with us"
                }
                request.certificateData?.certificateStatus.let {
                    require(it == CertificateStatus.VALID) { "Certificate is no longer valid: $it" }
                }
                CertRole.extract(it) to request
            }

            // Ensure only one Node CA identity.
            // TODO: make this support multiple node identities.
            val (_, request) = requireNotNull(requests.singleOrNull { it.first == CertRole.NODE_CA }) { "Require exactly 1 Node CA identity in the node-info." }

            val existingNodeInfos = session.fromQuery<NodeInfoEntity>(
                    "n where n.certificateSigningRequest = :csr and n.isCurrent = true order by n.publishedAt desc")
                    .setParameter("csr", request)
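Note: the `singleOrNull` guard above does double duty: it returns null both when no identity has the Node CA role and when more than one does, so a single `requireNotNull` rejects both failure modes. A self-contained illustration of the idiom (the `Role` enum and `pickNodeCaRequest` are hypothetical names, not Corda API):

    enum class Role { NODE_CA, SERVICE_IDENTITY }

    fun pickNodeCaRequest(identities: List<Pair<Role, String>>): String {
        // singleOrNull is null for zero matches AND for duplicates, so one check covers both failure modes.
        val (_, request) = requireNotNull(identities.singleOrNull { it.first == Role.NODE_CA }) {
            "Require exactly 1 Node CA identity in the node-info."
        }
        return request
    }

    fun main() {
        println(pickNodeCaRequest(listOf(Role.NODE_CA to "csr-1", Role.SERVICE_IDENTITY to "csr-2"))) // csr-1
        // Zero or two NODE_CA entries would throw IllegalArgumentException with the message above.
    }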
@ -69,6 +69,28 @@ class PersistentCertificateRequestStorageTest : TestBase() {
        assertThat(storage.getRequests(RequestStatus.NEW).map { it.requestId }).containsOnly(requestId)
    }

    @Test
    fun `submit same request twice returns the first request ID`() {
        val keyPair = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)
        val request = createRequest("LegalName", keyPair, certRole = CertRole.NODE_CA).first
        val firstRequestId = storage.saveRequest(request)

        assertNotNull(storage.getRequest(firstRequestId)).apply {
            assertEquals(request, this.request)
        }
        assertThat(storage.getRequests(RequestStatus.NEW).map { it.requestId }).containsOnly(firstRequestId)

        val request2 = createRequest("LegalName", keyPair, certRole = CertRole.NODE_CA).first
        val secondRequestId = storage.saveRequest(request2)

        assertEquals(firstRequestId, secondRequestId)

        assertNotNull(storage.getRequest(secondRequestId)).apply {
            assertEquals(request, this.request)
        }
        assertThat(storage.getRequests(RequestStatus.NEW).map { it.requestId }).containsOnly(firstRequestId)
    }

    @Test
    fun `invalid cert role request`() {
        val request = createRequest("LegalName", certRole = CertRole.INTERMEDIATE_CA).first
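Note: this test pins down the idempotency contract implied by the duplicate-public-key validation earlier in the commit: resubmitting an equivalent CSR must return the original request ID rather than create a second row. A minimal in-memory model of that contract (all names hypothetical, nonce handling elided):

    class IdempotentRequestStore {
        private val requests = LinkedHashMap<String, Pair<String, String>>() // requestId -> (subject, attributes)
        private var counter = 0

        // Returns the existing ID when an equivalent request was already saved.
        fun saveRequest(subject: String, attributes: String): String {
            requests.entries.firstOrNull { it.value == subject to attributes }?.let { return it.key }
            val id = "req-${counter++}"
            requests[id] = subject to attributes
            return id
        }
    }

    fun main() {
        val store = IdempotentRequestStore()
        val first = store.saveRequest("O=LegalName,L=London,C=GB", "role=NODE_CA")
        val second = store.saveRequest("O=LegalName,L=London,C=GB", "role=NODE_CA")
        check(first == second) // resubmission returns the first request ID
    }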
@ -13,10 +13,10 @@ package com.r3.corda.networkmanage.common.persistence
import com.r3.corda.networkmanage.TestBase
import com.r3.corda.networkmanage.common.persistence.entity.NodeInfoEntity
import net.corda.core.crypto.Crypto
import net.corda.core.crypto.sha256
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.CertRole
import net.corda.core.internal.hash
import net.corda.core.node.NodeInfo
import net.corda.core.serialization.serialize
import net.corda.core.utilities.days
import net.corda.nodeapi.internal.NodeInfoAndSigned
@ -31,6 +31,7 @@ import net.corda.testing.internal.createDevIntermediateCaCertPath
import net.corda.testing.internal.signWith
import net.corda.testing.node.MockServices
import org.assertj.core.api.Assertions.assertThat
import org.assertj.core.api.Assertions.assertThatThrownBy
import org.junit.After
import org.junit.Before
import org.junit.Test
@ -216,17 +217,66 @@ class PersistentNodeInfoStorageTest : TestBase() {
        val acceptedUpdate = nodeInfoStorage.getAcceptedParametersUpdate(nodeInfoHash2)
        assertThat(acceptedUpdate?.networkParameters?.hash).isEqualTo(netParamsHash.toString())
    }

    @Test
    fun `persist node info with multiple node CA identities`() {
        val (nodeInfo1, nodeKeyPair1) = createValidNodeInfo("Alice", requestStorage)
        val (nodeInfo2, nodeKeyPair2) = createValidNodeInfo("Bob", requestStorage)

        val multiIdentityNodeInfo = nodeInfo1.copy(legalIdentitiesAndCerts = nodeInfo1.legalIdentitiesAndCerts + nodeInfo2.legalIdentitiesAndCerts)
        val signedNodeInfo = multiIdentityNodeInfo.signWith(listOf(nodeKeyPair1, nodeKeyPair2))

        assertThatThrownBy { nodeInfoStorage.putNodeInfo(NodeInfoAndSigned(signedNodeInfo)) }
                .isInstanceOf(IllegalArgumentException::class.java)
                .hasMessageContaining("Require exactly 1 Node CA identity in the node-info.")
    }

    @Test
    fun `persist node info with service identity`() {
        val (nodeInfo, nodeKeyPairs) = createValidNodeInfo(requestStorage, CertRole.NODE_CA to "Alice", CertRole.SERVICE_IDENTITY to "Alice Notary")
        val signedNodeInfo = nodeInfo.signWith(nodeKeyPairs)
        nodeInfoStorage.putNodeInfo(NodeInfoAndSigned(signedNodeInfo))
    }

    @Test
    fun `persist node info with unregistered service identity`() {
        val (nodeInfo1, nodeKeyPair1) = createValidNodeInfo("Alice", requestStorage)
        // Create an unregistered cert path with a valid intermediate cert.
        val (identity, key) = TestNodeInfoBuilder().addServiceIdentity(CordaX500Name("Test", "London", "GB"), Crypto.generateKeyPair())

        val multiIdentityNodeInfo = nodeInfo1.copy(legalIdentitiesAndCerts = nodeInfo1.legalIdentitiesAndCerts + identity)
        val signedNodeInfo = multiIdentityNodeInfo.signWith(listOf(nodeKeyPair1, key))

        assertThatThrownBy { nodeInfoStorage.putNodeInfo(NodeInfoAndSigned(signedNodeInfo)) }
                .isInstanceOf(IllegalArgumentException::class.java)
                .hasMessageContaining("Node-info not registered with us")
    }
}

internal fun createValidSignedNodeInfo(organisation: String,
                                       storage: CertificateSigningRequestStorage): Pair<NodeInfoAndSigned, PrivateKey> {
    val (csr, nodeKeyPair) = createRequest(organisation, certRole = CertRole.NODE_CA)
    val requestId = storage.saveRequest(csr)
    storage.markRequestTicketCreated(requestId)
    storage.approveRequest(requestId, "TestUser")
private fun createValidNodeInfo(organisation: String, storage: CertificateSigningRequestStorage): Pair<NodeInfo, PrivateKey> {
    val (nodeInfo, keys) = createValidNodeInfo(storage, CertRole.NODE_CA to organisation)
    return Pair(nodeInfo, keys.single())
}

private fun createValidNodeInfo(storage: CertificateSigningRequestStorage, vararg identities: Pair<CertRole, String>): Pair<NodeInfo, List<PrivateKey>> {
    val nodeInfoBuilder = TestNodeInfoBuilder()
    val (identity, key) = nodeInfoBuilder.addIdentity(CordaX500Name.build(X500Principal(csr.subject.encoded)), nodeKeyPair)
    storage.putCertificatePath(requestId, identity.certPath, "Test")
    val (_, signedNodeInfo) = nodeInfoBuilder.buildWithSigned(1)
    return Pair(NodeInfoAndSigned(signedNodeInfo), key)
}
    val keys = identities.map { (certRole, name) ->
        val (csr, nodeKeyPair) = createRequest(name, certRole = certRole)
        val requestId = storage.saveRequest(csr)
        storage.markRequestTicketCreated(requestId)
        storage.approveRequest(requestId, "TestUser")
        val (identity, key) = when (certRole) {
            CertRole.NODE_CA -> nodeInfoBuilder.addLegalIdentity(CordaX500Name.build(X500Principal(csr.subject.encoded)), nodeKeyPair)
            CertRole.SERVICE_IDENTITY -> nodeInfoBuilder.addServiceIdentity(CordaX500Name.build(X500Principal(csr.subject.encoded)), nodeKeyPair)
            else -> throw IllegalArgumentException("Unsupported cert role $certRole.")
        }
        storage.putCertificatePath(requestId, identity.certPath, "Test")
        key
    }
    return Pair(nodeInfoBuilder.build(), keys)
}

internal fun createValidSignedNodeInfo(organisation: String, storage: CertificateSigningRequestStorage): Pair<NodeInfoAndSigned, PrivateKey> {
    val (nodeInfo, key) = createValidNodeInfo(organisation, storage)
    return Pair(NodeInfoAndSigned(nodeInfo.signWith(listOf(key))), key)
}

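Note: the refactor above replaces a single-identity helper with a vararg-of-(role, name)-pairs API, which keeps test call sites declarative. A stripped-down sketch of that shape, independent of Corda types (all names hypothetical):

    enum class Role { NODE_CA, SERVICE_IDENTITY }

    data class Identity(val role: Role, val name: String)

    // Build one node-info-like object from a variable number of (role, name) pairs.
    fun buildIdentities(vararg identities: Pair<Role, String>): List<Identity> =
            identities.map { (role, name) -> Identity(role, name) }

    fun main() {
        // Call sites stay declarative, mirroring the test usage above.
        println(buildIdentities(Role.NODE_CA to "Alice", Role.SERVICE_IDENTITY to "Alice Notary"))
    }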
@ -27,6 +27,8 @@ import org.apache.activemq.artemis.api.core.RoutingType
import org.apache.activemq.artemis.api.core.SimpleString
import org.apache.activemq.artemis.api.core.client.ClientConsumer
import org.apache.activemq.artemis.api.core.client.ClientMessage
import rx.Observable
import rx.subjects.PublishSubject
import java.util.*

class BridgeControlListener(val config: NodeSSLConfiguration,
@ -47,6 +49,13 @@ class BridgeControlListener(val config: NodeSSLConfiguration,
        private val log = contextLogger()
    }

    val active: Boolean
        get() = validInboundQueues.isNotEmpty()

    private val _activeChange = PublishSubject.create<Boolean>().toSerialized()
    val activeChange: Observable<Boolean>
        get() = _activeChange

    fun start() {
        stop()
        bridgeManager.start()
@ -73,6 +82,9 @@ class BridgeControlListener(val config: NodeSSLConfiguration,
    }

    fun stop() {
        if (active) {
            _activeChange.onNext(false)
        }
        validInboundQueues.clear()
        controlConsumer?.close()
        controlConsumer = null
@ -112,7 +124,11 @@ class BridgeControlListener(val config: NodeSSLConfiguration,
            for (outQueue in controlMessage.sendQueues) {
                bridgeManager.deployBridge(outQueue.queueName, outQueue.targets.first(), outQueue.legalNames.toSet())
            }
            val wasActive = active
            validInboundQueues.addAll(controlMessage.inboxQueues)
            if (!wasActive && active) {
                _activeChange.onNext(true)
            }
        }
        is BridgeControl.BridgeToNodeSnapshotRequest -> {
            log.error("Message from Bridge $controlMessage detected on wrong topic!")
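Note: the listener now exposes liveness as an edge-triggered Rx stream: `toSerialized()` makes concurrent `onNext` calls safe, and the `wasActive`/`active` comparison ensures subscribers only see transitions rather than every control message. A self-contained sketch of the pattern using RxJava 1.x, consistent with the `rx.` imports above (the `ActivityTracker` class is illustrative, not this commit's code):

    import rx.Observable
    import rx.subjects.PublishSubject

    class ActivityTracker {
        private val queues = mutableSetOf<String>()

        val active: Boolean get() = queues.isNotEmpty()

        // toSerialized() makes concurrent onNext calls safe.
        private val _activeChange = PublishSubject.create<Boolean>().toSerialized()
        val activeChange: Observable<Boolean> get() = _activeChange

        fun addQueues(newQueues: Collection<String>) {
            val wasActive = active
            queues.addAll(newQueues)
            if (!wasActive && active) _activeChange.onNext(true) // fire only on the inactive -> active edge
        }

        fun stop() {
            if (active) _activeChange.onNext(false)
            queues.clear()
        }
    }

    fun main() {
        val tracker = ActivityTracker()
        tracker.activeChange.subscribe { println("active = $it") }
        tracker.addQueues(listOf("p2p.inbound.Test")) // prints: active = true
        tracker.addQueues(listOf("another"))          // no emission; already active
        tracker.stop()                                // prints: active = false
    }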
@ -133,7 +133,7 @@ class SchemaMigration(
    check && !run -> {
        val unRunChanges = liquibase.listUnrunChangeSets(Contexts(), LabelExpression())
        if (unRunChanges.isNotEmpty()) {
            throw IllegalStateException("There are ${unRunChanges.size} outstanding database changes that need to be run. Please use the provided tools to update the database.")
            throw IllegalStateException("There are ${unRunChanges.size} outstanding database changes that need to be run. Please use the advanced migration tool. See: http://docs.corda.r3.com/database-migration.html")
        }
    }
    (outputWriter != null) && !check && !run -> liquibase.update(Contexts(), outputWriter)
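Note: this branch implements a check-only mode: `listUnrunChangeSets` inspects pending changesets without applying them, and the code fails fast instead of silently migrating. A minimal sketch of that Liquibase usage, assuming an already configured `liquibase.Liquibase` instance (connection and changelog setup elided):

    import liquibase.Contexts
    import liquibase.LabelExpression
    import liquibase.Liquibase

    // Throw instead of migrating when the schema is behind the changelog.
    fun checkNoPendingChanges(liquibase: Liquibase) {
        val unRunChanges = liquibase.listUnrunChangeSets(Contexts(), LabelExpression())
        if (unRunChanges.isNotEmpty()) {
            throw IllegalStateException("There are ${unRunChanges.size} outstanding database changes that need to be run.")
        }
    }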
@ -41,23 +41,23 @@ class SignedNodeInfoTest {

    @Test
    fun `verifying single identity`() {
        nodeInfoBuilder.addIdentity(ALICE_NAME)
        nodeInfoBuilder.addLegalIdentity(ALICE_NAME)
        val (nodeInfo, signedNodeInfo) = nodeInfoBuilder.buildWithSigned()
        assertThat(signedNodeInfo.verified()).isEqualTo(nodeInfo)
    }

    @Test
    fun `verifying multiple identities`() {
        nodeInfoBuilder.addIdentity(ALICE_NAME)
        nodeInfoBuilder.addIdentity(BOB_NAME)
        nodeInfoBuilder.addLegalIdentity(ALICE_NAME)
        nodeInfoBuilder.addLegalIdentity(BOB_NAME)
        val (nodeInfo, signedNodeInfo) = nodeInfoBuilder.buildWithSigned()
        assertThat(signedNodeInfo.verified()).isEqualTo(nodeInfo)
    }

    @Test
    fun `verifying missing signature`() {
        val (_, aliceKey) = nodeInfoBuilder.addIdentity(ALICE_NAME)
        nodeInfoBuilder.addIdentity(BOB_NAME)
        val (_, aliceKey) = nodeInfoBuilder.addLegalIdentity(ALICE_NAME)
        nodeInfoBuilder.addLegalIdentity(BOB_NAME)
        val nodeInfo = nodeInfoBuilder.build()
        val signedNodeInfo = nodeInfo.signWith(listOf(aliceKey))
        assertThatThrownBy { signedNodeInfo.verified() }
@ -80,7 +80,7 @@ class SignedNodeInfoTest {

    @Test
    fun `verifying extra signature`() {
        val (_, aliceKey) = nodeInfoBuilder.addIdentity(ALICE_NAME)
        val (_, aliceKey) = nodeInfoBuilder.addLegalIdentity(ALICE_NAME)
        val nodeInfo = nodeInfoBuilder.build()
        val signedNodeInfo = nodeInfo.signWith(listOf(aliceKey, generateKeyPair().private))
        assertThatThrownBy { signedNodeInfo.verified() }
@ -90,7 +90,7 @@ class SignedNodeInfoTest {

    @Test
    fun `verifying incorrect signature`() {
        nodeInfoBuilder.addIdentity(ALICE_NAME)
        nodeInfoBuilder.addLegalIdentity(ALICE_NAME)
        val nodeInfo = nodeInfoBuilder.build()
        val signedNodeInfo = nodeInfo.signWith(listOf(generateKeyPair().private))
        assertThatThrownBy { signedNodeInfo.verified() }
@ -100,8 +100,8 @@ class SignedNodeInfoTest {

    @Test
    fun `verifying with signatures in wrong order`() {
        val (_, aliceKey) = nodeInfoBuilder.addIdentity(ALICE_NAME)
        val (_, bobKey) = nodeInfoBuilder.addIdentity(BOB_NAME)
        val (_, aliceKey) = nodeInfoBuilder.addLegalIdentity(ALICE_NAME)
        val (_, bobKey) = nodeInfoBuilder.addLegalIdentity(BOB_NAME)
        val nodeInfo = nodeInfoBuilder.build()
        val signedNodeInfo = nodeInfo.signWith(listOf(bobKey, aliceKey))
        assertThatThrownBy { signedNodeInfo.verified() }
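Note: taken together these tests encode an ordering contract: `signWith` expects one private key per legal identity, in the same order the identities were added, and verification rejects missing, extra, incorrect, and out-of-order keys. A toy model of order-sensitive multi-signature verification (hypothetical types, not Corda's implementation):

    data class Signed(val identities: List<String>, val signatures: List<String>)

    fun sign(identities: List<String>, keys: List<String>) = Signed(identities, keys.map { "sig($it)" })

    // Verification is positional: signature i must come from the key of identity i.
    fun verify(signed: Signed): Boolean =
            signed.identities.size == signed.signatures.size &&
                    signed.identities.zip(signed.signatures).all { (id, sig) -> sig == "sig(${id}Key)" }

    fun main() {
        val ok = sign(listOf("alice", "bob"), listOf("aliceKey", "bobKey"))
        val wrongOrder = sign(listOf("alice", "bob"), listOf("bobKey", "aliceKey"))
        check(verify(ok))
        check(!verify(wrongOrder)) // signatures in the wrong order are rejected
    }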
@ -83,8 +83,8 @@ class NetworkMapClientTest {
    @Test
    fun `errors return a meaningful error message`() {
        val nodeInfoBuilder = TestNodeInfoBuilder()
        val (_, aliceKey) = nodeInfoBuilder.addIdentity(ALICE_NAME)
        nodeInfoBuilder.addIdentity(BOB_NAME)
        val (_, aliceKey) = nodeInfoBuilder.addLegalIdentity(ALICE_NAME)
        nodeInfoBuilder.addLegalIdentity(BOB_NAME)
        val nodeInfo3 = nodeInfoBuilder.build()
        val signedNodeInfo3 = nodeInfo3.signWith(listOf(aliceKey))

@ -30,7 +30,7 @@ import java.security.cert.X509Certificate
class TestNodeInfoBuilder(private val intermediateAndRoot: Pair<CertificateAndKeyPair, X509Certificate> = DEV_INTERMEDIATE_CA to DEV_ROOT_CA.certificate) {
    private val identitiesAndPrivateKeys = ArrayList<Pair<PartyAndCertificate, PrivateKey>>()

    fun addIdentity(name: CordaX500Name, nodeKeyPair: KeyPair = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)): Pair<PartyAndCertificate, PrivateKey> {
    fun addLegalIdentity(name: CordaX500Name, nodeKeyPair: KeyPair = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)): Pair<PartyAndCertificate, PrivateKey> {
        val nodeCertificateAndKeyPair = createDevNodeCa(intermediateAndRoot.first, name, nodeKeyPair)
        val identityKeyPair = Crypto.generateKeyPair()
        val identityCert = X509Utilities.createCertificate(
@ -39,12 +39,35 @@ class TestNodeInfoBuilder(private val intermediateAndRoot: Pair<CertificateAndKe
                nodeCertificateAndKeyPair.keyPair,
                nodeCertificateAndKeyPair.certificate.subjectX500Principal,
                identityKeyPair.public)
        val certPath = X509Utilities.buildCertPath(
                identityCert,
                nodeCertificateAndKeyPair.certificate,

        val certs = arrayOf(identityCert, nodeCertificateAndKeyPair.certificate)
        val key = identityKeyPair.private

        val certPath = X509Utilities.buildCertPath(*certs,
                intermediateAndRoot.first.certificate,
                intermediateAndRoot.second)
        return Pair(PartyAndCertificate(certPath), identityKeyPair.private).also {

        return Pair(PartyAndCertificate(certPath), key).also {
            identitiesAndPrivateKeys += it
        }
    }

    fun addServiceIdentity(name: CordaX500Name, nodeKeyPair: KeyPair = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)): Pair<PartyAndCertificate, PrivateKey> {
        val serviceCert = X509Utilities.createCertificate(
                CertificateType.SERVICE_IDENTITY,
                intermediateAndRoot.first.certificate,
                intermediateAndRoot.first.keyPair,
                name.x500Principal,
                nodeKeyPair.public)

        val certs = arrayOf(serviceCert)
        val key = nodeKeyPair.private

        val certPath = X509Utilities.buildCertPath(*certs,
                intermediateAndRoot.first.certificate,
                intermediateAndRoot.second)

        return Pair(PartyAndCertificate(certPath), key).also {
            identitiesAndPrivateKeys += it
        }
    }
@ -72,7 +95,7 @@ class TestNodeInfoBuilder(private val intermediateAndRoot: Pair<CertificateAndKe

    fun createNodeInfoAndSigned(vararg names: CordaX500Name, serial: Long = 1, platformVersion: Int = 1): NodeInfoAndSigned {
        val nodeInfoBuilder = TestNodeInfoBuilder()
        names.forEach { nodeInfoBuilder.addIdentity(it) }
        names.forEach { nodeInfoBuilder.addLegalIdentity(it) }
        return nodeInfoBuilder.buildWithSigned(serial, platformVersion)
    }

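Note: the two builder methods differ mainly in chain shape: a legal identity chain runs identity -> node CA -> intermediate -> root, while `addServiceIdentity` issues directly from the intermediate, so its chain is one link shorter. A toy sketch of the two shapes, with plain strings standing in for certificates (names hypothetical):

    val intermediate = "intermediateCa"
    val root = "rootCa"

    // Legal identity: the node CA sits between the identity cert and the intermediate.
    fun legalIdentityChain(identityCert: String, nodeCaCert: String): List<String> =
            listOf(identityCert, nodeCaCert, intermediate, root)

    // Service identity: issued directly by the intermediate, so the chain is one link shorter.
    fun serviceIdentityChain(serviceCert: String): List<String> =
            listOf(serviceCert, intermediate, root)

    fun main() {
        println(legalIdentityChain("aliceIdentity", "aliceNodeCa")) // 4 certs
        println(serviceIdentityChain("aliceNotary"))                // 3 certs
    }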
tools/dbmigration/README.md
Normal file
@ -0,0 +1,15 @@
Database Migration Tool
-----------------------

Documentation for this tool can be found [here](http://docs.corda.r3.com/website/releases/docs_head/database-migration.html)

To build this from the command line in Unix:

Run ``./gradlew tools:dbmigration:buildMigrationTool`` to create a fat jar in ``tools/dbmigration/build/``

To build this from the command line in Windows:

Run ``gradlew tools:dbmigration:buildMigrationTool`` to create a fat jar in ``tools/dbmigration/build/``
@ -81,7 +81,7 @@ private fun initOptionParser(): OptionParser = OptionParser().apply {
            .withOptionalArg()

    accepts(RUN_MIGRATION,
            "This option will run the db migration on the configured database")
            "This option will run the db migration on the configured database. This is the only command that will actually write to the database.")

    accepts(DRY_RUN, """Output the database migration to the specified output file.
        |The output directory is the base-directory.
@ -89,7 +89,7 @@ private fun initOptionParser(): OptionParser = OptionParser().apply {
            .withOptionalArg()

    accepts(CREATE_MIGRATION_CORDAPP, """Create migration files for a CorDapp.
        |You can specify the fully qualified of the `MappedSchema` class. If not specified it will generate foll all schemas that don't have migrations.
        |You can specify the fully qualified name of the `MappedSchema` class. If not specified it will generate the migration for all schemas that don't have migrations.
        |The output directory is the base-directory, where a `migration` folder is created.""".trimMargin())
            .withOptionalArg()

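Note: the help text above belongs to a CLI built with jopt-simple's `OptionParser`: each `accepts(...)` call registers a flag with its description, and `withOptionalArg()` lets a flag take an optional value. A minimal self-contained sketch of the same pattern (the flag names here are illustrative, not the tool's real ones):

    import joptsimple.OptionParser

    fun main(args: Array<String>) {
        val parser = OptionParser()
        val runMigration = parser.accepts("execute-migration", "Run the migration on the configured database.")
        val dryRun = parser.accepts("dry-run", "Write the migration SQL to a file instead of the database.")
                .withOptionalArg()

        val options = parser.parse(*args)
        when {
            options.has(runMigration) -> println("would run migration")
            options.has(dryRun) -> println("would write SQL to: ${options.valueOf(dryRun) ?: "default file"}")
            else -> parser.printHelpOn(System.out)
        }
    }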