Merge branch 'master' into kat-remove-network-services

Commit c56e4a12e9
Author: Katelyn Baker
Date: 2018-06-01 15:00:13 +01:00
Committed by: GitHub (GPG Key ID: 4AEE18F83AFDEB23; no known key found for this signature in database)
22 changed files with 370 additions and 193 deletions

View File

@@ -21,6 +21,9 @@ import net.corda.nodeapi.internal.zookeeper.ZkClient
 import net.corda.nodeapi.internal.zookeeper.ZkLeader
 import java.lang.management.ManagementFactory
 import java.net.InetAddress
+import java.util.concurrent.Executors
+import java.util.concurrent.ScheduledFuture
+import java.util.concurrent.TimeUnit

 class ExternalMasterElectionService(val conf: BridgeConfiguration,
                                     val auditService: BridgeAuditService,
@@ -28,9 +31,12 @@ class ExternalMasterElectionService(val conf: BridgeConfiguration,
     private var haElector: ZkLeader? = null
     private var leaderListener: CordaLeaderListener? = null
+    private val scheduler = Executors.newSingleThreadScheduledExecutor()
+    private var becomeMasterFuture: ScheduledFuture<*>? = null

     companion object {
         val log = contextLogger()
+        const val DELAYED_LEADER_START = 5000L
     }

     init {
@@ -38,6 +44,21 @@ class ExternalMasterElectionService(val conf: BridgeConfiguration,
         require(conf.haConfig!!.haConnectionString.split(',').all { it.startsWith("zk://") }) { "Only Zookeeper HA mode 'zk://IPADDR:PORT' supported" }
     }

+    private fun becomeMaster() {
+        auditService.statusChangeEvent("Acquired leadership. Going active")
+        stateHelper.active = true
+        becomeMasterFuture = null
+    }
+
+    private fun becomeSlave() {
+        log.info("Cancelling leadership")
+        becomeMasterFuture?.apply {
+            cancel(false)
+        }
+        becomeMasterFuture = null
+        stateHelper.active = false
+    }
+
     override fun start() {
         val zkConf = conf.haConfig!!.haConnectionString.split(',').map { it.replace("zk://", "") }.joinToString(",")
         val leaderPriority = conf.haConfig!!.haPriority
@@ -49,13 +70,16 @@ class ExternalMasterElectionService(val conf: BridgeConfiguration,
         haElector = leaderElector
         val listener = object : CordaLeaderListener {
             override fun notLeader() {
-                auditService.statusChangeEvent("Loss of leadership signalled by Zookeeper")
-                stateHelper.active = false
+                auditService.statusChangeEvent("Leadership loss signalled from Zookeeper")
+                becomeSlave()
             }

             override fun isLeader() {
-                auditService.statusChangeEvent("Acquired leadership from Zookeeper. Going active")
-                stateHelper.active = true
+                log.info("Zookeeper has signalled leadership acquired. Delay master claim for a short period to allow old master to close")
+                becomeMasterFuture?.apply {
+                    cancel(false)
+                }
+                becomeMasterFuture = scheduler.schedule(::becomeMaster, DELAYED_LEADER_START, TimeUnit.MILLISECONDS)
             }
         }
@@ -68,7 +92,7 @@ class ExternalMasterElectionService(val conf: BridgeConfiguration,
     override fun stop() {
         auditService.statusChangeEvent("Stop requested")
-        stateHelper.active = false
+        becomeSlave()
         haElector?.apply {
             if (leaderListener != null) {
                 removeLeadershipListener(leaderListener!!)
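
The substance of the bridge change above: on a leadership signal from Zookeeper the service no longer goes active immediately. Instead it schedules activation after ``DELAYED_LEADER_START`` (5 seconds) so the old master has time to close, and it cancels the pending activation if leadership is lost in the meantime. A self-contained sketch of that schedule/cancel pattern (class and function names here are illustrative, not from the codebase):

    import java.util.concurrent.Executors
    import java.util.concurrent.ScheduledFuture
    import java.util.concurrent.TimeUnit

    class DelayedActivation(private val delayMillis: Long = 5000L) {
        private val scheduler = Executors.newSingleThreadScheduledExecutor()
        private var pending: ScheduledFuture<*>? = null

        fun onLeadershipAcquired() {
            pending?.cancel(false) // supersede any earlier claim that has not fired yet
            pending = scheduler.schedule({
                println("going active")
                pending = null
            }, delayMillis, TimeUnit.MILLISECONDS)
        }

        fun onLeadershipLost() {
            pending?.cancel(false) // abandon a pending claim before it activates
            pending = null
            println("going passive")
        }

        fun shutdown() = scheduler.shutdown()
    }

    fun main() {
        val ha = DelayedActivation(delayMillis = 100)
        ha.onLeadershipAcquired()
        Thread.sleep(50)
        ha.onLeadershipLost()     // fires before the delay elapses: activation is cancelled
        ha.onLeadershipAcquired()
        Thread.sleep(200)         // this claim survives the delay, so "going active" prints
        ha.shutdown()
    }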

View File

@@ -16,10 +16,7 @@ import net.corda.core.CordaInternal
 import net.corda.core.crypto.SecureHash
 import net.corda.core.identity.Party
 import net.corda.core.identity.PartyAndCertificate
-import net.corda.core.internal.FlowIORequest
-import net.corda.core.internal.FlowStateMachine
-import net.corda.core.internal.abbreviate
-import net.corda.core.internal.uncheckedCast
+import net.corda.core.internal.*
 import net.corda.core.messaging.DataFeed
 import net.corda.core.node.NodeInfo
 import net.corda.core.node.ServiceHub
@@ -27,7 +24,10 @@ import net.corda.core.serialization.CordaSerializable
 import net.corda.core.serialization.SerializationDefaults
 import net.corda.core.serialization.serialize
 import net.corda.core.transactions.SignedTransaction
-import net.corda.core.utilities.*
+import net.corda.core.utilities.ProgressTracker
+import net.corda.core.utilities.UntrustworthyData
+import net.corda.core.utilities.debug
+import net.corda.core.utilities.toNonEmptySet
 import org.slf4j.Logger
 import java.time.Duration

View File

@@ -20,14 +20,12 @@ import net.corda.core.cordapp.CordappContext
 import net.corda.core.crypto.*
 import net.corda.core.identity.CordaX500Name
 import net.corda.core.node.ServicesForResolution
-import net.corda.core.serialization.SerializationContext
-import net.corda.core.serialization.SerializedBytes
-import net.corda.core.serialization.deserialize
-import net.corda.core.serialization.serialize
+import net.corda.core.serialization.*
 import net.corda.core.transactions.SignedTransaction
 import net.corda.core.transactions.TransactionBuilder
 import net.corda.core.transactions.WireTransaction
 import net.corda.core.utilities.OpaqueBytes
-import net.corda.core.utilities.UntrustworthyData
 import org.bouncycastle.asn1.x500.X500Name
 import org.bouncycastle.asn1.x500.X500NameBuilder
 import org.bouncycastle.asn1.x500.style.BCStyle
@@ -516,3 +514,14 @@ fun <T> Iterable<T>.sumByLong(selector: (T) -> Long): Long = this.map { selector
 internal fun SignedTransaction.pushToLoggingContext() {
     MDC.put("tx_id", id.toString())
 }
+
+fun <T : Any> SerializedBytes<Any>.checkPayloadIs(type: Class<T>): UntrustworthyData<T> {
+    val payloadData: T = try {
+        val serializer = SerializationDefaults.SERIALIZATION_FACTORY
+        serializer.deserialize(this, type, SerializationDefaults.P2P_CONTEXT)
+    } catch (ex: Exception) {
+        throw IllegalArgumentException("Payload invalid", ex)
+    }
+    return type.castIfPossible(payloadData)?.let { UntrustworthyData(it) }
+            ?: throw IllegalArgumentException("We were expecting a ${type.name} but we instead got a ${payloadData.javaClass.name} ($payloadData)")
+}
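
The ``checkPayloadIs`` helper added above (moved here from ``UntrustworthyData.kt``, as the next file shows) deserialises an untyped payload and gates it behind ``UntrustworthyData`` so callers must validate before use. A minimal usage sketch, not part of the commit; it assumes a serialisation environment is already active (in tests, ``SerializationEnvironmentRule`` provides one):

    import net.corda.core.internal.checkPayloadIs
    import net.corda.core.serialization.SerializationDefaults
    import net.corda.core.serialization.SerializedBytes
    import net.corda.core.serialization.serialize
    import net.corda.core.utilities.unwrap

    fun demo() {
        // The upcast to Any mirrors how session payloads arrive untyped off the wire.
        val bytes: SerializedBytes<Any> = ("hello" as Any).serialize(context = SerializationDefaults.P2P_CONTEXT)
        val checked = bytes.checkPayloadIs(String::class.java) // UntrustworthyData<String>
        // unwrap forces a validation step before the value escapes.
        val value = checked.unwrap { require(it.isNotEmpty()) { "empty payload" }; it }
        println(value) // hello
    }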

View File

@@ -42,14 +42,3 @@ class UntrustworthyData<out T>(@PublishedApi internal val fromUntrustedWorld: T)
 }

 inline fun <T, R> UntrustworthyData<T>.unwrap(validator: (T) -> R): R = validator(fromUntrustedWorld)
-
-fun <T : Any> SerializedBytes<Any>.checkPayloadIs(type: Class<T>): UntrustworthyData<T> {
-    val payloadData: T = try {
-        val serializer = SerializationDefaults.SERIALIZATION_FACTORY
-        serializer.deserialize(this, type, SerializationDefaults.P2P_CONTEXT)
-    } catch (ex: Exception) {
-        throw IllegalArgumentException("Payload invalid", ex)
-    }
-    return type.castIfPossible(payloadData)?.let { UntrustworthyData(it) } ?: throw IllegalArgumentException("We were expecting a ${type.name} but we instead got a " +
-            "${payloadData.javaClass.name} ($payloadData)")
-}

View File

@@ -1,5 +1,10 @@
 @import "theme.css";

+/* override table no-wrap */
+.wy-table-responsive table td, .wy-table-responsive table th {
+    white-space: normal;
+}
+
 /* Highlights */
 .highlight .k,

View File

@@ -5,41 +5,32 @@
 <script type="text/javascript" src="_static/codesets.js"></script>

-Database Concerns for Corda Enterprise Nodes
-============================================
-
-Corda - the platform, and the installed third-party CorDapps store their data in a relational database (see :doc:`api-persistence`).
-When Corda is first installed, or when a new CorDapp is installed, associated tables, indexes, foreign-keys, etc must be created.
-Similarly, when Corda is upgraded, or when a new version of a CorDapp is installed, their database schemas may have changed,
-but the existing data needs to be preserved or changed accordingly.
-
-Corda supports multiple database management systems, so CorDapp developers need to keep this database portability requirement in mind when writing and testing the code.
-To address these concerns, Corda Enterprise provides a mechanism to make it straightforward to migrate from the old schemas to the new ones whilst preserving data.
-It does this by integrating a specialised database migration library.
-Also Corda Enterprise makes it easy to "lift" a CorDapp that does not handle the database migration (e.g.: the CorDapp developers did not include db migration scripts)
-
-This document is addressed to Node Administrators and CorDapp developers.
-
-* Node Administrators need to understand how the manage the underlying database.
-* CorDapp Developers need to understand how to write migration scripts.
-
 Database Migration
 ==================

-"Database migrations" (or schema migrations) in this document, refers to the evolution of the database schema or the actual data
-that a Corda Node uses when new releases of Corda or CorDapps are installed.
-
-On a high level, this means that the corda binaries will ship with scripts that cover everything from the creation of the schema
-for the initial install to changes on subsequent versions.
-
-A Corda Node runs on top of a database that contains internal node tables, vault tables and CorDapp tables.
+Corda - the platform, and the installed third-party CorDapps - stores its data in a relational database (see
+:doc:`api-persistence`). When Corda is first installed, or when a new CorDapp is installed, the associated tables, indexes,
+foreign keys, etc. must be created. Similarly, when Corda is upgraded, or when a new version of a CorDapp is installed,
+their database schemas may have changed, but the existing data needs to be preserved or changed accordingly.
+
+Corda supports multiple database management systems, so CorDapp developers need to keep this database portability
+requirement in mind when writing and testing their code. To address these concerns, Corda Enterprise provides a mechanism
+to make it straightforward to migrate from the old schemas to the new ones whilst preserving data. It does this by
+integrating a specialised database migration library. Corda Enterprise also makes it easy to "lift" a CorDapp that does
+not handle database migration (e.g. the CorDapp developers did not include database migration scripts).
+
+This document is addressed to node administrators and CorDapp developers.
+
+* Node administrators need to understand how to manage the underlying database.
+* CorDapp developers need to understand how to write migration scripts.
+
+"Database migrations" (or schema migrations) in this document refers to the evolution of the database schema or the
+actual data that a Corda node uses when new releases of Corda or CorDapps are installed. At a high level, this means
+that the Corda binaries will ship with scripts that cover everything from the creation of the schema for the initial
+install to changes on subsequent versions.
+
+A Corda node runs on top of a database that contains internal node tables, vault tables and CorDapp tables.
 The database migration framework will handle all of these in the same way, as evolutions of schema and data.

 As a database migration framework, we use the open source library `Liquibase <http://www.liquibase.org/>`_.

 .. note::
@@ -53,55 +44,45 @@ As a database migration framework, we use the open source library `Liquibase <ht
 About Liquibase
 ---------------

-Liquibase is a generic framework to implement an automated, version based database migration framework that supports a large number of databases.
-
-It works by maintaining a list of applied change sets.
-A changeset can be something very simple like adding a new column to a table.
-It stores each executed changeset with columns like id, author, timestamp, description, md5 hash, etc in a table called ``DATABASECHANGELOG``.
-This changelog table will be read every time a migration command is run to determine what change sets need to be executed.
-It represents the "version" of the database (The sum of the executed change sets at any point).
-Change sets are scripts written in a supported format (xml, yml, sql), and should never be modified once they have been executed. Any necessary correction should be applied in a new changeset.
-
-For documentation around liquibase see: `The Official website <http://www.liquibase.org>`_ and `Tutorial <https://www.thoughts-on-java.org/database-migration-with-liquibase-getting-started>`_.
-(Understanding how liquibase works is highly recommended for understanding how database migrations work in Corda.)
+Liquibase is a tool that implements an automated, version-based database migration framework with support for a
+large number of databases. It works by maintaining a list of applied changesets. A changeset can be something very
+simple, like adding a new column to a table. It stores each executed changeset with columns like id, author, timestamp,
+description, md5 hash, etc. in a table called ``DATABASECHANGELOG``. This changelog table will be read every time a
+migration command is run to determine what changesets need to be executed. It represents the "version" of the database
+(the sum of the executed changesets at any point). Changesets are scripts written in a supported format (XML, YML,
+SQL) and should never be modified once they have been executed. Any necessary correction should be applied in a new
+changeset.
+
+For documentation around Liquibase see: `The Official website <http://www.liquibase.org>`_ and `Tutorial <https://www.thoughts-on-java.org/database-migration-with-liquibase-getting-started>`_.
+(Understanding how Liquibase works is highly recommended for understanding how database migrations work in Corda.)

 Integration with the Corda node
-===============================
+-------------------------------

-Operational
------------
-By default, a node will *not* attempt to execute database migration scripts at startup (even when a new version has been deployed), but will check the database "version" (see above),
-and halt if the database is not in sync with the node, to avoid data corruption.
-To bring the database to the correct state we provide an advanced migration tool. (see below)
-Running the migration at startup automatically can be configured by specifying true in the ``database.runMigration`` node configuration setting (default behaviour is false).
-We recommend Node administrators to leave the default behaviour in production, and use the migration tool to have better control. (See below)
-It is safe to run at startup if you have implemented the usual best practices for database management ( e.g. running a backup before installing a new version, etc).
+By default, a node will *not* attempt to execute database migration scripts at startup (even when a new version has been
+deployed), but will check the database "version" and halt if the database is not in sync with the node, to
+avoid data corruption. To bring the database to the correct state we provide an advanced :ref:`migration-tool`.
+
+Running the migration at startup automatically can be configured by setting the ``database.runMigration`` node
+configuration option to true (the default is false). We recommend node administrators leave the default behaviour
+in production and use the migration tool for better control. It is safe to run the migration at startup if you have
+implemented the usual best practices for database management (e.g. running a backup before installing a new version).

 Migration scripts structure
 ---------------------------

-Corda provides migration scripts in an XML format for its internal node and vault tables.
-CorDapps should provide migration scripts for the tables they manage.
-In Corda, ``MappedSchemas`` (see :doc:`api-persistence`) manage JPA Entities and thus the corresponding database tables.
-So ``MappedSchemas`` are the natural place to point to the changelog file(s) that contain the changesets for those tables.
-
-Nodes can configure which ``MappedSchemas`` are included which means only the required tables are created.
-
-To follow standard best practices, our convention for structuring the changelogs is to have a "master" changelog file per ``MappedSchema`` that will only include release changelogs. (see example below )
+Corda provides migration scripts in an XML format for its internal node and vault tables. CorDapps should provide
+migration scripts for the tables they manage. In Corda, ``MappedSchemas`` (see :doc:`api-persistence`) manage JPA
+entities and thus the corresponding database tables, so ``MappedSchemas`` are the natural place to point to the
+changelog file(s) that contain the changesets for those tables. Nodes can configure which ``MappedSchemas`` are included,
+which means only the required tables are created. To follow standard best practices, our convention for structuring the
+changelogs is to have a "master" changelog file per ``MappedSchema`` that will only include release changelogs (see the example below).

 Example:

-As a hypothetical scenario, let's suppose that at some point (maybe for security reasons) the ``owner`` column of the ``PersistentCashState`` entity needs to be stored as a hash instead of the X500 name of the owning party.
+As a hypothetical scenario, let's suppose that at some point (maybe for security reasons) the ``owner`` column of the
+``PersistentCashState`` entity needs to be stored as a hash instead of the X500 name of the owning party.

 This means, as a CorDapp developer we have to do these generic steps:
@@ -161,25 +142,28 @@ The ``PersistentCashState`` entity is included in the ``CashSchemaV1`` schema, s
 </databaseChangeLog>

-As we can see in this example, database migrations can "destroy" data, so it is therefore good practice to backup the database before executing the migration scripts.
+As we can see in this example, database migrations can "destroy" data, so it is good practice to back up the
+database before executing the migration scripts.

-Migration tool:
-===============
-The Advanced Database migration tool is distributed as a standalone jar file named db-migration-tool-${corda_version}.jar
+.. _migration-tool:
+
+Migration tool
+--------------
+
+The migration tool is distributed as a standalone jar file named ``db-migration-tool-${corda_version}.jar``.
 It is intended to be used by Corda Enterprise node administrators.

 Currently it has these features:

-1. It allows running the migration on the database (`--execute-migration` )
-2. Offers the option to inspect the actual sql statements that will be run as part of the current migration (`--dry-run` )
+1. It allows running the migration on the database (``--execute-migration``).
+2. Offers the option to inspect the actual SQL statements that will be run as part of the current migration (``--dry-run``).
 3. Sometimes, when a node or the migration tool crashes while running migrations, Liquibase will not release the lock.
    This can happen during some long database operation, or when an admin kills the process.
    (This cannot happen during normal operation of a node, only during the migration process.)
    See: <http://www.liquibase.org/documentation/databasechangeloglock_table.html>.
    The tool provides a "release-lock" command that forcibly unlocks the database migration.
-4. When a CorDapp that does not is ready to be deployed on a Corda Enterprise production node,
-   using this tool, the CorDapp can be "lifted" (`--create-migration-sql-for-cordapp`).
+4. When a CorDapp that does not handle database migrations is ready to be deployed on a Corda Enterprise production node,
+   the CorDapp can be "lifted" using this tool (``--create-migration-sql-for-cordapp``).
    The reason this is needed is because those CorDapps don't handle this enterprise-level concern.
    See below for details.
@@ -195,79 +179,69 @@ It has the following command line options:
 --base-directory(*)                  The node directory
 --config-file                        The name of the config file, by default 'node.conf'
 --doorman-jar-path                   For internal use only
---create-migration-sql-for-cordapp   Create migration files for a CorDapp. You can specify the fully qualified name of the `MappedSchema` class. If not specified it will generate the migration for all schemas that don't have migrations. The output directory is the base-directory, where a `migration` folder is created.
---dry-run                            Output the database migration to the specified output file. The output directory is the base-directory. You can specify a file name or 'CONSOLE' if you want to send the output to the console.
---execute-migration                  This option will run the db migration on the configured database. This is the only command that will actually write to the database.
+--create-migration-sql-for-cordapp   Create migration files for a CorDapp. You can specify the fully qualified
+                                     name of the ``MappedSchema`` class. If not specified it will generate the migration
+                                     for all schemas that don't have migrations. The output directory is the
+                                     base-directory, where a ``migration`` folder is created.
+--dry-run                            Output the database migration to the specified output file. The output directory
+                                     is the base-directory. You can specify a file name or 'CONSOLE' if you want to send the output to the console.
+--execute-migration                  This option will run the db migration on the configured database. This is the
+                                     only command that will actually write to the database.
 --release-lock                       Releases whatever locks are on the database change log table, in case shutdown failed.
 ==================================== =======================================================================

-For example:
-``java -jar db-migration-tool-R3.CORDA-3.0-DP3-RC01.jar --base-directory /path/to/node --execute-migration``
+For example::
+
+    java -jar db-migration-tool-R3.CORDA-3.0-DP3-RC01.jar --base-directory /path/to/node --execute-migration

 .. note:: When running the migration tool, prefer using absolute paths when specifying the "base-directory".

-How-To:
-=======
+Examples
+--------

-Node Administrator installing Corda for the first time
-------------------------------------------------------
-- run normal installations steps
-- Using the db migration tool attempt a dry-run to inspect the output sql
-``--base-directory /path/to/node --dry-run``
-- The output sql from the above command can be executed directly on the database or this command can be run:
-``--base-directory /path/to/node --execute-migration``
-- At this point the corda node can be started successfully
+The first time you set up your node, you will want to create the necessary database tables. Run the normal installation
+steps. Using the db migration tool, attempt a dry-run to inspect the output SQL::
+
+    --base-directory /path/to/node --dry-run

-Node Administrator installing new version of Corda
---------------------------------------------------
-- deploy new version of Corda
-- attempt to start node. If there are db migrations in the new release, then the node will exit and will show how many changes are needed
-- The same steps as above can be executed: dry-run and/or execute-migration
+The output SQL from the above command can be executed directly on the database, or this command can be run::
+
+    --base-directory /path/to/node --execute-migration

-Node Administrator installing new CorDapp
------------------------------------------
-- deploy new CorDapp to the node
-- same steps as above
+At this point the node can be started successfully.

-Node Administrator installing new version of CorDapp
-----------------------------------------------------
-- replace old CorDapp with new version of CorDapp
-- same steps as above
+When upgrading, deploy the new version of Corda. Attempt to start the node. If there are database migrations in the new
+release, then the node will exit and will show how many changes are needed. You can then use the same commands
+as above, either to do a dry run or to execute the migrations.
+
+The same is true when installing or upgrading a CorDapp. Do a dry run, check the SQL, then trigger a migration.

-Node Administrator installing a CorDapp developed by the OS community
----------------------------------------------------------------------
+Node administrator installing a CorDapp targeted at the open source node
+------------------------------------------------------------------------

-The Corda (OS) project does not have support for database migrations as this is an Enterprise feature.
-So CorDapps contributed by the OS community will not have this concern addressed by their original developers
-To help Corda Enterprise users, we offer support in the migration tool for "Lifting" a Cordapp
+The open source Corda codebase does not have support for Liquibase, so CorDapps contributed by the OS community
+will not have this concern addressed by their original developers.
+
+To help Corda Enterprise users, we offer support in the migration tool for "lifting" a CorDapp to support Liquibase.

 These are the steps:

-- deploy the CorDapp on your node (copy the jar in the `cordapps` folder)
-- find out the name of the MappedSchema containing the new contract state entities.
-- call the migration tool: ``--base-directory /path/to/node --create-migration-sql-for-cordapp com.example.MyMappedSchema``
-- this will generate a file called ``my-mapped-schema.changelog-master.sql`` in a folder called ``migration`` in the `base-directory`
-- in case you don't specify the actual MappedSchema name, the tool will generate one sql file for each schema defined in the CorDapp
-- inspect the file(s) to make sure it is correct. This is a standard sql file with some liquibase metadata as comments
-- create a jar with the `migration` folder (by convention it could be named: originalCorDappName-migration.jar), and deploy this jar together with the CorDapp
-- To make sure that the new migration will be used, the migration tool can be run in a `dry-run` mode and inspect the output file
+1. Deploy the CorDapp on your node (copy the jar into the ``cordapps`` folder).
+2. Find out the name of the ``MappedSchema`` containing the new contract state entities.
+3. Call the migration tool: ``--base-directory /path/to/node --create-migration-sql-for-cordapp com.example.MyMappedSchema``.
+   This will generate a file called ``my-mapped-schema.changelog-master.sql`` in a folder called ``migration`` in the ``base-directory``.
+   In case you don't specify the actual ``MappedSchema`` name, the tool will generate one SQL file for each schema defined in the CorDapp.
+4. Inspect the file(s) to make sure they are correct. This is a standard SQL file with some Liquibase metadata as comments.
+5. Create a jar with the ``migration`` folder (by convention it could be named ``originalCorDappName-migration.jar``),
+   and deploy this jar together with the CorDapp.
+6. To make sure that the new migration will be used, do a dry run with the migration tool and inspect the output file.

-Node Administrator deploying a new version of a CorDapp developed by the OS community
---------------------------------------------------------------------------------
+Node administrator deploying a new version of a CorDapp developed by the OS community
+-------------------------------------------------------------------------------------

-This is a slightly more complicated scenario.
-The Node Administrator will have to understand the changes (if any) that happened in the latest version.
-If there are changes that require schema changes, the Node Administrator will have to write and test those.
-The way to do that is to create a new changeset in the existing changelog for that CorDapp ( generated as above)
-See `Liquibase Sql Format <http://www.liquibase.org/documentation/sql_format.html>`_
+This is a slightly more complicated scenario. You will have to understand the changes (if any) that happened in the
+latest version. If there are changes that require schema adjustments, you will have to write and test those migrations.
+The way to do that is to create a new changeset in the existing changelog for that CorDapp (generated as above). See the
+`Liquibase SQL format <http://www.liquibase.org/documentation/sql_format.html>`_ documentation.

 CorDapp developer developing a new CorDapp
@@ -276,30 +250,50 @@ CorDapp developer developing a new CorDapp
 CorDapp developers who decide to store contract state in custom entities can create migration files for the ``MappedSchema`` they define.

 There are 2 ways of associating a migration file with a schema:

-1) By overriding ``val migrationResource: String`` and pointing to a file that needs to be in the classpath
-2) By putting a file on the classpath in a `migration` package whose name is the hyphenated name of the schema. (All supported file extensions will be appended to the name)
+1) By overriding ``val migrationResource: String`` and pointing to a file that needs to be in the classpath.
+2) By putting a file on the classpath in a ``migration`` package whose name is the hyphenated name of the schema (all supported file extensions will be appended to the name).

-CorDapp developers can use any of the supported formats (xml, sql, json, yaml) for the migration files they create.
-
-In case CorDapp developers distribute their CorDapps with migration files, these will be automatically applied when the CorDapp is deployed on a Corda Enterprise node.
-If they are deployed on a standard ("Open source") Corda node, then the migration will be ignored, and the database tables will be generated by Hibernate.
-
-In case CorDapp developers don't distribute a CorDapp with migration files, then the organisation that decides to deploy this CordApp on a Corda Enterprise node has the responsibility to manage the database.
-
-During development or demo on the default H2 database, then the CorDapp will just work when deployed even if there are no migration scripts, by relying on the primitive migration tool provided by hibernate, which is not intended for production.
-
-A very important aspect to be remembered is that the CorDapp will have to work on all supported Corda databases.
-It is the responsibility of the developers to test the migration scripts and the CorDapp against all the databases.
-In the future we will provide aditional tooling to assist with this aspect.
-
-CorDapp developer developing a new version of an exiting CorDapp
-----------------------------------------------------------------
-
-Depending on the changes to the ``PersistentEntities`` a changelog will have to be created as per the liquibase documentation and the example above.
+CorDapp developers can use any of the supported formats (XML, SQL, JSON, YAML) for the migration files they create. In
+case CorDapp developers distribute their CorDapps with migration files, these will be automatically applied when the
+CorDapp is deployed on a Corda Enterprise node. If they are deployed on an open source Corda node, then the
+migration will be ignored, and the database tables will be generated by Hibernate. In case CorDapp developers don't
+distribute a CorDapp with migration files, then the organisation that decides to deploy this CorDapp on a Corda
+Enterprise node has the responsibility to manage the database.
+
+During development or demos on the default H2 database, the CorDapp will just work when deployed even if there are
+no migration scripts, by relying on the primitive migration tool provided by Hibernate, which is not intended for
+production.
+
+.. warning:: A very important aspect to remember is that the CorDapp will have to work on all supported Corda databases.
+   It is the responsibility of the developers to test the migration scripts and the CorDapp against all the databases.
+   In the future we will provide additional tooling to assist with this aspect.
+
+When developing a new version of an existing CorDapp, depending on the changes to the ``PersistentEntities``, a
+changelog will have to be created as per the Liquibase documentation and the example above.

 Troubleshooting
 ---------------

-When seeing behavour similar to `this <https://stackoverflow.com/questions/15528795/liquibase-lock-reasons>`_
-You can run ``--base-directory /path/to/node --release-lock``
+When seeing problems acquiring the lock, with output like this::
+
+    Waiting for changelog lock....
+    Waiting for changelog lock....
+    Waiting for changelog lock....
+    Waiting for changelog lock....
+    Waiting for changelog lock....
+    Waiting for changelog lock....
+    Waiting for changelog lock....
+    Liquibase Update Failed: Could not acquire change log lock. Currently locked by SomeComputer (192.168.15.X) since 2013-03-20 13:39
+    SEVERE 2013-03-20 16:59:liquibase: Could not acquire change log lock. Currently locked by SomeComputer (192.168.15.X) since 2013-03-20 13:39
+    liquibase.exception.LockException: Could not acquire change log lock. Currently locked by SomeComputer (192.168.15.X) since 2013-03-20 13:39
+            at liquibase.lockservice.LockService.waitForLock(LockService.java:81)
+            at liquibase.Liquibase.tag(Liquibase.java:507)
+            at liquibase.integration.commandline.Main.doMigration(Main.java:643)
+            at liquibase.integration.commandline.Main.main(Main.java:116)
+
+then the advice at `this StackOverflow question <https://stackoverflow.com/questions/15528795/liquibase-lock-reasons>`_
+may be useful. You can run ``--base-directory /path/to/node --release-lock`` to force Liquibase to give up the lock.
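
The doc above notes that a ``MappedSchema`` can point at its changelog by overriding ``val migrationResource``. A sketch of what that looks like in CorDapp code, with a hypothetical schema and entity (only the ``migrationResource`` mechanism is taken from the doc; every name below is illustrative):

    import net.corda.core.schemas.MappedSchema
    import net.corda.core.schemas.PersistentState
    import javax.persistence.Column
    import javax.persistence.Entity
    import javax.persistence.Table

    @Entity
    @Table(name = "example_cash_states")
    class PersistentExampleState(
            // Per the hypothetical scenario above: the owner stored as a hash, not an X500 name.
            @Column(name = "owner_hash")
            var ownerHash: String = ""
    ) : PersistentState()

    object ExampleSchema

    object ExampleSchemaV1 : MappedSchema(
            schemaFamily = ExampleSchema.javaClass,
            version = 1,
            mappedTypes = listOf(PersistentExampleState::class.java)
    ) {
        // Liquibase resolves this name against the classpath, appending supported extensions.
        override val migrationResource = "example.changelog-master"
    }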

View File

@@ -24,21 +24,17 @@ Network Map Service
 Supporting the messaging layer is a network map service, which is responsible for tracking public nodes on the network.

-Nodes have an internal component, the network map cache, which contains a copy of the network map (which is just a
-document). When a node starts up its cache fetches a copy of the full network map, and requests to be notified of
-changes. The node then registers itself with the network map service, and the service notifies subscribers that a new
-node has joined the network. Nodes do not automatically deregister themselves, so (for example) nodes going offline
-briefly for maintenance are retained in the network map, and messages for them will be queued, minimising disruption.
-
-Nodes submit signed changes to the map service, which then forwards update notifications on to nodes which have
-requested to be notified of changes.
-
-The network map currently supports:
-
-* Looking up nodes by service
-* Looking up node for a party
-* Suggesting a node providing a specific service, based on suitability for a contract and parties, for example suggesting
-  an appropriate interest rates oracle for an interest rate swap contract. Currently no recommendation logic is in place.
+Nodes have an internal component, the network map cache, which contains a copy of the network map (backed up in the
+database to persist that information across restarts in case the network map server is down). When a node starts up, its
+cache fetches a copy of the full network map (from the server, or from the filesystem in development mode). After that it
+polls at a regular interval for the network map and applies any related changes locally.
+Nodes do not automatically deregister themselves, so (for example) nodes going offline briefly for maintenance are retained
+in the network map, and messages for them will be queued, minimising disruption.
+
+Additionally, on every restart and on a daily basis, nodes submit their signed `NodeInfo`s to the map service. When the
+network map gets signed, these changes are distributed as new network data. `NodeInfo` republishing is treated as a
+heartbeat from the node; based on it, the network map service is able to figure out which nodes can be considered stale
+and remove them from the network map document after the `eventHorizon` time.

 Message queues
 --------------

View File

@@ -191,3 +191,19 @@ Then node can be started as usual. At some point in time, nodes will gradually j
 information on business relations with operators. Private networks are not separate networks, nodes are still part of bigger
 compatibility zone, only hidden. We reuse all the infrastructure of the compatibility zone like notaries, permissioning service,
 so the interoperability between nodes is kept.
+
+Cleaning the network map cache
+------------------------------
+
+Sometimes it may happen that the node ends up with an inconsistent view of the network. This can occur due to changes in
+deployment leading to stale data in the database, different data distribution times, or mistakes in configuration. For these
+unlikely events, both an RPC method and a command line option exist for clearing the local network map cache database. To
+use them, either run the following from the command line:
+
+.. code-block:: shell
+
+    java -jar corda.jar --clear-network-map-cache
+
+or call the RPC method `clearNetworkMapCache` (it can be invoked through the node's shell as `run clearNetworkMapCache`; for
+more information on how to log into the node's shell, see :doc:`shell`). As we are testing and hardening the implementation,
+this step shouldn't normally be required. After cleaning the cache, network map data is restored on the next poll from the
+server or filesystem.
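
The RPC route mentioned above can also be driven from a client application rather than the shell. A minimal sketch, assuming ``clearNetworkMapCache`` is exposed on ``CordaRPCOps`` (as the shell command ``run clearNetworkMapCache`` implies) and using placeholder address and credentials:

    import net.corda.client.rpc.CordaRPCClient
    import net.corda.core.utilities.NetworkHostAndPort

    fun main() {
        // Address and credentials are illustrative; use your node's RPC settings.
        val client = CordaRPCClient(NetworkHostAndPort("localhost", 10006))
        val connection = client.start("rpcUser", "rpcPassword")
        try {
            connection.proxy.clearNetworkMapCache()
            println("Network map cache cleared; it will repopulate on the next poll.")
        } finally {
            connection.notifyServerAndClose()
        }
    }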

View File

@@ -65,6 +65,7 @@ class NodeArgsParser : AbstractArgsParser<CmdLineOptions>() {
     private val justGenerateNodeInfoArg = optionParser.accepts("just-generate-node-info",
             "Perform the node start-up task necessary to generate its nodeInfo, save it to disk, then quit")
     private val bootstrapRaftClusterArg = optionParser.accepts("bootstrap-raft-cluster", "Bootstraps Raft cluster. The node forms a single node cluster (ignoring otherwise configured peer addresses), acting as a seed for other nodes to join the cluster.")
+    private val clearNetworkMapCache = optionParser.accepts("clear-network-map-cache", "Clears local copy of network map, on node startup it will be restored from server or file system.")

     override fun doParse(optionSet: OptionSet): CmdLineOptions {
         require(!optionSet.has(baseDirectoryArg) || !optionSet.has(configFileArg)) {
@@ -89,6 +90,7 @@ class NodeArgsParser : AbstractArgsParser<CmdLineOptions>() {
         val networkRootTrustStorePassword = optionSet.valueOf(networkRootTrustStorePasswordArg)
         val unknownConfigKeysPolicy = optionSet.valueOf(unknownConfigKeysPolicy)
         val devMode = optionSet.has(devModeArg)
+        val clearNetworkMapCache = optionSet.has(clearNetworkMapCache)

         val registrationConfig = if (isRegistration) {
             requireNotNull(networkRootTrustStorePassword) { "Network root trust store password must be provided in registration mode using --network-root-truststore-password." }
@@ -109,7 +111,8 @@ class NodeArgsParser : AbstractArgsParser<CmdLineOptions>() {
                 justGenerateNodeInfo,
                 bootstrapRaftCluster,
                 unknownConfigKeysPolicy,
-                devMode)
+                devMode,
+                clearNetworkMapCache)
     }
 }
@@ -126,7 +129,8 @@ data class CmdLineOptions(val baseDirectory: Path,
                           val justGenerateNodeInfo: Boolean,
                           val bootstrapRaftCluster: Boolean,
                           val unknownConfigKeysPolicy: UnknownConfigKeysPolicy,
-                          val devMode: Boolean) {
+                          val devMode: Boolean,
+                          val clearNetworkMapCache: Boolean) {
     fun loadConfig(): Pair<Config, Try<NodeConfiguration>> {
         val rawConfig = ConfigHelper.loadConfig(
                 baseDirectory,

View File

@@ -297,6 +297,15 @@ abstract class AbstractNode(val configuration: NodeConfiguration,
         }
     }

+    fun clearNetworkMapCache() {
+        Node.printBasicNodeInfo("Clearing network map cache entries")
+        log.info("Starting clearing of network map cache entries...")
+        configureDatabase(configuration.dataSourceProperties, configuration.database, { null }, { null }).use {
+            val networkMapCache = PersistentNetworkMapCache(it, emptyList())
+            networkMapCache.clearNetworkMapCache()
+        }
+    }
+
     open fun start(): StartedNode<AbstractNode> {
         check(started == null) { "Node has already been started" }
         if (configuration.devMode) {

View File

@@ -71,7 +71,6 @@ open class NodeStartup(val args: Array<String>) {
             return false
         }
         val cmdlineOptions = NodeArgsParser().parseOrExit(*args)
-
         // We do the single node check before we initialise logging so that in case of a double-node start it
         // doesn't mess with the running node's logs.
         enforceSingleNodeIsRunning(cmdlineOptions.baseDirectory)
@@ -169,6 +168,10 @@ open class NodeStartup(val args: Array<String>) {
     protected open fun startNode(conf: NodeConfiguration, versionInfo: VersionInfo, startTime: Long, cmdlineOptions: CmdLineOptions) {
         val node = createNode(conf, versionInfo)
+        if (cmdlineOptions.clearNetworkMapCache) {
+            node.clearNetworkMapCache()
+            return
+        }
         if (cmdlineOptions.justGenerateNodeInfo) {
             // Perform the minimum required start-up logic to be able to write a nodeInfo to disk
             node.generateAndSaveNodeInfo()

View File

@@ -20,6 +20,7 @@ import net.corda.node.VersionInfo
 import net.corda.node.services.statemachine.FlowMessagingImpl
 import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2PMessagingHeaders
 import org.apache.activemq.artemis.api.core.ActiveMQDuplicateIdException
+import org.apache.activemq.artemis.api.core.ActiveMQObjectClosedException
 import org.apache.activemq.artemis.api.core.SimpleString
 import org.apache.activemq.artemis.api.core.client.ClientMessage
 import org.apache.activemq.artemis.api.core.client.ClientProducer
@@ -128,6 +129,12 @@ class MessagingExecutor(
                         break@eventLoop
                     }
                 }
+            } catch (exception: ActiveMQObjectClosedException) {
+                log.error("Messaging client connection closed", exception)
+                if (job is Job.Send) {
+                    job.sentFuture.setException(exception)
+                }
+                System.exit(1)
             } catch (exception: Throwable) {
                 log.error("Exception while handling job $job, disregarding", exception)
                 if (job is Job.Send) {

View File

@@ -29,6 +29,7 @@ import net.corda.core.serialization.SingletonSerializeAsToken
 import net.corda.core.serialization.serialize
 import net.corda.core.utilities.NetworkHostAndPort
 import net.corda.core.utilities.contextLogger
+import net.corda.core.utilities.debug
 import net.corda.core.utilities.loggerFor
 import net.corda.node.internal.schemas.NodeInfoSchemaV1
 import net.corda.node.services.api.NetworkMapCacheBaseInternal
@@ -351,9 +352,11 @@ open class PersistentNetworkMapCache(
     }

     override fun clearNetworkMapCache() {
+        logger.info("Clearing Network Map Cache entries")
         invalidateCaches()
         database.transaction {
             val result = getAllInfos(session)
+            logger.debug { "Number of node infos to be cleared: ${result.size}" }
             for (nodeInfo in result) session.remove(nodeInfo)
         }
     }

View File

@@ -22,7 +22,7 @@ import net.corda.core.serialization.SerializedBytes
 import net.corda.core.serialization.serialize
 import net.corda.core.utilities.NonEmptySet
 import net.corda.core.utilities.UntrustworthyData
-import net.corda.core.utilities.checkPayloadIs
+import net.corda.core.internal.checkPayloadIs

 class FlowSessionImpl(
         override val counterparty: Party,

View File

@@ -364,6 +364,10 @@ class SingleThreadedStateMachineManager(
             null
         }
         externalEventMutex.withLock {
+            // Remove any sessions the old flow has.
+            for (sessionId in getFlowSessionIds(currentState.checkpoint)) {
+                sessionToFlow.remove(sessionId)
+            }
             if (flow != null) addAndStartFlow(flowId, flow)
             // Deliver all the external events from the old flow instance.
             val unprocessedExternalEvents = mutableListOf<ExternalEvent>()

View File

@@ -54,7 +54,8 @@ class NodeArgsParserTest {
                 justGenerateNodeInfo = false,
                 bootstrapRaftCluster = false,
                 unknownConfigKeysPolicy = UnknownConfigKeysPolicy.FAIL,
-                devMode = false))
+                devMode = false,
+                clearNetworkMapCache = false))
     }

     @Test
@@ -168,6 +169,12 @@ class NodeArgsParserTest {
         assertThat(cmdLineOptions.justGenerateNodeInfo).isTrue()
     }

+    @Test
+    fun `clear network map cache`() {
+        val cmdLineOptions = parser.parse("--clear-network-map-cache")
+        assertThat(cmdLineOptions.clearNetworkMapCache).isTrue()
+    }
+
     @Test
     fun `bootstrap raft cluster`() {
         val cmdLineOptions = parser.parse("--bootstrap-raft-cluster")

View File

@@ -17,15 +17,21 @@ import net.corda.core.internal.delete
 import net.corda.core.internal.list
 import net.corda.core.internal.readObject
 import net.corda.core.node.NodeInfo
+import net.corda.core.serialization.serialize
 import net.corda.core.utilities.NetworkHostAndPort
 import net.corda.node.VersionInfo
+import net.corda.node.internal.schemas.NodeInfoSchemaV1
 import net.corda.node.services.config.NodeConfiguration
 import net.corda.nodeapi.internal.SignedNodeInfo
 import net.corda.nodeapi.internal.network.NodeInfoFilesCopier.Companion.NODE_INFO_FILE_NAME_PREFIX
+import net.corda.nodeapi.internal.persistence.CordaPersistence
 import net.corda.nodeapi.internal.persistence.DatabaseConfig
+import net.corda.testing.core.ALICE_NAME
 import net.corda.testing.core.SerializationEnvironmentRule
+import net.corda.testing.internal.createNodeInfoAndSigned
 import net.corda.testing.internal.rigorousMock
 import net.corda.testing.node.MockServices.Companion.makeTestDataSourceProperties
+import org.assertj.core.api.Assertions.assertThat
 import org.junit.Rule
 import org.junit.Test
 import org.junit.rules.TemporaryFolder
@@ -62,12 +68,55 @@ class NodeTest {
     @Test
     fun `generateAndSaveNodeInfo works`() {
-        val nodeAddress = NetworkHostAndPort("0.1.2.3", 456)
-        val nodeName = CordaX500Name("Manx Blockchain Corp", "Douglas", "IM")
+        val configuration = createConfig()
         val info = VersionInfo(789, "3.0", "SNAPSHOT", "R3")
+        configureDatabase(configuration.dataSourceProperties, configuration.database, { null }, { null }).use { database ->
+            val node = Node(configuration, info, initialiseSerialization = false)
+            assertEquals(node.generateNodeInfo(), node.generateNodeInfo()) // Node info doesn't change (including the serial)
+        }
+    }
+
+    @Test
+    fun `clear network map cache works`() {
+        val configuration = createConfig()
+        val (nodeInfo, _) = createNodeInfoAndSigned(ALICE_NAME)
+        configureDatabase(configuration.dataSourceProperties, configuration.database, { null }, { null }).use {
+            it.transaction {
+                val persistentNodeInfo = NodeInfoSchemaV1.PersistentNodeInfo(
+                        id = 0,
+                        hash = nodeInfo.serialize().hash.toString(),
+                        addresses = nodeInfo.addresses.map { NodeInfoSchemaV1.DBHostAndPort.fromHostAndPort(it) },
+                        legalIdentitiesAndCerts = nodeInfo.legalIdentitiesAndCerts.mapIndexed { idx, elem ->
+                            NodeInfoSchemaV1.DBPartyAndCertificate(elem, isMain = idx == 0)
+                        },
+                        platformVersion = nodeInfo.platformVersion,
+                        serial = nodeInfo.serial
+                )
+                // Save some NodeInfo
+                session.save(persistentNodeInfo)
+            }
+            val versionInfo = VersionInfo(10, "3.0", "SNAPSHOT", "R3")
+            val node = Node(configuration, versionInfo, initialiseSerialization = false)
+            assertThat(getAllInfos(it)).isNotEmpty
+            node.clearNetworkMapCache()
+            assertThat(getAllInfos(it)).isEmpty()
+        }
+    }
+
+    private fun getAllInfos(database: CordaPersistence): List<NodeInfoSchemaV1.PersistentNodeInfo> {
+        return database.transaction {
+            val criteria = session.criteriaBuilder.createQuery(NodeInfoSchemaV1.PersistentNodeInfo::class.java)
+            criteria.select(criteria.from(NodeInfoSchemaV1.PersistentNodeInfo::class.java))
+            session.createQuery(criteria).resultList
+        }
+    }
+
+    private fun createConfig(): NodeConfiguration {
         val dataSourceProperties = makeTestDataSourceProperties()
         val databaseConfig = DatabaseConfig()
-        val configuration = rigorousMock<AbstractNodeConfiguration>().also {
+        val nodeAddress = NetworkHostAndPort("0.1.2.3", 456)
+        val nodeName = CordaX500Name("Manx Blockchain Corp", "Douglas", "IM")
+        return rigorousMock<AbstractNodeConfiguration>().also {
             doReturn(null).whenever(it).relay
             doReturn(nodeAddress).whenever(it).p2pAddress
             doReturn(nodeName).whenever(it).myLegalName
@@ -79,9 +128,5 @@ class NodeTest {
             doReturn("tsp").whenever(it).trustStorePassword
             doReturn("ksp").whenever(it).keyStorePassword
         }
-        configureDatabase(dataSourceProperties, databaseConfig, { null }, { null }).use { _ ->
-            val node = Node(configuration, info, initialiseSerialization = false)
-            assertEquals(node.generateNodeInfo(), node.generateNodeInfo()) // Node info doesn't change (including the serial)
-        }
     }
 }

View File

@@ -0,0 +1,61 @@
+package net.corda.node.services.messaging
+
+import com.codahale.metrics.MetricRegistry
+import com.codahale.metrics.Timer
+import com.nhaarman.mockito_kotlin.any
+import com.nhaarman.mockito_kotlin.mock
+import com.nhaarman.mockito_kotlin.whenever
+import net.corda.core.utilities.OpaqueBytes
+import net.corda.node.VersionInfo
+import net.corda.node.services.statemachine.DeduplicationId
+import net.corda.testing.node.internal.InMemoryMessage
+import org.apache.activemq.artemis.api.core.ActiveMQObjectClosedException
+import org.apache.activemq.artemis.api.core.client.ClientProducer
+import org.apache.activemq.artemis.api.core.client.ClientSession
+import org.junit.After
+import org.junit.Rule
+import org.junit.Test
+import org.junit.contrib.java.lang.system.ExpectedSystemExit
+import kotlin.concurrent.thread
+
+class MessagingExecutorTest {
+    @Rule
+    @JvmField
+    val exit: ExpectedSystemExit = ExpectedSystemExit.none()
+
+    private lateinit var messagingExecutor: MessagingExecutor
+
+    @After
+    fun after() {
+        messagingExecutor.close()
+    }
+
+    @Test
+    fun `System exit node if messaging is closed`() {
+        exit.expectSystemExitWithStatus(1)
+        val session = mock<ClientSession>()
+        whenever(session.createMessage(any())).thenReturn(mock())
+        val producer = mock<ClientProducer>()
+        whenever(producer.send(any(), any(), any())).thenThrow(ActiveMQObjectClosedException())
+        val resolver = mock<AddressToArtemisQueueResolver>()
+        whenever(resolver.resolveTargetToArtemisQueue(any())).thenReturn("address")
+        val metricRegistry = mock<MetricRegistry>()
+        val sendLatencyMetric = mock<Timer>()
+        whenever(metricRegistry.timer(any())).thenReturn(sendLatencyMetric)
+        whenever(sendLatencyMetric.time()).thenReturn(mock())
+        whenever(metricRegistry.histogram(any())).thenReturn(mock())
+        messagingExecutor = MessagingExecutor(session, producer, VersionInfo.UNKNOWN, resolver, metricRegistry, "ourSenderUUID", 10, "legalName")
+        messagingExecutor.start()
+
+        thread {
+            messagingExecutor.send(InMemoryMessage("topic", OpaqueBytes(ByteArray(10)), DeduplicationId("1")), mock())
+        }.join()
+    }
+}

View File

@@ -46,7 +46,7 @@ public class FlowShellCommand extends InteractiveShellCommand {
             @Usage("The class name of the flow to run, or an unambiguous substring") @Argument String name,
             @Usage("The data to pass as input") @Argument(unquote = false) List<String> input
     ) {
-        logger.info("Executing command \"flow start {} {}\",", name, input.stream().collect(joining(" ")));
+        logger.info("Executing command \"flow start {} {}\",", name, (input != null) ? input.stream().collect(joining(" ")) : "<no arguments>");
         startFlow(name, input, out, ops(), ansiProgressRenderer(), objectMapper());
     }

View File

@@ -47,7 +47,7 @@ public class RunShellCommand extends InteractiveShellCommand {
     )
     @Usage("runs a method from the CordaRPCOps interface on the node.")
     public Object main(InvocationContext<Map> context, @Usage("The command to run") @Argument(unquote = false) List<String> command) {
-        logger.info("Executing command \"run {}\",", command.stream().collect(joining(" ")));
+        logger.info("Executing command \"run {}\",", (command != null) ? command.stream().collect(joining(" ")) : "<no arguments>");
         StringToMethodCallParser<CordaRPCOps> parser = new StringToMethodCallParser<>(CordaRPCOps.class, objectMapper());

         if (command == null) {

View File

@@ -31,7 +31,7 @@ public class StartShellCommand extends InteractiveShellCommand {
     public void main(@Usage("The class name of the flow to run, or an unambiguous substring") @Argument String name,
                      @Usage("The data to pass as input") @Argument(unquote = false) List<String> input) {
-        logger.info("Executing command \"start {} {}\",", name, input.stream().collect(joining(" ")));
+        logger.info("Executing command \"start {} {}\",", name, (input != null) ? input.stream().collect(joining(" ")) : "<no arguments>");
         ANSIProgressRenderer ansiProgressRenderer = ansiProgressRenderer();
         FlowShellCommand.startFlow(name, input, out, ops(), ansiProgressRenderer != null ? ansiProgressRenderer : new CRaSHANSIProgressRenderer(out), objectMapper());
     }

View File

@@ -291,6 +291,7 @@ object InteractiveShell {
         while (!Thread.currentThread().isInterrupted) {
             try {
                 latch.await()
+                break
             } catch (e: InterruptedException) {
                 try {
                     rpcOps.killFlow(stateObservable.id)
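
The one-line ``break`` above fixes a busy loop: ``latch.await()`` returns immediately once the latch has counted down, so without the ``break`` the surrounding ``while`` would spin forever after the flow finishes. A self-contained sketch of the corrected pattern (names are illustrative):

    import java.util.concurrent.CountDownLatch
    import kotlin.concurrent.thread

    fun main() {
        val flowEnded = CountDownLatch(1)
        thread { Thread.sleep(100); flowEnded.countDown() } // simulates the flow completing

        while (!Thread.currentThread().isInterrupted) {
            try {
                flowEnded.await()
                break // without this, await() keeps returning instantly and the loop spins
            } catch (e: InterruptedException) {
                // In the shell this is where the node is asked to kill the flow,
                // after which the loop waits again for the flow to end.
            }
        }
        println("flow ended, loop exited exactly once")
    }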