Merge remote-tracking branch 'origin/release/os/4.6' into christians/ENT-5273-update-fb-from-os.4.6

commit 81d68abe7e

@@ -48,13 +48,18 @@ pipeline {
         BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
         ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
         ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish JDK 11 Release to Artifactory".replaceAll("/", "::")
-        CORDA_USE_CACHE = "corda-remotes"
         CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
         CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
     }

     stages {
+        /*
+         * Temporarily disable Sonatype checks for regression builds
+         */
         stage('Sonatype Check') {
+            when {
+                expression { isReleaseTag }
+            }
             steps {
                 sh "./gradlew --no-daemon clean jar"
                 script {
@@ -83,7 +88,6 @@ pipeline {
                     "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
                     "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\" " +
                     "-Ddocker.buildbase.tag=11latest " +
-                    "-Ddocker.container.env.parameter.CORDA_USE_CACHE=\"${CORDA_USE_CACHE}\" " +
                     "-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
                     "-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
                     "-Ddocker.dockerfile=DockerfileJDK11Azul" +

.ci/dev/nightly-regression/Jenkinsfile (vendored, 2 changes)

@@ -20,7 +20,6 @@ pipeline {
         EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
         BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
         ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
-        CORDA_USE_CACHE = "corda-remotes"
         CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
         CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
     }
@@ -39,7 +38,6 @@ pipeline {
                     "-Dkubenetize=true " +
                     "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                     "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
-                    "-Ddocker.container.env.parameter.CORDA_USE_CACHE=\"${CORDA_USE_CACHE}\" " +
                     "-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
                     "-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
                     "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +

.ci/dev/publish-api-docs/Jenkinsfile (vendored, 15 changes)

@@ -1,5 +1,15 @@
-@Library('corda-shared-build-pipeline-steps')
+#!groovy
+/**
+ * Jenkins pipeline to build Corda OS KDoc & Javadoc archive
+ */
+
+/**
+ * Kill already started job.
+ * Assume new commit takes precendence and results from previous
+ * unfinished builds are not required.
+ * This feature doesn't play well with disableConcurrentBuilds() option
+ */
+@Library('corda-shared-build-pipeline-steps')
 import static com.r3.build.BuildControl.killAllExistingBuildsForJob

 killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
@@ -10,6 +20,7 @@ pipeline {
         ansiColor('xterm')
         timestamps()
         timeout(time: 3, unit: 'HOURS')
+        buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
     }

     environment {
@@ -20,7 +31,7 @@ pipeline {

     stages {
         stage('Publish Archived API Docs to Artifactory') {
-            when { tag pattern: /^release-os-V(\d+\.\d+)(\.\d+){0,1}(-GA){0,1}(-\d{4}-\d\d-\d\d-\d{4}){0,1}$/, comparator: 'REGEXP' }
+            when { tag pattern: /^docs-release-os-V(\d+\.\d+)(\.\d+){0,1}(-GA){0,1}(-\d{4}-\d\d-\d\d-\d{4}){0,1}$/, comparator: 'REGEXP' }
             steps {
                 sh "./gradlew :clean :docs:artifactoryPublish -DpublishApiDocs"
             }
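The only functional change in the publish gate above is the tag pattern: archived API docs are now published for tags prefixed docs-release-os-V instead of release-os-V. As a quick sanity check of what the new pattern accepts, here is a small standalone Kotlin sketch (not part of the commit; the tag names are made-up examples):

fun main() {
    // Same expression as the Jenkinsfile `when { tag pattern: ... }` gate, as a Kotlin Regex.
    val docsReleaseTag = Regex("""^docs-release-os-V(\d+\.\d+)(\.\d+){0,1}(-GA){0,1}(-\d{4}-\d\d-\d\d-\d{4}){0,1}$""")
    // Hypothetical tag names, used only to illustrate what the gate matches.
    println(docsReleaseTag.matches("docs-release-os-V4.6"))        // true
    println(docsReleaseTag.matches("docs-release-os-V4.6.1-GA"))   // true
    println(docsReleaseTag.matches("release-os-V4.6"))             // false: the old prefix no longer triggers the stage
}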

.ci/dev/regression/Jenkinsfile (vendored, 8 changes)

@@ -50,13 +50,18 @@ pipeline {
         BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
         ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
         ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish Release to Artifactory".replaceAll("/", "::")
-        CORDA_USE_CACHE = "corda-remotes"
         CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
         CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
     }

     stages {
+        /*
+         * Temporarily disable Sonatype checks for regression builds
+         */
         stage('Sonatype Check') {
+            when {
+                expression { isReleaseTag }
+            }
             steps {
                 sh "./gradlew --no-daemon clean jar"
                 script {
@@ -89,7 +94,6 @@ pipeline {
                     "-Dkubenetize=true " +
                     "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                     "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
-                    "-Ddocker.container.env.parameter.CORDA_USE_CACHE=\"${CORDA_USE_CACHE}\" " +
                     "-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
                     "-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
                     "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +

Jenkinsfile (vendored, 2 changes)

@@ -17,7 +17,6 @@ pipeline {
         EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
         BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
         ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
-        CORDA_USE_CACHE = "corda-remotes"
         CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
         CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
     }
@@ -30,7 +29,6 @@ pipeline {
                     "-Dkubenetize=true " +
                     "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                     "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
-                    "-Ddocker.container.env.parameter.CORDA_USE_CACHE=\"${CORDA_USE_CACHE}\" " +
                     "-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
                     "-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
                     "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +

@@ -62,14 +62,14 @@ buildscript {

     ext.asm_version = '7.1'
     ext.artemis_version = '2.6.2'
-    // TODO Upgrade Jackson only when corda is using kotlin 1.3.10
-    ext.jackson_version = '2.9.7'
+    // TODO Upgrade to Jackson 2.10+ only when corda is using kotlin 1.3.10
+    ext.jackson_version = '2.9.8'
     ext.jetty_version = '9.4.19.v20190610'
     ext.jersey_version = '2.25'
     ext.servlet_version = '4.0.1'
     ext.assertj_version = '3.12.2'
-    ext.slf4j_version = '1.7.26'
-    ext.log4j_version = '2.11.2'
+    ext.slf4j_version = '1.7.30'
+    ext.log4j_version = '2.13.3'
     ext.bouncycastle_version = constants.getProperty("bouncycastleVersion")
     ext.guava_version = constants.getProperty("guavaVersion")
     ext.caffeine_version = constants.getProperty("caffeineVersion")

@@ -292,6 +292,7 @@ class ReconnectingCordaRPCOps private constructor(
     }
     private class ErrorInterceptingHandler(val reconnectingRPCConnection: ReconnectingRPCConnection) : InvocationHandler {
         private fun Method.isStartFlow() = name.startsWith("startFlow") || name.startsWith("startTrackedFlow")
+        private fun Method.isShutdown() = name == "shutdown" || name == "gracefulShutdown" || name == "terminate"

         private fun checkIfIsStartFlow(method: Method, e: InvocationTargetException) {
             if (method.isStartFlow()) {
@@ -306,7 +307,7 @@ class ReconnectingCordaRPCOps private constructor(
         *
         * A negative number for [maxNumberOfAttempts] means an unlimited number of retries will be performed.
         */
-        @Suppress("ThrowsCount", "ComplexMethod")
+        @Suppress("ThrowsCount", "ComplexMethod", "NestedBlockDepth")
        private fun doInvoke(method: Method, args: Array<out Any>?, maxNumberOfAttempts: Int): Any? {
            checkIfClosed()
            var remainingAttempts = maxNumberOfAttempts
@@ -318,21 +319,21 @@ class ReconnectingCordaRPCOps private constructor(
                        log.debug { "RPC $method invoked successfully." }
                    }
                } catch (e: InvocationTargetException) {
-                   if (method.name.equals("shutdown", true)) {
-                       log.debug("Shutdown invoked, stop reconnecting.", e)
-                       reconnectingRPCConnection.notifyServerAndClose()
-                       break
-                   }
                    when (e.targetException) {
                        is RejectedCommandException -> {
                            log.warn("Node is being shutdown. Operation ${method.name} rejected. Shutting down...", e)
                            throw e.targetException
                        }
                        is ConnectionFailureException -> {
+                           if (method.isShutdown()) {
+                               log.debug("Shutdown invoked, stop reconnecting.", e)
+                               reconnectingRPCConnection.notifyServerAndClose()
+                           } else {
                                log.warn("Failed to perform operation ${method.name}. Connection dropped. Retrying....", e)
                                reconnectingRPCConnection.reconnectOnError(e)
                                checkIfIsStartFlow(method, e)
+                           }
                        }
                        is RPCException -> {
                            rethrowIfUnrecoverable(e.targetException as RPCException)

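The change above moves shutdown handling out of the generic catch block: instead of special-casing only a method literally named "shutdown" before any dispatch, the handler now treats shutdown, gracefulShutdown and terminate alike, and only stops reconnecting when the connection failure happened during one of those calls. A minimal standalone sketch of that control flow (simplified, assumed names; this is not the actual Corda class):

import java.lang.reflect.InvocationTargetException
import java.lang.reflect.Method

// Stand-in for Corda's ConnectionFailureException, defined here only so the sketch is self-contained.
class ConnectionFailureExceptionSketch(message: String) : RuntimeException(message)

// Mirrors the helper added in the diff: all shutdown-style RPCs are treated the same way.
fun Method.isShutdownCall() = name == "shutdown" || name == "gracefulShutdown" || name == "terminate"

// Skeleton of the retry loop: reconnect and retry on connection failure, unless the failed call
// was itself a shutdown request, in which case close the connection and stop retrying.
fun invokeWithReconnect(method: Method, attempt: () -> Any?, reconnect: () -> Unit, close: () -> Unit): Any? {
    while (true) {
        try {
            return attempt()
        } catch (e: InvocationTargetException) {
            if (e.targetException is ConnectionFailureExceptionSketch) {
                if (method.isShutdownCall()) {
                    close()      // shutdown requested: do not keep reconnecting
                    return null
                }
                reconnect()      // otherwise re-establish the connection and let the loop retry
            } else {
                throw e.targetException
            }
        }
    }
}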

@@ -1,28 +0,0 @@
-package net.corda.common.logging
-
-import org.apache.logging.log4j.core.Core
-import org.apache.logging.log4j.core.LogEvent
-import org.apache.logging.log4j.core.appender.rewrite.RewritePolicy
-import org.apache.logging.log4j.core.config.plugins.Plugin
-import org.apache.logging.log4j.core.config.plugins.PluginFactory
-import org.apache.logging.log4j.core.impl.Log4jLogEvent
-
-@Plugin(name = "ErrorCodeRewritePolicy", category = Core.CATEGORY_NAME, elementType = "rewritePolicy", printObject = false)
-class ErrorCodeRewritePolicy : RewritePolicy {
-    override fun rewrite(source: LogEvent): LogEvent? {
-        val newMessage = source.message?.withErrorCodeFor(source.thrown, source.level)
-        return if (newMessage == source.message) {
-            source
-        } else {
-            Log4jLogEvent.Builder(source).setMessage(newMessage).build()
-        }
-    }
-
-    companion object {
-        @JvmStatic
-        @PluginFactory
-        fun createPolicy(): ErrorCodeRewritePolicy {
-            return ErrorCodeRewritePolicy()
-        }
-    }
-}

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<Configuration status="info" packages="net.corda.common.logging" shutdownHook="disable">
+<Configuration status="info" shutdownHook="disable">

     <Properties>
         <Property name="log-path">${sys:log-path:-logs}</Property>
@@ -172,21 +172,17 @@

         <Rewrite name="Console-ErrorCode-Selector">
             <AppenderRef ref="Console-Selector"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>

         <Rewrite name="Console-ErrorCode-Appender-Println">
             <AppenderRef ref="Console-Appender-Println"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>

         <Rewrite name="RollingFile-ErrorCode-Appender">
             <AppenderRef ref="RollingFile-Appender"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>
         <Rewrite name="Diagnostic-RollingFile-ErrorCode-Appender">
             <AppenderRef ref="Diagnostic-RollingFile-Appender"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>
     </Appenders>


@@ -14,13 +14,12 @@ java8MinUpdateVersion=171
 platformVersion=8
 guavaVersion=28.0-jre
 # Quasar version to use with Java 8:
-quasarVersion=0.7.12_r3
-quasarClassifier=jdk8
+quasarVersion=0.7.13_r3
 # Quasar version to use with Java 11:
 quasarVersion11=0.8.0_r3
 jdkClassifier11=jdk11
 proguardVersion=6.1.1
-bouncycastleVersion=1.60
+bouncycastleVersion=1.66
 classgraphVersion=4.8.78
 disruptorVersion=3.4.2
 typesafeConfigVersion=1.3.4

@@ -8,6 +8,7 @@ import net.corda.core.identity.Party;
 import net.corda.core.utilities.KotlinUtilsKt;
 import net.corda.testing.core.TestConstants;
 import net.corda.testing.core.TestUtils;
+import net.corda.testing.driver.DriverDSL;
 import net.corda.testing.driver.DriverParameters;
 import net.corda.testing.driver.NodeHandle;
 import net.corda.testing.driver.NodeParameters;
@@ -19,8 +20,11 @@ import org.slf4j.LoggerFactory;
 import java.io.Serializable;
 import java.time.Duration;
 import java.time.temporal.ChronoUnit;
+import java.util.Arrays;
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.function.BiFunction;
+import java.util.stream.Collectors;

 import static net.corda.testing.driver.Driver.driver;

@@ -29,14 +33,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
     @Test
     public void awaitFlowExternalOperationInJava() {
         driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
-            NodeHandle alice = KotlinUtilsKt.getOrThrow(
-                    driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
-                    Duration.of(1, ChronoUnit.MINUTES)
-            );
-            NodeHandle bob = KotlinUtilsKt.getOrThrow(
-                    driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
-                    Duration.of(1, ChronoUnit.MINUTES)
-            );
+            List<NodeHandle> aliceAndBob = aliceAndBob(driver);
+            NodeHandle alice = aliceAndBob.get(0);
+            NodeHandle bob = aliceAndBob.get(1);
             return KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
                     FlowWithExternalOperationInJava.class,
                     TestUtils.singleIdentity(bob.getNodeInfo())
@@ -47,14 +46,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
     @Test
     public void awaitFlowExternalAsyncOperationInJava() {
         driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
-            NodeHandle alice = KotlinUtilsKt.getOrThrow(
-                    driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
-                    Duration.of(1, ChronoUnit.MINUTES)
-            );
-            NodeHandle bob = KotlinUtilsKt.getOrThrow(
-                    driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
-                    Duration.of(1, ChronoUnit.MINUTES)
-            );
+            List<NodeHandle> aliceAndBob = aliceAndBob(driver);
+            NodeHandle alice = aliceAndBob.get(0);
+            NodeHandle bob = aliceAndBob.get(1);
             return KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
                     FlowWithExternalAsyncOperationInJava.class,
                     TestUtils.singleIdentity(bob.getNodeInfo())
@@ -65,14 +59,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
     @Test
     public void awaitFlowExternalOperationInJavaCanBeRetried() {
         driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
-            NodeHandle alice = KotlinUtilsKt.getOrThrow(
-                    driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
-                    Duration.of(1, ChronoUnit.MINUTES)
-            );
-            NodeHandle bob = KotlinUtilsKt.getOrThrow(
-                    driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
-                    Duration.of(1, ChronoUnit.MINUTES)
-            );
+            List<NodeHandle> aliceAndBob = aliceAndBob(driver);
+            NodeHandle alice = aliceAndBob.get(0);
+            NodeHandle bob = aliceAndBob.get(1);
             KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
                     FlowWithExternalOperationThatGetsRetriedInJava.class,
                     TestUtils.singleIdentity(bob.getNodeInfo())
@@ -190,4 +179,15 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
             return operation.apply(futureService, deduplicationId);
         }
     }
+
+    private List<NodeHandle> aliceAndBob(DriverDSL driver) {
+        return Arrays.asList(TestConstants.ALICE_NAME, TestConstants.BOB_NAME)
+                .stream()
+                .map(nm -> driver.startNode(new NodeParameters().withProvidedName(nm)))
+                .collect(Collectors.toList())
+                .stream()
+                .map(future -> KotlinUtilsKt.getOrThrow(future,
+                        Duration.of(1, ChronoUnit.MINUTES)))
+                .collect(Collectors.toList());
+    }
 }

@@ -4,6 +4,7 @@ import co.paralleluniverse.fibers.Suspendable
 import net.corda.core.flows.HospitalizeFlowException
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.minutes
@@ -24,8 +25,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::FlowWithExternalAsyncOperation, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
@@ -35,8 +38,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation that checks deduplicationId is not rerun when flow is retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<DuplicatedProcessException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalAsyncOperationWithDeduplication,
@@ -50,8 +55,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation propagates exception to calling flow`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<MyCordaException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalAsyncOperationPropagatesException,
@@ -66,8 +73,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation exception can be caught in flow`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             val result = alice.rpc.startFlow(
                 ::FlowWithExternalAsyncOperationThatThrowsExceptionAndCaughtInFlow,
                 bob.nodeInfo.singleIdentity()
@@ -80,8 +89,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation with exception that hospital keeps for observation does not fail`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalAsyncOperationPropagatesException,
@@ -96,8 +107,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation with exception that hospital discharges is retried and runs the future again`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalAsyncOperationPropagatesException,
@@ -112,8 +125,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation that throws exception rather than completing future exceptionally fails with internal exception`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<StateTransitionException> {
                 alice.rpc.startFlow(::FlowWithExternalAsyncOperationUnhandledException, bob.nodeInfo.singleIdentity())
                     .returnValue.getOrThrow(1.minutes)
@@ -125,8 +140,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation that passes serviceHub into process can be retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalAsyncOperationThatPassesInServiceHubCanRetry,
@@ -140,8 +157,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation that accesses serviceHub from flow directly will fail when retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<DirectlyAccessedServiceHubException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalAsyncOperationThatDirectlyAccessesServiceHubFailsRetry,
@@ -155,8 +174,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `starting multiple futures and joining on their results`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::FlowThatStartsMultipleFuturesAndJoins, bob.nodeInfo.singleIdentity()).returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
         }
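The refactoring that repeats through these driver tests replaces two sequential startNode(...).getOrThrow() calls with a pipeline that starts every node first and only then waits, so the in-process nodes boot concurrently; transpose is Corda's internal helper that turns a List of futures into a future of a List. A rough standalone sketch of the same idea using plain CompletableFutures (the transposeSketch helper and node names are assumptions for illustration, not Corda APIs):

import java.util.concurrent.CompletableFuture

// Minimal stand-in for Corda's transpose(): List<CompletableFuture<T>> -> CompletableFuture<List<T>>.
fun <T> List<CompletableFuture<T>>.transposeSketch(): CompletableFuture<List<T>> =
    CompletableFuture.allOf(*toTypedArray()).thenApply { _ -> map { future -> future.join() } }

fun main() {
    // Pretend each future is a node start-up; both begin before either is awaited.
    val (alice, bob) = listOf("Alice", "Bob")
        .map { name -> CompletableFuture.supplyAsync { "node:$name" } }
        .transposeSketch()
        .join()
    println("$alice and $bob started concurrently")
}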

@@ -3,6 +3,7 @@ package net.corda.coretests.flows
 import co.paralleluniverse.fibers.Suspendable
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.minutes
@@ -18,8 +19,10 @@ class FlowExternalOperationStartFlowTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `starting a flow inside of a flow that starts a future will succeed`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::FlowThatStartsAnotherFlowInAnExternalOperation, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
@@ -29,8 +32,10 @@ class FlowExternalOperationStartFlowTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `multiple flows can be started and their futures joined from inside a flow`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::ForkJoinFlows, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)

@@ -5,6 +5,7 @@ import net.corda.core.flows.FlowLogic
 import net.corda.core.flows.HospitalizeFlowException
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.internal.packageName
 import net.corda.core.messaging.startFlow
 import net.corda.core.node.services.queryBy
@@ -29,8 +30,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::FlowWithExternalOperation, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
@@ -40,8 +43,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation that checks deduplicationId is not rerun when flow is retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<DuplicatedProcessException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationWithDeduplication,
@@ -55,8 +60,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation propagates exception to calling flow`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<MyCordaException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationPropagatesException,
@@ -71,8 +78,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation exception can be caught in flow`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::FlowWithExternalOperationThatThrowsExceptionAndCaughtInFlow, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
@@ -82,8 +91,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation with exception that hospital keeps for observation does not fail`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationPropagatesException,
@@ -98,8 +109,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation with exception that hospital discharges is retried and runs the external operation again`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationPropagatesException,
@@ -114,8 +127,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation that passes serviceHub into process can be retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationThatPassesInServiceHubCanRetry,
@@ -129,8 +144,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation that accesses serviceHub from flow directly will fail when retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<DirectlyAccessedServiceHubException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationThatDirectlyAccessesServiceHubFailsRetry,
@@ -199,8 +216,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation can be retried when an error occurs inside of database transaction`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             val success = alice.rpc.startFlow(
                 ::FlowWithExternalOperationThatErrorsInsideOfDatabaseTransaction,
                 bob.nodeInfo.singleIdentity()

@@ -10,6 +10,7 @@ import net.corda.core.flows.StartableByRPC
 import net.corda.core.flows.StateMachineRunId
 import net.corda.core.flows.UnexpectedFlowEndException
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.minutes
@@ -56,9 +57,10 @@ class FlowIsKilledTest {
     @Test(timeout = 300_000)
     fun `manually handled killed flows propagate error to counter parties`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
-            val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
+            val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.let { rpc ->
                 val handle = rpc.startFlow(
                     ::AFlowThatWantsToDieAndKillsItsFriends,
@@ -85,8 +87,11 @@ class FlowIsKilledTest {
     @Test(timeout = 300_000)
     fun `a manually killed initiated flow will propagate the killed error to the initiator and its counter parties`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
+
             val handle = alice.rpc.startFlow(
                 ::AFlowThatGetsMurderedByItsFriend,
                 bob.nodeInfo.singleIdentity()

@@ -7,6 +7,7 @@ import net.corda.core.flows.InitiatedBy
 import net.corda.core.flows.InitiatingFlow
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.minutes
@@ -53,8 +54,10 @@ class FlowSleepTest {
     fun `flow can sleep and perform other suspending functions`() {
         // ensures that events received while the flow is sleeping are not processed
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             val (start, finish) = alice.rpc.startFlow(
                 ::SleepAndInteractWithPartyFlow,
                 bob.nodeInfo.singleIdentity()

@@ -6,6 +6,7 @@ import net.corda.core.crypto.internal.Instances
 import org.bouncycastle.asn1.x509.AlgorithmIdentifier
 import org.bouncycastle.operator.ContentSigner
 import java.io.OutputStream
+import java.security.InvalidKeyException
 import java.security.PrivateKey
 import java.security.Provider
 import java.security.SecureRandom
@@ -24,7 +25,8 @@ object ContentSignerBuilder {
         else
             Signature.getInstance(signatureScheme.signatureName, provider)

-        val sig = signatureInstance.apply {
+        val sig = try {
+            signatureInstance.apply {
                 // TODO special handling for Sphincs due to a known BouncyCastle's Sphincs bug we reported.
                 // It is fixed in BC 161b12, so consider updating the below if-statement after updating BouncyCastle.
                 if (random != null && signatureScheme != SPHINCS256_SHA256) {
@@ -33,6 +35,9 @@ object ContentSignerBuilder {
                     initSign(privateKey)
                 }
             }
+        } catch(ex: InvalidKeyException) {
+            throw InvalidKeyException("Incorrect key type ${privateKey.algorithm} for signature scheme ${signatureInstance.algorithm}", ex)
+        }
         return object : ContentSigner {
             private val stream = SignatureOutputStream(sig, optimised)
             override fun getAlgorithmIdentifier(): AlgorithmIdentifier = sigAlgId

@@ -0,0 +1,33 @@
+package net.corda.nodeapi.internal.crypto
+
+import net.corda.core.crypto.Crypto
+import org.assertj.core.api.Assertions.assertThatExceptionOfType
+import org.junit.Test
+import java.math.BigInteger
+import java.security.InvalidKeyException
+
+class ContentSignerBuilderTest {
+    companion object {
+        private const val entropy = "20200723"
+    }
+
+    @Test(timeout = 300_000)
+    fun `should build content signer for valid eddsa key`() {
+        val signatureScheme = Crypto.EDDSA_ED25519_SHA512
+        val provider = Crypto.findProvider(signatureScheme.providerName)
+        val issuerKeyPair = Crypto.deriveKeyPairFromEntropy(signatureScheme, BigInteger(entropy))
+        ContentSignerBuilder.build(signatureScheme, issuerKeyPair.private, provider)
+    }
+
+    @Test(timeout = 300_000)
+    fun `should fail to build content signer for incorrect key type`() {
+        val signatureScheme = Crypto.EDDSA_ED25519_SHA512
+        val provider = Crypto.findProvider(signatureScheme.providerName)
+        val issuerKeyPair = Crypto.deriveKeyPairFromEntropy(Crypto.ECDSA_SECP256R1_SHA256, BigInteger(entropy))
+        assertThatExceptionOfType(InvalidKeyException::class.java)
+            .isThrownBy {
+                ContentSignerBuilder.build(signatureScheme, issuerKeyPair.private, provider)
+            }
+            .withMessage("Incorrect key type EC for signature scheme NONEwithEdDSA")
+    }
+}
@@ -23,8 +23,10 @@ class NodesStartStopSingleVmTests(@Suppress("unused") private val iteration: Int
     @Test(timeout = 300_000)
     fun nodesStartStop() {
        driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            startNode(providedName = ALICE_NAME).getOrThrow()
-            startNode(providedName = BOB_NAME).getOrThrow()
+            val alice = startNode(providedName = ALICE_NAME)
+            val bob = startNode(providedName = BOB_NAME)
+            alice.getOrThrow()
+            bob.getOrThrow()
        }
    }
 }
@@ -1,5 +1,6 @@
 package net.corda.node.logging

+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.internal.div
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.OpaqueBytes
@@ -22,8 +23,10 @@ class IssueCashLoggingTests {
     fun `issuing and sending cash as payment do not result in duplicate insertion warnings`() {
        val user = User("mark", "dadada", setOf(all()))
        driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS)) {
-            val nodeA = startNode(rpcUsers = listOf(user)).getOrThrow()
-            val nodeB = startNode().getOrThrow()
+            val (nodeA, nodeB) = listOf(startNode(rpcUsers = listOf(user)),
+                    startNode())
+                    .transpose()
+                    .getOrThrow()

            val amount = 1.DOLLARS
            val ref = OpaqueBytes.of(0)
@@ -62,30 +62,49 @@ abstract class StateMachineErrorHandlingTest {
        }
    }

-    internal fun DriverDSL.createBytemanNode(
-        providedName: CordaX500Name,
+    internal fun DriverDSL.createBytemanNode(nodeProvidedName: CordaX500Name): Pair<NodeHandle, Int> {
+        val port = nextPort()
+        val bytemanNodeHandle = (this as InternalDriverDSL).startNode(
+            NodeParameters(
+                providedName = nodeProvidedName,
+                rpcUsers = listOf(rpcUser)
+            ),
+            bytemanPort = port
+        )
+        return bytemanNodeHandle.getOrThrow() to port
+    }
+
+    internal fun DriverDSL.createNode(nodeProvidedName: CordaX500Name): NodeHandle {
+        return (this as InternalDriverDSL).startNode(
+            NodeParameters(
+                providedName = nodeProvidedName,
+                rpcUsers = listOf(rpcUser)
+            )
+        ).getOrThrow()
+    }
+
+    internal fun DriverDSL.createNodeAndBytemanNode(
+        nodeProvidedName: CordaX500Name,
+        bytemanNodeProvidedName: CordaX500Name,
        additionalCordapps: Collection<TestCordapp> = emptyList()
-    ): Pair<NodeHandle, Int> {
+    ): Triple<NodeHandle, NodeHandle, Int> {
        val port = nextPort()
        val nodeHandle = (this as InternalDriverDSL).startNode(
            NodeParameters(
-                providedName = providedName,
+                providedName = nodeProvidedName,
+                rpcUsers = listOf(rpcUser),
+                additionalCordapps = additionalCordapps
+            )
+        )
+        val bytemanNodeHandle = startNode(
+            NodeParameters(
+                providedName = bytemanNodeProvidedName,
                rpcUsers = listOf(rpcUser),
                additionalCordapps = additionalCordapps
            ),
            bytemanPort = port
-        ).getOrThrow()
-        return nodeHandle to port
-    }
-
-    internal fun DriverDSL.createNode(providedName: CordaX500Name, additionalCordapps: Collection<TestCordapp> = emptyList()): NodeHandle {
-        return startNode(
-            NodeParameters(
-                providedName = providedName,
-                rpcUsers = listOf(rpcUser),
-                additionalCordapps = additionalCordapps
            )
-        ).getOrThrow()
+        return Triple(nodeHandle.getOrThrow(), bytemanNodeHandle.getOrThrow(), port)
    }

    internal fun submitBytemanRules(rules: String, port: Int) {
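The helper refactor above replaces the separate createNode/createBytemanNode pair with createNodeAndBytemanNode, which returns a Triple that the tests below destructure. A small usage sketch under the same fixtures (startDriver, the X500 name constants and submitBytemanRules come from the test base class; the flow and rule text here are illustrative only):

// Sketch of how the new helper is consumed; SomeFlow and the rule body are hypothetical placeholders.
startDriver {
    // First handle is the plain node, second is the Byteman-instrumented node, third is the agent port.
    val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
    val rules = """RULE Example ... ENDRULE"""          // fault-injection rules, elided here
    submitBytemanRules(rules, port)                      // inject the rules into the instrumented node
    alice.rpc.startFlow(::SomeFlow, charlie.nodeInfo.singleIdentity()).returnValue.getOrThrow()
}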
@@ -35,8 +35,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error recording a transaction inside of ReceiveFinalityFlow will keep the flow in for observation`() {
        startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
-            val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)

            // could not get rule for FinalityDoctor + observation counter to work
            val rules = """
@@ -97,8 +96,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error resolving a transaction's dependencies inside of ReceiveFinalityFlow will keep the flow in for observation`() {
        startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
-            val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)

            // could not get rule for FinalityDoctor + observation counter to work
            val rules = """
@@ -161,8 +159,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action while receiving a transaction inside of ReceiveFinalityFlow will be retried and complete successfully`() {
        startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
-            val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)

            val rules = """
                RULE Create Counter
@@ -229,8 +226,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action while receiving a transaction inside of ReceiveFinalityFlow will be retried and be kept for observation is error persists`() {
        startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
-            val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)

            val rules = """
                RULE Create Counter
@@ -40,8 +40,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -88,8 +87,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `unexpected error during flow initialisation throws exception to client`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
            val rules = """
                RULE Create Counter
                CLASS ${FlowStateMachineImpl::class.java.name}
@@ -134,8 +132,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during initialisation when trying to rollback the flow's database transaction the flow is able to retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -187,8 +184,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during initialisation when trying to close the flow's database transaction the flow is able to retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -242,8 +238,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -298,8 +293,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during retrying a flow that failed when committing its original checkpoint will retry the flow again and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Throw exception on executeCommitTransaction action after first suspend + commit
@@ -351,8 +345,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and complete successfully`() {
        startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)

            val rules = """
                RULE Create Counter
@@ -400,8 +393,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
        startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)

            val rules = """
                RULE Create Counter
@@ -464,8 +456,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - session init can be retried when there is a transient connection error to the database`() {
        startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)

            val rules = """
                RULE Create Counter
@@ -529,8 +520,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - session init can be retried when there is a transient connection error to the database goes to observation if error persists`() {
        startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)

            val rules = """
                RULE Create Counter
@@ -35,8 +35,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with SendInitial action is retried 3 times and kept for observation if error persists`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -87,8 +86,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with SendInitial action that does not persist will retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -135,8 +133,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with AcknowledgeMessages action is swallowed and flow completes successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Set flag when inside executeAcknowledgeMessages
@@ -230,8 +227,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during flow retry when executing retryFlowFromSafePoint the flow is able to retry and recover`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Set flag when executing first suspend
@@ -296,8 +292,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action that occurs after the first suspend will retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            // seems to be restarting the flow from the beginning every time
            val rules = """
@@ -362,8 +357,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            // seems to be restarting the flow from the beginning every time
            val rules = """
@@ -419,8 +413,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action and ConstraintViolationException that occurs when completing a flow will retry and be kept for observation if error persists`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -488,8 +481,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `flow can be retried when there is a transient connection error to the database`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -552,8 +544,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `flow can be retried when there is a transient connection error to the database goes to observation if error persists`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -610,8 +601,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
        startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)

            val rules = """
                RULE Create Counter
@@ -103,8 +103,7 @@ class StateMachineKillFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `flow killed when it is in the flow hospital for observation is removed correctly`() {
        startDriver {
-            val (alice, port) = createBytemanNode(ALICE_NAME)
-            val charlie = createNode(CHARLIE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -40,8 +40,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `initiating subflow - error during transition with CommitTransaction action that occurs during the first send will retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -119,8 +118,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `initiating subflow - error during transition with CommitTransaction action that occurs after the first receive will retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -190,8 +188,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `inline subflow - error during transition with CommitTransaction action that occurs during the first send will retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -253,8 +250,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `inline subflow - error during transition with CommitTransaction action that occurs during the first receive will retry and complete successfully`() {
        startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)

            val rules = """
                RULE Create Counter
@@ -41,7 +41,7 @@ class AddressBindingFailureTests {

        assertThatThrownBy {
            driver(DriverParameters(startNodesInProcess = false,
-                                    notarySpecs = listOf(NotarySpec(notaryName)),
+                                    notarySpecs = listOf(NotarySpec(notaryName, startInProcess = false)),
                                    notaryCustomOverrides = mapOf("p2pAddress" to address.toString()),
                                    portAllocation = portAllocation,
                                    cordappsForAllNodes = emptyList())
@@ -6,8 +6,8 @@ import net.corda.core.flows.StartableByRPC
 import net.corda.core.messaging.startFlow
 import net.corda.core.serialization.CheckpointCustomSerializer
 import net.corda.core.utilities.getOrThrow
-import net.corda.node.logging.logFile
 import net.corda.testing.driver.driver
+import net.corda.testing.driver.logFile
 import org.assertj.core.api.Assertions
 import org.junit.Test
 import java.time.Duration
@@ -7,9 +7,9 @@ import net.corda.core.messaging.startFlow
 import net.corda.core.serialization.CheckpointCustomSerializer
 import net.corda.core.serialization.CordaSerializable
 import net.corda.core.utilities.getOrThrow
-import net.corda.node.logging.logFile
 import net.corda.testing.driver.DriverParameters
 import net.corda.testing.driver.driver
+import net.corda.testing.driver.logFile
 import net.corda.testing.node.internal.enclosedCordapp
 import org.assertj.core.api.Assertions
 import org.junit.Test
@@ -12,6 +12,7 @@ import net.corda.core.flows.ReceiveFinalityFlow
 import net.corda.core.flows.SignTransactionFlow
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.node.AppServiceHub
 import net.corda.core.node.services.CordaService
@@ -318,8 +319,10 @@ class FlowEntityManagerTest : AbstractFlowEntityManagerTest() {
        StaffedFlowHospital.onFlowDischarged.add { _, _ -> ++counter }
        driver(DriverParameters(startNodesInProcess = true)) {

-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()

            val txId =
                alice.rpc.startFlow(::EntityManagerWithFlushCatchAndInteractWithOtherPartyFlow, bob.nodeInfo.singleIdentity())
@@ -3,6 +3,7 @@ package net.corda.node.flows
 import co.paralleluniverse.fibers.Suspendable
 import net.corda.core.flows.*
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.unwrap
@@ -65,36 +66,35 @@ class FlowOverrideTests {
     private val nodeAClasses = setOf(Ping::class.java, Pong::class.java, Pongiest::class.java)
     private val nodeBClasses = setOf(Ping::class.java, Pong::class.java)

-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `should use the most specific implementation of a responding flow`() {
        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
-            val nodeA = startNode(NodeParameters(
-                    providedName = ALICE_NAME,
-                    additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray()))
-            )).getOrThrow()
-            val nodeB = startNode(NodeParameters(
-                    providedName = BOB_NAME,
-                    additionalCordapps = setOf(cordappForClasses(*nodeBClasses.toTypedArray()))
-            )).getOrThrow()
+            val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                    .map {
+                        NodeParameters(providedName = it,
+                                additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())))
+                    }
+                    .map { startNode(it) }
+                    .transpose()
+                    .getOrThrow()
            assertThat(nodeB.rpc.startFlow(::Ping, nodeA.nodeInfo.singleIdentity()).returnValue.getOrThrow(), `is`(Pongiest.GORGONZOLA))
        }
    }

-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `should use the overriden implementation of a responding flow`() {
        val flowOverrides = mapOf(Ping::class.java to Pong::class.java)
        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
-            val nodeA = startNode(NodeParameters(
-                    providedName = ALICE_NAME,
-                    additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())),
-                    flowOverrides = flowOverrides
-            )).getOrThrow()
-            val nodeB = startNode(NodeParameters(
-                    providedName = BOB_NAME,
-                    additionalCordapps = setOf(cordappForClasses(*nodeBClasses.toTypedArray()))
-            )).getOrThrow()
+            val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                    .map {
+                        NodeParameters(providedName = it,
+                                flowOverrides = flowOverrides,
+                                additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())))
+                    }
+                    .map { startNode(it) }
+                    .transpose()
+                    .getOrThrow()
            assertThat(nodeB.rpc.startFlow(::Ping, nodeA.nodeInfo.singleIdentity()).returnValue.getOrThrow(), `is`(Pong.PONG))
        }
    }
-
 }
@@ -0,0 +1,511 @@
+package net.corda.node.flows
+
+import co.paralleluniverse.fibers.Suspendable
+import net.corda.core.flows.FlowLogic
+import net.corda.core.flows.FlowSession
+import net.corda.core.flows.HospitalizeFlowException
+import net.corda.core.flows.InitiatedBy
+import net.corda.core.flows.InitiatingFlow
+import net.corda.core.flows.StartableByRPC
+import net.corda.core.flows.StateMachineRunId
+import net.corda.core.identity.Party
+import net.corda.core.internal.FlowIORequest
+import net.corda.core.internal.IdempotentFlow
+import net.corda.core.internal.TimedFlow
+import net.corda.core.internal.concurrent.transpose
+import net.corda.core.messaging.StateMachineTransactionMapping
+import net.corda.core.messaging.startFlow
+import net.corda.core.utilities.OpaqueBytes
+import net.corda.core.utilities.getOrThrow
+import net.corda.core.utilities.seconds
+import net.corda.core.utilities.unwrap
+import net.corda.finance.DOLLARS
+import net.corda.finance.flows.CashIssueAndPaymentFlow
+import net.corda.node.services.config.NodeConfiguration
+import net.corda.node.services.statemachine.FlowStateMachineImpl
+import net.corda.node.services.statemachine.FlowTimeoutException
+import net.corda.node.services.statemachine.StaffedFlowHospital
+import net.corda.testing.core.ALICE_NAME
+import net.corda.testing.core.BOB_NAME
+import net.corda.testing.core.singleIdentity
+import net.corda.testing.driver.DriverParameters
+import net.corda.testing.driver.driver
+import net.corda.testing.node.internal.FINANCE_CORDAPPS
+import net.corda.testing.node.internal.enclosedCordapp
+import org.junit.Test
+import java.sql.SQLTransientConnectionException
+import java.util.concurrent.Semaphore
+import kotlin.test.assertEquals
+import kotlin.test.assertNull
+
+class FlowReloadAfterCheckpointTest {
+
+    private companion object {
+        val cordapps = listOf(enclosedCordapp())
+    }
+
+    @Test(timeout = 300_000)
+    fun `flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true`() {
+        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
+            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
+        }
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map {
+                    startNode(
+                        providedName = it,
+                        customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+                    )
+                }
+                .transpose()
+                .getOrThrow()
+
+            val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, false)
+            val flowStartedByAlice = handle.id
+            handle.returnValue.getOrThrow()
+            assertEquals(5, reloadCounts[flowStartedByAlice])
+            assertEquals(6, reloadCounts[ReloadFromCheckpointResponder.flowId])
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `flow will not reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is false`() {
+        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
+            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
+        }
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map {
+                    startNode(
+                        providedName = it,
+                        customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to false)
+                    )
+                }
+                .transpose()
+                .getOrThrow()
+
+            val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, false)
+            val flowStartedByAlice = handle.id
+            handle.returnValue.getOrThrow()
+            assertNull(reloadCounts[flowStartedByAlice])
+            assertNull(reloadCounts[ReloadFromCheckpointResponder.flowId])
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true and be kept for observation due to failed deserialization`() {
+        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
+            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
+        }
+        lateinit var flowKeptForObservation: StateMachineRunId
+        val lock = Semaphore(0)
+        StaffedFlowHospital.onFlowKeptForOvernightObservation.add { id, _ ->
+            flowKeptForObservation = id
+            lock.release()
+        }
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map {
+                    startNode(
+                        providedName = it,
+                        customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+                    )
+                }
+                .transpose()
+                .getOrThrow()
+
+            val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), true, false, false)
+            val flowStartedByAlice = handle.id
+            lock.acquire()
+            assertEquals(flowStartedByAlice, flowKeptForObservation)
+            assertEquals(4, reloadCounts[flowStartedByAlice])
+            assertEquals(4, reloadCounts[ReloadFromCheckpointResponder.flowId])
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `flow will reload from a previous checkpoint after calling suspending function and skipping the persisting the current checkpoint when reloadCheckpointAfterSuspend is true`() {
+        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
+            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
+        }
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map {
+                    startNode(
+                        providedName = it,
+                        customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+                    )
+                }
+                .transpose()
+                .getOrThrow()
+
+            val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, true)
+            val flowStartedByAlice = handle.id
+            handle.returnValue.getOrThrow()
+            assertEquals(5, reloadCounts[flowStartedByAlice])
+            assertEquals(6, reloadCounts[ReloadFromCheckpointResponder.flowId])
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `idempotent flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true`() {
+        var reloadCount = 0
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val alice = startNode(
+                providedName = ALICE_NAME,
+                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+            ).getOrThrow()
+
+            alice.rpc.startFlow(::MyIdempotentFlow, false).returnValue.getOrThrow()
+            assertEquals(5, reloadCount)
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `idempotent flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true but can't throw deserialization error from objects in the call function`() {
+        var reloadCount = 0
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val alice = startNode(
+                providedName = ALICE_NAME,
+                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+            ).getOrThrow()
+
+            alice.rpc.startFlow(::MyIdempotentFlow, true).returnValue.getOrThrow()
+            assertEquals(5, reloadCount)
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `timed flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true`() {
+        var reloadCount = 0
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val alice = startNode(
+                providedName = ALICE_NAME,
+                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+            ).getOrThrow()
+
+            alice.rpc.startFlow(::MyTimedFlow).returnValue.getOrThrow()
+            assertEquals(5, reloadCount)
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `flow will correctly retry after an error when reloadCheckpointAfterSuspend is true`() {
+        var reloadCount = 0
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
+        var timesDischarged = 0
+        StaffedFlowHospital.onFlowDischarged.add { _, _ -> timesDischarged += 1 }
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val alice = startNode(
+                providedName = ALICE_NAME,
+                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+            ).getOrThrow()
+
+            alice.rpc.startFlow(::TransientConnectionFailureFlow).returnValue.getOrThrow()
+            assertEquals(5, reloadCount)
+            assertEquals(3, timesDischarged)
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `flow continues reloading from checkpoints after node restart when reloadCheckpointAfterSuspend is true`() {
+        var reloadCount = 0
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
+        driver(
+            DriverParameters(
+                inMemoryDB = false,
+                startNodesInProcess = true,
+                notarySpecs = emptyList(),
+                cordappsForAllNodes = cordapps
+            )
+        ) {
+
+            val alice = startNode(
+                providedName = ALICE_NAME,
+                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+            ).getOrThrow()
+
+            alice.rpc.startFlow(::MyHospitalizingFlow)
+            Thread.sleep(10.seconds.toMillis())
+
+            alice.stop()
+
+            startNode(
+                providedName = ALICE_NAME,
+                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+            ).getOrThrow()
+
+            Thread.sleep(20.seconds.toMillis())
+
+            assertEquals(5, reloadCount)
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `idempotent flow continues reloading from checkpoints after node restart when reloadCheckpointAfterSuspend is true`() {
+        var reloadCount = 0
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
+        driver(
+            DriverParameters(
+                inMemoryDB = false,
+                startNodesInProcess = true,
+                notarySpecs = emptyList(),
+                cordappsForAllNodes = cordapps
+            )
+        ) {
+
+            val alice = startNode(
+                providedName = ALICE_NAME,
+                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+            ).getOrThrow()
+
+            alice.rpc.startFlow(::IdempotentHospitalizingFlow)
+            Thread.sleep(10.seconds.toMillis())
+
+            alice.stop()
+
+            startNode(
+                providedName = ALICE_NAME,
+                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+            ).getOrThrow()
+
+            Thread.sleep(20.seconds.toMillis())
+
+            // restarts completely from the beginning and forgets the in-memory reload count therefore
+            // it reloads an extra 2 times for checkpoints it had already reloaded before the node shutdown
+            assertEquals(7, reloadCount)
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `more complicated flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true`() {
+        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
+        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
+            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
+        }
+        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = FINANCE_CORDAPPS)) {
+
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map {
+                    startNode(
+                        providedName = it,
+                        customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
+                    )
+                }
+                .transpose()
+                .getOrThrow()
+
+            val handle = alice.rpc.startFlow(
+                ::CashIssueAndPaymentFlow,
+                500.DOLLARS,
+                OpaqueBytes.of(0x01),
+                bob.nodeInfo.singleIdentity(),
+                false,
+                defaultNotaryIdentity
+            )
+            val flowStartedByAlice = handle.id
+            handle.returnValue.getOrThrow(30.seconds)
+            val flowStartedByBob = bob.rpc.stateMachineRecordedTransactionMappingSnapshot()
+                .map(StateMachineTransactionMapping::stateMachineRunId)
+                .toSet()
+                .single()
+            Thread.sleep(10.seconds.toMillis())
+            assertEquals(7, reloadCounts[flowStartedByAlice])
+            assertEquals(6, reloadCounts[flowStartedByBob])
+        }
+    }
+
+    /**
+     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
+     * Therefore this flow should reload 5 times when completed without errors or restarts.
+     */
+    @StartableByRPC
+    @InitiatingFlow
+    class ReloadFromCheckpointFlow(
+        private val party: Party,
+        private val shouldHaveDeserializationError: Boolean,
+        private val counterPartyHasDeserializationError: Boolean,
+        private val skipCheckpoints: Boolean
+    ) : FlowLogic<Unit>() {
+
+        @Suspendable
+        override fun call() {
+            val session = initiateFlow(party)
+            session.send(counterPartyHasDeserializationError, skipCheckpoints)
+            session.receive(String::class.java, skipCheckpoints).unwrap { it }
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, skipCheckpoints)
+            val map = if (shouldHaveDeserializationError) {
+                BrokenMap(mutableMapOf("i dont want" to "this to work"))
+            } else {
+                mapOf("i dont want" to "this to work")
+            }
+            logger.info("I need to use my variable to pass the build!: $map")
+            session.sendAndReceive<String>("hey I made it this far")
+        }
+    }
+
+    /**
+     * Has 5 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 6.
+     * Therefore this flow should reload 6 times when completed without errors or restarts.
+     */
+    @InitiatedBy(ReloadFromCheckpointFlow::class)
+    class ReloadFromCheckpointResponder(private val session: FlowSession) : FlowLogic<Unit>() {
+
+        companion object {
+            var flowId: StateMachineRunId? = null
+        }
+
+        @Suspendable
+        override fun call() {
+            flowId = runId
+            val counterPartyHasDeserializationError = session.receive<Boolean>().unwrap { it }
+            session.send("hello there 12312311")
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            val map = if (counterPartyHasDeserializationError) {
+                BrokenMap(mutableMapOf("i dont want" to "this to work"))
+            } else {
+                mapOf("i dont want" to "this to work")
+            }
+            logger.info("I need to use my variable to pass the build!: $map")
+            session.receive<String>().unwrap { it }
+            session.send("sending back a message")
+        }
+    }
+
+    /**
+     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
+     * Therefore this flow should reload 5 times when completed without errors or restarts.
+     */
+    @StartableByRPC
+    @InitiatingFlow
+    class MyIdempotentFlow(private val shouldHaveDeserializationError: Boolean) : FlowLogic<Unit>(), IdempotentFlow {
+
+        @Suspendable
+        override fun call() {
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            val map = if (shouldHaveDeserializationError) {
+                BrokenMap(mutableMapOf("i dont want" to "this to work"))
+            } else {
+                mapOf("i dont want" to "this to work")
+            }
+            logger.info("I need to use my variable to pass the build!: $map")
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+        }
+    }
+
+    /**
+     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
+     * Therefore this flow should reload 5 times when completed without errors or restarts.
+     */
+    @StartableByRPC
+    @InitiatingFlow
+    class MyTimedFlow : FlowLogic<Unit>(), TimedFlow {
+
+        companion object {
+            var thrown = false
+        }
+
+        override val isTimeoutEnabled: Boolean = true
+
+        @Suspendable
+        override fun call() {
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            if (!thrown) {
+                thrown = true
+                throw FlowTimeoutException()
+            }
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+        }
+    }
+
+    @StartableByRPC
+    @InitiatingFlow
+    class TransientConnectionFailureFlow : FlowLogic<Unit>() {
+
+        companion object {
+            var retryCount = 0
+        }
+
+        @Suspendable
+        override fun call() {
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            if (retryCount < 3) {
+                retryCount += 1
+                throw SQLTransientConnectionException("Connection is not available")
+
+            }
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+        }
+    }
+
+    /**
+     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
+     * Therefore this flow should reload 5 times when completed without errors or restarts.
+     */
+    @StartableByRPC
+    @InitiatingFlow
+    class MyHospitalizingFlow : FlowLogic<Unit>() {
+
+        companion object {
+            var thrown = false
+        }
+
+        @Suspendable
+        override fun call() {
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            if (!thrown) {
+                thrown = true
+                throw HospitalizeFlowException("i want to try again")
+            }
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
+        }
+    }
+
+    /**
+     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
+     * Therefore this flow should reload 5 times when completed without errors or restarts.
|
||||||
|
*/
|
||||||
|
@StartableByRPC
|
||||||
|
@InitiatingFlow
|
||||||
|
class IdempotentHospitalizingFlow : FlowLogic<Unit>(), IdempotentFlow {
|
||||||
|
|
||||||
|
companion object {
|
||||||
|
var thrown = false
|
||||||
|
}
|
||||||
|
|
||||||
|
@Suspendable
|
||||||
|
override fun call() {
|
||||||
|
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
|
||||||
|
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
|
||||||
|
if (!thrown) {
|
||||||
|
thrown = true
|
||||||
|
throw HospitalizeFlowException("i want to try again")
|
||||||
|
}
|
||||||
|
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
|
||||||
|
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@@ -1,12 +1,16 @@
 package net.corda.node.flows
 
 import co.paralleluniverse.fibers.Suspendable
-import net.corda.client.rpc.CordaRPCClient
-import net.corda.client.rpc.CordaRPCClientConfiguration
 import net.corda.core.CordaRuntimeException
-import net.corda.core.flows.*
+import net.corda.core.flows.FlowExternalAsyncOperation
+import net.corda.core.flows.FlowLogic
+import net.corda.core.flows.FlowSession
+import net.corda.core.flows.InitiatedBy
+import net.corda.core.flows.InitiatingFlow
+import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
 import net.corda.core.internal.IdempotentFlow
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.serialization.CordaSerializable
 import net.corda.core.utilities.ProgressTracker
@@ -22,6 +26,7 @@ import net.corda.testing.core.singleIdentity
 import net.corda.testing.driver.DriverParameters
 import net.corda.testing.driver.driver
 import net.corda.testing.node.User
+import net.corda.testing.node.internal.enclosedCordapp
 import org.assertj.core.api.Assertions.assertThatExceptionOfType
 import org.hibernate.exception.ConstraintViolationException
 import org.junit.After
@@ -32,7 +37,8 @@ import java.sql.SQLException
 import java.sql.SQLTransientConnectionException
 import java.time.Duration
 import java.time.temporal.ChronoUnit
-import java.util.*
+import java.util.Collections
+import java.util.HashSet
 import java.util.concurrent.CompletableFuture
 import java.util.concurrent.TimeoutException
 import kotlin.test.assertEquals
@@ -40,7 +46,11 @@ import kotlin.test.assertFailsWith
 import kotlin.test.assertNotNull
 
 class FlowRetryTest {
-    val config = CordaRPCClientConfiguration.DEFAULT.copy(connectionRetryIntervalMultiplier = 1.1)
+
+    private companion object {
+        val user = User("mark", "dadada", setOf(Permissions.all()))
+        val cordapps = listOf(enclosedCordapp())
+    }
 
     @Before
     fun resetCounters() {
@@ -57,148 +67,136 @@ class FlowRetryTest {
         StaffedFlowHospital.DatabaseEndocrinologist.customConditions.clear()
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `flows continue despite errors`() {
         val numSessions = 2
         val numIterations = 10
-        val user = User("mark", "dadada", setOf(Permissions.startFlow<InitiatorFlow>()))
-        val result: Any? = driver(DriverParameters(
-                startNodesInProcess = isQuasarAgentSpecified(),
-                notarySpecs = emptyList()
-        )) {
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
-
-            val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                it.proxy.startFlow(::InitiatorFlow, numSessions, numIterations, nodeBHandle.nodeInfo.singleIdentity()).returnValue.getOrThrow()
-            }
+        val result: Any? = driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList())) {
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
+
+            val result = nodeAHandle.rpc.startFlow(
+                    ::InitiatorFlow,
+                    numSessions,
+                    numIterations,
+                    nodeBHandle.nodeInfo.singleIdentity()
+            ).returnValue.getOrThrow()
             result
         }
         assertNotNull(result)
         assertEquals("$numSessions:$numIterations", result)
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `async operation deduplication id is stable accross retries`() {
-        val user = User("mark", "dadada", setOf(Permissions.startFlow<AsyncRetryFlow>()))
-        driver(DriverParameters(
-                startNodesInProcess = isQuasarAgentSpecified(),
-                notarySpecs = emptyList()
-        )) {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
             val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                it.proxy.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
-            }
+            nodeAHandle.rpc.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `flow gives up after number of exceptions, even if this is the first line of the flow`() {
-        val user = User("mark", "dadada", setOf(Permissions.startFlow<RetryFlow>()))
-        assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
-            driver(DriverParameters(
-                    startNodesInProcess = isQuasarAgentSpecified(),
-                    notarySpecs = emptyList()
-            )) {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
             val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-                val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                    it.proxy.startFlow(::RetryFlow).returnValue.getOrThrow()
-                }
-                result
+            assertFailsWith<CordaRuntimeException> {
+                nodeAHandle.rpc.startFlow(::RetryFlow).returnValue.getOrThrow()
             }
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `flow that throws in constructor throw for the RPC client that attempted to start them`() {
-        val user = User("mark", "dadada", setOf(Permissions.startFlow<ThrowingFlow>()))
-        assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
-            driver(DriverParameters(
-                    startNodesInProcess = isQuasarAgentSpecified(),
-                    notarySpecs = emptyList()
-            )) {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
             val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-                val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                    it.proxy.startFlow(::ThrowingFlow).returnValue.getOrThrow()
-                }
-                result
+            assertFailsWith<CordaRuntimeException> {
+                nodeAHandle.rpc.startFlow(::ThrowingFlow).returnValue.getOrThrow()
             }
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `SQLTransientConnectionExceptions thrown by hikari are retried 3 times and then kept in the checkpoints table`() {
-        val user = User("mark", "dadada", setOf(Permissions.all()))
-        driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
+
             assertFailsWith<TimeoutException> {
-                it.proxy.startFlow(::TransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
+                nodeAHandle.rpc.startFlow(::TransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
                         .returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
             }
             assertEquals(3, TransientConnectionFailureFlow.retryCount)
-            assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get())
-            }
+            assertEquals(
+                    1,
+                    nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get()
+            )
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `Specific exception still detected even if it is nested inside another exception`() {
-        val user = User("mark", "dadada", setOf(Permissions.all()))
-        driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
+
             assertFailsWith<TimeoutException> {
-                it.proxy.startFlow(::WrappedTransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
+                nodeAHandle.rpc.startFlow(::WrappedTransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
                        .returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
             }
             assertEquals(3, WrappedTransientConnectionFailureFlow.retryCount)
-            assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get())
-            }
+            assertEquals(
+                    1,
+                    nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get()
+            )
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `General external exceptions are not retried and propagate`() {
-        val user = User("mark", "dadada", setOf(Permissions.all()))
-        driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
-
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
-
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
+
             assertFailsWith<CordaRuntimeException> {
-                it.proxy.startFlow(::GeneralExternalFailureFlow, nodeBHandle.nodeInfo.singleIdentity()).returnValue.getOrThrow()
+                nodeAHandle.rpc.startFlow(
+                        ::GeneralExternalFailureFlow,
+                        nodeBHandle.nodeInfo.singleIdentity()
+                ).returnValue.getOrThrow()
             }
             assertEquals(0, GeneralExternalFailureFlow.retryCount)
-            assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.FAILED).returnValue.get())
-            }
+            assertEquals(
+                    1,
+                    nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.FAILED).returnValue.get()
+            )
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `Permission exceptions are not retried and propagate`() {
         val user = User("mark", "dadada", setOf())
-        driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
-
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
             val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
             assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
-                it.proxy.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
+                nodeAHandle.rpc.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
             }.withMessageStartingWith("User not authorized to perform RPC call")
             // This stays at -1 since the flow never even got called
             assertEquals(-1, GeneralExternalFailureFlow.retryCount)
         }
     }
-    }
 }
 
 fun isQuasarAgentSpecified(): Boolean {
@@ -306,6 +304,10 @@ enum class Step { First, BeforeInitiate, AfterInitiate, AfterInitiateSendReceive
 
 data class Visited(val sessionNum: Int, val iterationNum: Int, val step: Step)
 
+class BrokenMap<K, V>(delegate: MutableMap<K, V> = mutableMapOf()) : MutableMap<K, V> by delegate {
+    override fun put(key: K, value: V): V? = throw IllegalStateException("Broken on purpose")
+}
+
 @StartableByRPC
 class RetryFlow() : FlowLogic<String>(), IdempotentFlow {
     companion object {
@@ -333,7 +335,7 @@ class AsyncRetryFlow() : FlowLogic<String>(), IdempotentFlow {
         val deduplicationIds = mutableSetOf<String>()
     }
 
-    class RecordDeduplicationId: FlowExternalAsyncOperation<String> {
+    class RecordDeduplicationId : FlowExternalAsyncOperation<String> {
         override fun execute(deduplicationId: String): CompletableFuture<String> {
             val dedupeIdIsNew = deduplicationIds.add(deduplicationId)
             if (dedupeIdIsNew) {
@@ -415,7 +417,8 @@ class WrappedTransientConnectionFailureFlow(private val party: Party) : FlowLogi
         retryCount += 1
         throw IllegalStateException(
                 "wrapped error message",
-                IllegalStateException("another layer deep", SQLTransientConnectionException("Connection is not available")))
+                IllegalStateException("another layer deep", SQLTransientConnectionException("Connection is not available"))
+        )
     }
 }
 
@@ -456,6 +459,8 @@ class GeneralExternalFailureResponder(private val session: FlowSession) : FlowLo
 
 @StartableByRPC
 class GetCheckpointNumberOfStatusFlow(private val flowStatus: Checkpoint.FlowStatus) : FlowLogic<Long>() {
+
+    @Suspendable
     override fun call(): Long {
         val sqlStatement =
                 "select count(*) " +
@@ -43,7 +43,7 @@ class FlowSessionCloseTest {
         ).transpose().getOrThrow()
 
         CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
-            assertThatThrownBy { it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), true, null, false).returnValue.getOrThrow() }
+            assertThatThrownBy { it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), true, null, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow() }
                     .isInstanceOf(CordaRuntimeException::class.java)
                     .hasMessageContaining(PrematureSessionCloseException::class.java.name)
                    .hasMessageContaining("The following session was closed before it was initialised")
@@ -52,20 +52,28 @@ class FlowSessionCloseTest {
     }
 
     @Test(timeout=300_000)
-    fun `flow cannot access closed session`() {
+    fun `flow cannot access closed session, unless it's a duplicate close which is handled gracefully`() {
         driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
             val (nodeAHandle, nodeBHandle) = listOf(
                     startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
                     startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
             ).transpose().getOrThrow()
 
-            InitiatorFlow.SessionAPI.values().forEach { sessionAPI ->
             CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
-                assertThatThrownBy { it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, sessionAPI, false).returnValue.getOrThrow() }
+                InitiatorFlow.SessionAPI.values().forEach { sessionAPI ->
+                    when (sessionAPI) {
+                        InitiatorFlow.SessionAPI.CLOSE -> {
+                            it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, sessionAPI, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow()
+                        }
+                        else -> {
+                            assertThatThrownBy { it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, sessionAPI, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow() }
                                     .isInstanceOf(UnexpectedFlowEndException::class.java)
                                     .hasMessageContaining("Tried to access ended session")
                         }
                     }
+                }
+            }
 
         }
     }
@@ -79,7 +87,7 @@ class FlowSessionCloseTest {
         ).transpose().getOrThrow()
 
         CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
-            it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, null, false).returnValue.getOrThrow()
+            it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, null, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow()
         }
     }
 }
@@ -93,7 +101,7 @@ class FlowSessionCloseTest {
         ).transpose().getOrThrow()
 
         CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
-            it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, null, true).returnValue.getOrThrow()
+            it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, null, InitiatorFlow.ResponderReaction.RETRY_CLOSE_FROM_CHECKPOINT).returnValue.getOrThrow()
         }
     }
 }
@@ -151,14 +159,21 @@ class FlowSessionCloseTest {
     @StartableByRPC
     class InitiatorFlow(val party: Party, private val prematureClose: Boolean = false,
                         private val accessClosedSessionWithApi: SessionAPI? = null,
-                        private val retryClose: Boolean = false): FlowLogic<Unit>() {
+                        private val responderReaction: ResponderReaction): FlowLogic<Unit>() {
 
         @CordaSerializable
         enum class SessionAPI {
             SEND,
             SEND_AND_RECEIVE,
             RECEIVE,
-            GET_FLOW_INFO
+            GET_FLOW_INFO,
+            CLOSE
+        }
+
+        @CordaSerializable
+        enum class ResponderReaction {
+            NORMAL_CLOSE,
+            RETRY_CLOSE_FROM_CHECKPOINT
         }
 
         @Suspendable
@@ -169,7 +184,7 @@ class FlowSessionCloseTest {
                 session.close()
             }
 
-            session.send(retryClose)
+            session.send(responderReaction)
             sleep(1.seconds)
 
             if (accessClosedSessionWithApi != null) {
@@ -178,6 +193,7 @@ class FlowSessionCloseTest {
                     SessionAPI.RECEIVE -> session.receive<String>()
                     SessionAPI.SEND_AND_RECEIVE -> session.sendAndReceive<String>("dummy payload")
                     SessionAPI.GET_FLOW_INFO -> session.getCounterpartyFlowInfo()
+                    SessionAPI.CLOSE -> session.close()
                 }
             }
         }
@@ -192,13 +208,17 @@ class FlowSessionCloseTest {
 
         @Suspendable
         override fun call() {
-            val retryClose = otherSideSession.receive<Boolean>()
+            val responderReaction = otherSideSession.receive<InitiatorFlow.ResponderReaction>()
                     .unwrap{ it }
 
+            when(responderReaction) {
+                InitiatorFlow.ResponderReaction.NORMAL_CLOSE -> {
+                    otherSideSession.close()
+                }
+                InitiatorFlow.ResponderReaction.RETRY_CLOSE_FROM_CHECKPOINT -> {
                     otherSideSession.close()
 
                     // failing with a transient exception to force a replay of the close.
-            if (retryClose) {
                     if (!thrown) {
                         thrown = true
                         throw SQLTransientConnectionException("Connection is not available")
@@ -206,6 +226,7 @@ class FlowSessionCloseTest {
                 }
             }
         }
+    }
 
     @InitiatingFlow
     @StartableByRPC
@@ -14,6 +14,7 @@ import net.corda.core.flows.StateMachineRunId
 import net.corda.core.flows.UnexpectedFlowEndException
 import net.corda.core.identity.CordaX500Name
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.CordaRPCOps
 import net.corda.core.messaging.startFlow
 import net.corda.core.node.services.StatesNotAvailableException
@@ -68,9 +69,10 @@ class KillFlowTest {
     @Test(timeout = 300_000)
     fun `a killed flow will propagate the killed error to counter parties when it reaches the next suspension point`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
-            val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
+            val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
+                    .map { startNode(providedName = it) }
+                    .transpose()
+                    .getOrThrow()
             alice.rpc.let { rpc ->
                 val handle = rpc.startFlow(
                     ::AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriends,
@@ -118,8 +120,10 @@ class KillFlowTest {
     @Test(timeout = 300_000)
     fun `killing a flow suspended in send + receive + sendAndReceive ends the flow immediately`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = false)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it) }
+                    .transpose()
+                    .getOrThrow()
             val bobParty = bob.nodeInfo.singleIdentity()
             bob.stop()
             val terminated = (bob as OutOfProcess).process.waitFor(30, TimeUnit.SECONDS)
@@ -192,9 +196,10 @@ class KillFlowTest {
     @Test(timeout = 300_000)
     fun `a killed flow will propagate the killed error to counter parties if it was suspended`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
-            val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
+            val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
+                    .map { startNode(providedName = it) }
+                    .transpose()
+                    .getOrThrow()
             alice.rpc.let { rpc ->
                 val handle = rpc.startFlow(
                     ::AFlowThatGetsMurderedAndSomehowKillsItsFriends,
@@ -224,9 +229,10 @@ class KillFlowTest {
     @Test(timeout = 300_000)
     fun `a killed initiated flow will propagate the killed error to the initiator and its counter parties`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
-            val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
+            val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
+                    .map { startNode(providedName = it) }
+                    .transpose()
+                    .getOrThrow()
             val handle = alice.rpc.startFlow(
                 ::AFlowThatGetsMurderedByItsFriend,
                 listOf(bob.nodeInfo.singleIdentity(), charlie.nodeInfo.singleIdentity())
@@ -1,68 +0,0 @@
-package net.corda.node.logging
-
-import net.corda.core.flows.FlowLogic
-import net.corda.core.flows.InitiatingFlow
-import net.corda.core.flows.StartableByRPC
-import net.corda.core.internal.div
-import net.corda.core.messaging.FlowHandle
-import net.corda.core.messaging.startFlow
-import net.corda.core.utilities.getOrThrow
-import net.corda.testing.driver.DriverParameters
-import net.corda.testing.driver.NodeHandle
-import net.corda.testing.driver.driver
-import org.assertj.core.api.Assertions.assertThat
-import org.junit.Test
-import java.io.File
-
-class ErrorCodeLoggingTests {
-    @Test(timeout=300_000)
-    fun `log entries with a throwable and ERROR or WARN get an error code appended`() {
-        driver(DriverParameters(notarySpecs = emptyList())) {
-            val node = startNode(startInSameProcess = false).getOrThrow()
-            node.rpc.startFlow(::MyFlow).waitForCompletion()
-            val logFile = node.logFile()
-
-            val linesWithErrorCode = logFile.useLines { lines ->
-                lines.filter { line ->
-                    line.contains("[errorCode=")
-                }.filter { line ->
-                    line.contains("moreInformationAt=https://errors.corda.net/")
-                }.toList()
-            }
-
-            assertThat(linesWithErrorCode).isNotEmpty
-        }
-    }
-
-    // This is used to detect broken logging which can be caused by loggers being initialized
-    // before the initLogging() call is made
-    @Test(timeout=300_000)
-    fun `When logging is set to error level, there are no other levels logged after node startup`() {
-        driver(DriverParameters(notarySpecs = emptyList())) {
-            val node = startNode(startInSameProcess = false, logLevelOverride = "ERROR").getOrThrow()
-            val logFile = node.logFile()
-            val lengthAfterStart = logFile.length()
-            node.rpc.startFlow(::MyFlow).waitForCompletion()
-            // An exception thrown in a flow will log at the "INFO" level.
-            assertThat(logFile.length()).isEqualTo(lengthAfterStart)
-        }
-    }
-
-    @StartableByRPC
-    @InitiatingFlow
-    class MyFlow : FlowLogic<String>() {
-        override fun call(): String {
-            throw IllegalArgumentException("Mwahahahah")
-        }
-    }
-}
-
-private fun FlowHandle<*>.waitForCompletion() {
-    try {
-        returnValue.getOrThrow()
-    } catch (e: Exception) {
-        // This is expected to throw an exception, using getOrThrow() just to wait until done.
-    }
-}
-
-fun NodeHandle.logFile(): File = (baseDirectory / "logs").toFile().walk().filter { it.name.startsWith("node-") && it.extension == "log" }.single()
@@ -7,6 +7,7 @@ import net.corda.core.contracts.Command
 import net.corda.core.contracts.StateAndContract
 import net.corda.core.flows.*
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.internal.packageName
 import net.corda.core.messaging.startFlow
 import net.corda.core.transactions.SignedTransaction
@@ -57,8 +58,10 @@ class FlowsDrainingModeContentionTest {
                 portAllocation = portAllocation,
                 extraCordappPackagesToScan = listOf(MessageState::class.packageName)
         )) {
-            val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
-            val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
+            val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = users) }
+                    .transpose()
+                    .getOrThrow()
 
             val nodeARpcInfo = RpcInfo(nodeA.rpcAddress, user.username, user.password)
             val flow = nodeA.rpc.startFlow(::ProposeTransactionAndWaitForCommit, message, nodeARpcInfo, nodeB.nodeInfo.singleIdentity(), defaultNotaryIdentity)
@@ -4,6 +4,7 @@ import co.paralleluniverse.fibers.Suspendable
 import net.corda.core.flows.*
 import net.corda.core.identity.Party
 import net.corda.core.internal.concurrent.map
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.contextLogger
 import net.corda.core.utilities.getOrThrow
@@ -53,8 +54,11 @@ class P2PFlowsDrainingModeTest {
     @Test(timeout=300_000)
     fun `flows draining mode suspends consumption of initial session messages`() {
         driver(DriverParameters(startNodesInProcess = false, portAllocation = portAllocation, notarySpecs = emptyList())) {
-            val initiatedNode = startNode(providedName = ALICE_NAME).getOrThrow()
-            val initiating = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow().rpc
+            val (initiatedNode, bob) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = users) }
+                    .transpose()
+                    .getOrThrow()
+            val initiating = bob.rpc
             val counterParty = initiatedNode.nodeInfo.singleIdentity()
             val initiated = initiatedNode.rpc
 
@@ -85,8 +89,10 @@ class P2PFlowsDrainingModeTest {
 
         driver(DriverParameters(portAllocation = portAllocation, notarySpecs = emptyList())) {
 
-            val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
-            val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
+            val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = users) }
+                    .transpose()
+                    .getOrThrow()
             var successful = false
             val latch = CountDownLatch(1)
 
@@ -133,8 +139,10 @@ class P2PFlowsDrainingModeTest {
 
         driver(DriverParameters(portAllocation = portAllocation, notarySpecs = emptyList())) {
 
-            val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
-            val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
+            val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = users) }
+                    .transpose()
+                    .getOrThrow()
             var successful = false
             val latch = CountDownLatch(1)
 
@@ -1,10 +1,10 @@
 package net.corda.node.services.config
 
 import net.corda.core.utilities.getOrThrow
-import net.corda.node.logging.logFile
 import net.corda.testing.driver.DriverParameters
 import net.corda.testing.driver.driver
 import net.corda.testing.driver.internal.incrementalPortAllocation
+import net.corda.testing.driver.logFile
 import org.junit.Assert.assertTrue
 import org.junit.Test
 
@@ -5,6 +5,7 @@ import net.corda.core.CordaRuntimeException
 import net.corda.core.flows.*
 import net.corda.core.identity.CordaX500Name
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.unwrap
@@ -58,8 +59,10 @@ class RpcExceptionHandlingTest {
         }
 
         driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()), allowHibernateToManageAppSchema = false)) {
-            val devModeNode = startNode(params, BOB_NAME).getOrThrow()
-            val node = startNode(ALICE_NAME, devMode = false, parameters = params).getOrThrow()
+            val (devModeNode, node) = listOf(startNode(params, BOB_NAME),
+                    startNode(ALICE_NAME, devMode = false, parameters = params))
+                    .transpose()
+                    .getOrThrow()
 
             assertThatThrownExceptionIsReceivedUnwrapped(devModeNode)
             assertThatThrownExceptionIsReceivedUnwrapped(node)
@@ -77,8 +80,10 @@ class RpcExceptionHandlingTest {
         }
 
         driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()), allowHibernateToManageAppSchema = false)) {
-            val devModeNode = startNode(params, BOB_NAME).getOrThrow()
-            val node = startNode(ALICE_NAME, devMode = false, parameters = params).getOrThrow()
+            val (devModeNode, node) = listOf(startNode(params, BOB_NAME),
+                    startNode(ALICE_NAME, devMode = false, parameters = params))
+                    .transpose()
+                    .getOrThrow()
 
             assertThatThrownBy { devModeNode.throwExceptionFromFlow() }.isInstanceOfSatisfying(FlowException::class.java) { exception ->
                 assertThat(exception).hasNoCause()
@@ -102,8 +107,10 @@ class RpcExceptionHandlingTest {
 
         fun DriverDSL.scenario(nameA: CordaX500Name, nameB: CordaX500Name, devMode: Boolean) {
 
-            val nodeA = startNode(nameA, devMode, params).getOrThrow()
-            val nodeB = startNode(nameB, devMode, params).getOrThrow()
+            val (nodeA, nodeB) = listOf(nameA, nameB)
+                    .map { startNode(it, devMode, params) }
+                    .transpose()
+                    .getOrThrow()
 
             nodeA.rpc.startFlow(::InitFlow, nodeB.nodeInfo.singleIdentity()).returnValue.getOrThrow()
         }
@@ -15,6 +15,7 @@ import net.corda.core.flows.NotaryException
 import net.corda.core.flows.ReceiveFinalityFlow
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.StateMachineUpdate
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.OpaqueBytes
@@ -46,14 +47,20 @@ class FlowHospitalTest {
 
     private val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `when double spend occurs, the flow is successfully deleted on the counterparty`() {
         driver(DriverParameters(cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts")))) {
-            val charlie = startNode(providedName = CHARLIE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
-            val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
-
-            val charlieClient = CordaRPCClient(charlie.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
-            val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
+            val (charlieClient, aliceClient) = listOf(CHARLIE_NAME, ALICE_NAME)
+                    .map {
+                        startNode(providedName = it,
+                                rpcUsers = listOf(rpcUser))
+                    }
+                    .transpose()
+                    .getOrThrow()
+                    .map {
+                        CordaRPCClient(it.rpcAddress)
+                                .start(rpcUser.username, rpcUser.password).proxy
+                    }
 
             val aliceParty = aliceClient.nodeInfo().legalIdentities.first()
 
@@ -80,7 +87,7 @@ class FlowHospitalTest {
             val secondStateAndRef = charlieClient.startFlow(::IssueFlow, defaultNotaryIdentity).returnValue.get()
             charlieClient.startFlow(::SpendFlowWithCustomException, secondStateAndRef, aliceParty).returnValue.get()
 
-            val secondSubscription = aliceClient.stateMachinesFeed().updates.subscribe{
+            val secondSubscription = aliceClient.stateMachinesFeed().updates.subscribe {
                 if (it is StateMachineUpdate.Removed && it.result.isFailure)
                     secondLatch.countDown()
             }
@@ -95,7 +102,7 @@ class FlowHospitalTest {
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `HospitalizeFlowException thrown`() {
         var observationCounter: Int = 0
         StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
@@ -117,7 +124,7 @@ class FlowHospitalTest {
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `Custom exception wrapping HospitalizeFlowException thrown`() {
         var observationCounter: Int = 0
         StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
@@ -139,7 +146,7 @@ class FlowHospitalTest {
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `Custom exception extending HospitalizeFlowException thrown`() {
         var observationCounter: Int = 0
         StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
@@ -162,7 +169,7 @@ class FlowHospitalTest {
         }
     }
 
-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `HospitalizeFlowException cloaking an important exception thrown`() {
         var dischargedCounter = 0
         var observationCounter: Int = 0
@@ -191,7 +198,7 @@ class FlowHospitalTest {
     }
 
     @StartableByRPC
-    class IssueFlow(val notary: Party): FlowLogic<StateAndRef<SingleOwnerState>>() {
+    class IssueFlow(val notary: Party) : FlowLogic<StateAndRef<SingleOwnerState>>() {
 
         @Suspendable
         override fun call(): StateAndRef<SingleOwnerState> {
@@ -201,12 +208,11 @@ class FlowHospitalTest {
             val notarised = subFlow(FinalityFlow(signedTransaction, emptySet<FlowSession>()))
             return notarised.coreTransaction.outRef(0)
         }
-
     }
 
     @StartableByRPC
     @InitiatingFlow
-    class SpendFlow(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party): FlowLogic<Unit>() {
+    class SpendFlow(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party) : FlowLogic<Unit>() {
 
         @Suspendable
         override fun call() {
@@ -216,11 +222,10 @@ class FlowHospitalTest {
             sessionWithCounterParty.sendAndReceive<String>("initial-message")
             subFlow(FinalityFlow(signedTransaction, setOf(sessionWithCounterParty)))
         }
-
     }
 
     @InitiatedBy(SpendFlow::class)
-    class AcceptSpendFlow(private val otherSide: FlowSession): FlowLogic<Unit>() {
+    class AcceptSpendFlow(private val otherSide: FlowSession) : FlowLogic<Unit>() {
 
         @Suspendable
         override fun call() {
@@ -229,12 +234,11 @@ class FlowHospitalTest {
 
             subFlow(ReceiveFinalityFlow(otherSide))
         }
-
     }
 
     @StartableByRPC
     @InitiatingFlow
-    class SpendFlowWithCustomException(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party):
+    class SpendFlowWithCustomException(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party) :
             FlowLogic<Unit>() {
 
         @Suspendable
@@ -249,11 +253,10 @@ class FlowHospitalTest {
                 throw DoubleSpendException("double spend!", e)
             }
         }
-
     }
 
     @InitiatedBy(SpendFlowWithCustomException::class)
-    class AcceptSpendFlowWithCustomException(private val otherSide: FlowSession): FlowLogic<Unit>() {
+    class AcceptSpendFlowWithCustomException(private val otherSide: FlowSession) : FlowLogic<Unit>() {
 
         @Suspendable
         override fun call() {
@@ -262,16 +265,15 @@ class FlowHospitalTest {
 
             subFlow(ReceiveFinalityFlow(otherSide))
         }
-
    }
 
-    class DoubleSpendException(message: String, cause: Throwable): FlowException(message, cause)
+    class DoubleSpendException(message: String, cause: Throwable) : FlowException(message, cause)
 
     @StartableByRPC
     class ThrowingHospitalisedExceptionFlow(
            // Starting this Flow from an RPC client: if we pass in an encapsulated exception within another exception then the wrapping
            // exception, when deserialized, will get grounded into a CordaRuntimeException (this happens in ThrowableSerializer#fromProxy).
-            private val hospitalizeFlowExceptionClass: Class<*>): FlowLogic<Unit>() {
+            private val hospitalizeFlowExceptionClass: Class<*>) : FlowLogic<Unit>() {
 
         @Suspendable
         override fun call() {
@ -294,5 +296,4 @@ class FlowHospitalTest {
|
|||||||
setCause(SQLException("deadlock"))
|
setCause(SQLException("deadlock"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
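The test hunks above hang a counter off StaffedFlowHospital.onFlowKeptForOvernightObservation so the assertions can see how often a flow is kept for overnight observation. A minimal sketch of the same pattern, assuming the Corda node and test dependencies are on the classpath; the flow and helper names are illustrative, not part of this change:

```kotlin
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.HospitalizeFlowException
import net.corda.core.flows.StartableByRPC
import net.corda.node.services.statemachine.StaffedFlowHospital

// Illustrative flow that always ends up under overnight observation.
@StartableByRPC
class AlwaysHospitalizedFlow : FlowLogic<Unit>() {
    @Suspendable
    override fun call() {
        throw HospitalizeFlowException("keep me for observation")
    }
}

// Registers a hook mirroring the one used in FlowHospitalTest and returns a way to read the count.
fun registerObservationCounter(): () -> Int {
    var observationCounter = 0
    StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ -> ++observationCounter }
    return { observationCounter }
}
```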
@ -16,6 +16,7 @@ import net.corda.core.flows.FlowLogic
|
|||||||
import net.corda.core.flows.StartableByRPC
|
import net.corda.core.flows.StartableByRPC
|
||||||
import net.corda.core.identity.Party
|
import net.corda.core.identity.Party
|
||||||
import net.corda.core.internal.concurrent.openFuture
|
import net.corda.core.internal.concurrent.openFuture
|
||||||
|
import net.corda.core.internal.concurrent.transpose
|
||||||
import net.corda.core.messaging.startFlow
|
import net.corda.core.messaging.startFlow
|
||||||
import net.corda.core.node.services.Vault
|
import net.corda.core.node.services.Vault
|
||||||
import net.corda.core.node.services.vault.QueryCriteria
|
import net.corda.core.node.services.vault.QueryCriteria
|
||||||
@ -24,7 +25,7 @@ import net.corda.core.utilities.getOrThrow
|
|||||||
import net.corda.core.utilities.seconds
|
import net.corda.core.utilities.seconds
|
||||||
import net.corda.node.services.Permissions
|
import net.corda.node.services.Permissions
|
||||||
import net.corda.node.services.statemachine.StaffedFlowHospital
|
import net.corda.node.services.statemachine.StaffedFlowHospital
|
||||||
import net.corda.node.services.transactions.PersistentUniquenessProvider
|
import net.corda.notary.jpa.JPAUniquenessProvider
|
||||||
import net.corda.testing.core.ALICE_NAME
|
import net.corda.testing.core.ALICE_NAME
|
||||||
import net.corda.testing.core.BOB_NAME
|
import net.corda.testing.core.BOB_NAME
|
||||||
import net.corda.testing.core.singleIdentity
|
import net.corda.testing.core.singleIdentity
|
||||||
@ -450,8 +451,11 @@ class VaultObserverExceptionTest {
|
|||||||
findCordapp("com.r3.dbfailure.schemas")
|
findCordapp("com.r3.dbfailure.schemas")
|
||||||
), inMemoryDB = false)
|
), inMemoryDB = false)
|
||||||
) {
|
) {
|
||||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
|
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
|
||||||
val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
|
.map { startNode(providedName = it,
|
||||||
|
rpcUsers = listOf(user)) }
|
||||||
|
.transpose()
|
||||||
|
.getOrThrow()
|
||||||
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
|
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
|
||||||
|
|
||||||
val startErrorInObservableWhenConsumingState = {
|
val startErrorInObservableWhenConsumingState = {
|
||||||
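The replacement above starts Alice and Bob in parallel by mapping each name to a startNode future and collapsing the futures with transpose(). A sketch of that pattern in isolation, assuming the Corda test driver dependencies; the "ALL" permission and the in-process flag are illustrative defaults:

```kotlin
import net.corda.core.internal.concurrent.transpose
import net.corda.core.utilities.getOrThrow
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.User

fun main() {
    val user = User("user", "password", permissions = setOf("ALL"))
    driver(DriverParameters(startNodesInProcess = true)) {
        // Both nodes boot concurrently; getOrThrow() waits on the single combined future.
        val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
            .map { startNode(providedName = it, rpcUsers = listOf(user)) }
            .transpose()
            .getOrThrow()
        println("Started ${aliceNode.nodeInfo.legalIdentities.first().name} and ${bobNode.nodeInfo.legalIdentities.first().name}")
    }
}
```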
@ -540,8 +544,11 @@ class VaultObserverExceptionTest {
|
|||||||
),
|
),
|
||||||
inMemoryDB = false)
|
inMemoryDB = false)
|
||||||
) {
|
) {
|
||||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
|
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
|
||||||
val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
|
.map { startNode(providedName = it,
|
||||||
|
rpcUsers = listOf(user)) }
|
||||||
|
.transpose()
|
||||||
|
.getOrThrow()
|
||||||
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
|
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
|
||||||
|
|
||||||
val startErrorInObservableWhenConsumingState = {
|
val startErrorInObservableWhenConsumingState = {
|
||||||
@ -622,8 +629,11 @@ class VaultObserverExceptionTest {
|
|||||||
),
|
),
|
||||||
inMemoryDB = false)
|
inMemoryDB = false)
|
||||||
) {
|
) {
|
||||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
|
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
|
||||||
val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
|
.map { startNode(providedName = it,
|
||||||
|
rpcUsers = listOf(user)) }
|
||||||
|
.transpose()
|
||||||
|
.getOrThrow()
|
||||||
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
|
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
|
||||||
|
|
||||||
val startErrorInObservableWhenCreatingSecondState = {
|
val startErrorInObservableWhenCreatingSecondState = {
|
||||||
@ -699,8 +709,11 @@ class VaultObserverExceptionTest {
|
|||||||
),
|
),
|
||||||
inMemoryDB = false)
|
inMemoryDB = false)
|
||||||
) {
|
) {
|
||||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
|
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
|
||||||
val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
|
.map { startNode(providedName = it,
|
||||||
|
rpcUsers = listOf(user)) }
|
||||||
|
.transpose()
|
||||||
|
.getOrThrow()
|
||||||
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
|
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
|
||||||
|
|
||||||
val startErrorInObservableWhenConsumingState = {
|
val startErrorInObservableWhenConsumingState = {
|
||||||
@ -843,8 +856,8 @@ class VaultObserverExceptionTest {
|
|||||||
override fun call(): List<String> {
|
override fun call(): List<String> {
|
||||||
return serviceHub.withEntityManager {
|
return serviceHub.withEntityManager {
|
||||||
val criteriaQuery = this.criteriaBuilder.createQuery(String::class.java)
|
val criteriaQuery = this.criteriaBuilder.createQuery(String::class.java)
|
||||||
val root = criteriaQuery.from(PersistentUniquenessProvider.CommittedTransaction::class.java)
|
val root = criteriaQuery.from(JPAUniquenessProvider.CommittedTransaction::class.java)
|
||||||
criteriaQuery.select(root.get<String>(PersistentUniquenessProvider.CommittedTransaction::transactionId.name))
|
criteriaQuery.select(root.get(JPAUniquenessProvider.CommittedTransaction::transactionId.name))
|
||||||
val query = this.createQuery(criteriaQuery)
|
val query = this.createQuery(criteriaQuery)
|
||||||
query.resultList
|
query.resultList
|
||||||
}
|
}
|
||||||
|
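The last hunk above switches the helper flow from PersistentUniquenessProvider to the JPA notary's JPAUniquenessProvider.CommittedTransaction entity. A sketch of that criteria query on its own, assuming it runs on a node whose notary uses the JPA uniqueness provider; the flow name is illustrative:

```kotlin
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByRPC
import net.corda.notary.jpa.JPAUniquenessProvider

// Reads the transaction ids the JPA notary has committed, via the node's entity manager.
@StartableByRPC
class GetNotarisedTransactionIds : FlowLogic<List<String>>() {
    @Suspendable
    override fun call(): List<String> {
        return serviceHub.withEntityManager {
            val criteriaQuery = criteriaBuilder.createQuery(String::class.java)
            val root = criteriaQuery.from(JPAUniquenessProvider.CommittedTransaction::class.java)
            criteriaQuery.select(root.get(JPAUniquenessProvider.CommittedTransaction::transactionId.name))
            createQuery(criteriaQuery).resultList
        }
    }
}
```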
@ -133,7 +133,6 @@ import net.corda.node.services.statemachine.StateMachineManager
|
|||||||
import net.corda.node.services.transactions.BasicVerifierFactoryService
|
import net.corda.node.services.transactions.BasicVerifierFactoryService
|
||||||
import net.corda.node.services.transactions.DeterministicVerifierFactoryService
|
import net.corda.node.services.transactions.DeterministicVerifierFactoryService
|
||||||
import net.corda.node.services.transactions.InMemoryTransactionVerifierService
|
import net.corda.node.services.transactions.InMemoryTransactionVerifierService
|
||||||
import net.corda.node.services.transactions.SimpleNotaryService
|
|
||||||
import net.corda.node.services.transactions.VerifierFactoryService
|
import net.corda.node.services.transactions.VerifierFactoryService
|
||||||
import net.corda.node.services.upgrade.ContractUpgradeServiceImpl
|
import net.corda.node.services.upgrade.ContractUpgradeServiceImpl
|
||||||
import net.corda.node.services.vault.NodeVaultService
|
import net.corda.node.services.vault.NodeVaultService
|
||||||
@ -855,10 +854,6 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
private fun isRunningSimpleNotaryService(configuration: NodeConfiguration): Boolean {
|
|
||||||
return configuration.notary != null && configuration.notary?.className == SimpleNotaryService::class.java.name
|
|
||||||
}
|
|
||||||
|
|
||||||
private class ServiceInstantiationException(cause: Throwable?) : CordaException("Service Instantiation Error", cause)
|
private class ServiceInstantiationException(cause: Throwable?) : CordaException("Service Instantiation Error", cause)
|
||||||
|
|
||||||
private fun installCordaServices() {
|
private fun installCordaServices() {
|
||||||
|
@ -6,12 +6,12 @@ import net.corda.core.flows.ContractUpgradeFlow
|
|||||||
import net.corda.core.internal.cordapp.CordappImpl
|
import net.corda.core.internal.cordapp.CordappImpl
|
||||||
import net.corda.core.internal.location
|
import net.corda.core.internal.location
|
||||||
import net.corda.node.VersionInfo
|
import net.corda.node.VersionInfo
|
||||||
import net.corda.node.services.transactions.NodeNotarySchemaV1
|
|
||||||
import net.corda.node.services.transactions.SimpleNotaryService
|
|
||||||
import net.corda.notary.experimental.bftsmart.BFTSmartNotarySchemaV1
|
import net.corda.notary.experimental.bftsmart.BFTSmartNotarySchemaV1
|
||||||
import net.corda.notary.experimental.bftsmart.BFTSmartNotaryService
|
import net.corda.notary.experimental.bftsmart.BFTSmartNotaryService
|
||||||
import net.corda.notary.experimental.raft.RaftNotarySchemaV1
|
import net.corda.notary.experimental.raft.RaftNotarySchemaV1
|
||||||
import net.corda.notary.experimental.raft.RaftNotaryService
|
import net.corda.notary.experimental.raft.RaftNotaryService
|
||||||
|
import net.corda.notary.jpa.JPANotarySchemaV1
|
||||||
|
import net.corda.notary.jpa.JPANotaryService
|
||||||
|
|
||||||
internal object VirtualCordapp {
|
internal object VirtualCordapp {
|
||||||
/** A list of the core RPC flows present in Corda */
|
/** A list of the core RPC flows present in Corda */
|
||||||
@ -46,7 +46,7 @@ internal object VirtualCordapp {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/** A Cordapp for the built-in notary service implementation. */
|
/** A Cordapp for the built-in notary service implementation. */
|
||||||
fun generateSimpleNotary(versionInfo: VersionInfo): CordappImpl {
|
fun generateJPANotary(versionInfo: VersionInfo): CordappImpl {
|
||||||
return CordappImpl(
|
return CordappImpl(
|
||||||
contractClassNames = listOf(),
|
contractClassNames = listOf(),
|
||||||
initiatedFlows = listOf(),
|
initiatedFlows = listOf(),
|
||||||
@ -57,15 +57,16 @@ internal object VirtualCordapp {
|
|||||||
serializationWhitelists = listOf(),
|
serializationWhitelists = listOf(),
|
||||||
serializationCustomSerializers = listOf(),
|
serializationCustomSerializers = listOf(),
|
||||||
checkpointCustomSerializers = listOf(),
|
checkpointCustomSerializers = listOf(),
|
||||||
customSchemas = setOf(NodeNotarySchemaV1),
|
customSchemas = setOf(JPANotarySchemaV1),
|
||||||
info = Cordapp.Info.Default("corda-notary", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
|
info = Cordapp.Info.Default("corda-notary", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
|
||||||
allFlows = listOf(),
|
allFlows = listOf(),
|
||||||
jarPath = SimpleNotaryService::class.java.location,
|
jarPath = JPANotaryService::class.java.location,
|
||||||
jarHash = SecureHash.allOnesHash,
|
jarHash = SecureHash.allOnesHash,
|
||||||
minimumPlatformVersion = versionInfo.platformVersion,
|
minimumPlatformVersion = versionInfo.platformVersion,
|
||||||
targetPlatformVersion = versionInfo.platformVersion,
|
targetPlatformVersion = versionInfo.platformVersion,
|
||||||
notaryService = SimpleNotaryService::class.java,
|
notaryService = JPANotaryService::class.java,
|
||||||
isLoaded = false
|
isLoaded = false,
|
||||||
|
isVirtual = true
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -93,6 +93,8 @@ interface NodeConfiguration : ConfigurationWithOptionsContainer {
|
|||||||
|
|
||||||
val quasarExcludePackages: List<String>
|
val quasarExcludePackages: List<String>
|
||||||
|
|
||||||
|
val reloadCheckpointAfterSuspend: Boolean
|
||||||
|
|
||||||
companion object {
|
companion object {
|
||||||
// default to at least 8MB and a bit extra for larger heap sizes
|
// default to at least 8MB and a bit extra for larger heap sizes
|
||||||
val defaultTransactionCacheSize: Long = 8.MB + getAdditionalCacheMemory()
|
val defaultTransactionCacheSize: Long = 8.MB + getAdditionalCacheMemory()
|
||||||
@ -125,6 +127,10 @@ enum class JmxReporterType {
|
|||||||
}
|
}
|
||||||
|
|
||||||
data class DevModeOptions(
|
data class DevModeOptions(
|
||||||
|
@Deprecated(
|
||||||
|
"The checkpoint checker has been replaced by the ability to reload a checkpoint from the database after every suspend" +
|
||||||
|
"Use [NodeConfiguration.disableReloadCheckpointAfterSuspend] instead."
|
||||||
|
)
|
||||||
val disableCheckpointChecker: Boolean = Defaults.disableCheckpointChecker,
|
val disableCheckpointChecker: Boolean = Defaults.disableCheckpointChecker,
|
||||||
val allowCompatibilityZone: Boolean = Defaults.allowCompatibilityZone,
|
val allowCompatibilityZone: Boolean = Defaults.allowCompatibilityZone,
|
||||||
val djvm: DJVMOptions? = null
|
val djvm: DJVMOptions? = null
|
||||||
@ -140,10 +146,6 @@ data class DJVMOptions(
|
|||||||
val cordaSource: List<String>
|
val cordaSource: List<String>
|
||||||
)
|
)
|
||||||
|
|
||||||
fun NodeConfiguration.shouldCheckCheckpoints(): Boolean {
|
|
||||||
return this.devMode && this.devModeOptions?.disableCheckpointChecker != true
|
|
||||||
}
|
|
||||||
|
|
||||||
fun NodeConfiguration.shouldStartSSHDaemon() = this.sshd != null
|
fun NodeConfiguration.shouldStartSSHDaemon() = this.sshd != null
|
||||||
fun NodeConfiguration.shouldStartLocalShell() = !this.noLocalShell && System.console() != null && this.devMode
|
fun NodeConfiguration.shouldStartLocalShell() = !this.noLocalShell && System.console() != null && this.devMode
|
||||||
fun NodeConfiguration.shouldInitCrashShell() = shouldStartLocalShell() || shouldStartSSHDaemon()
|
fun NodeConfiguration.shouldInitCrashShell() = shouldStartLocalShell() || shouldStartSSHDaemon()
|
||||||
|
@ -83,7 +83,9 @@ data class NodeConfigurationImpl(
|
|||||||
override val blacklistedAttachmentSigningKeys: List<String> = Defaults.blacklistedAttachmentSigningKeys,
|
override val blacklistedAttachmentSigningKeys: List<String> = Defaults.blacklistedAttachmentSigningKeys,
|
||||||
override val configurationWithOptions: ConfigurationWithOptions,
|
override val configurationWithOptions: ConfigurationWithOptions,
|
||||||
override val flowExternalOperationThreadPoolSize: Int = Defaults.flowExternalOperationThreadPoolSize,
|
override val flowExternalOperationThreadPoolSize: Int = Defaults.flowExternalOperationThreadPoolSize,
|
||||||
override val quasarExcludePackages: List<String> = Defaults.quasarExcludePackages
|
override val quasarExcludePackages: List<String> = Defaults.quasarExcludePackages,
|
||||||
|
override val reloadCheckpointAfterSuspend: Boolean = Defaults.reloadCheckpointAfterSuspend
|
||||||
|
|
||||||
) : NodeConfiguration {
|
) : NodeConfiguration {
|
||||||
internal object Defaults {
|
internal object Defaults {
|
||||||
val jmxMonitoringHttpPort: Int? = null
|
val jmxMonitoringHttpPort: Int? = null
|
||||||
@ -122,6 +124,7 @@ data class NodeConfigurationImpl(
|
|||||||
val blacklistedAttachmentSigningKeys: List<String> = emptyList()
|
val blacklistedAttachmentSigningKeys: List<String> = emptyList()
|
||||||
const val flowExternalOperationThreadPoolSize: Int = 1
|
const val flowExternalOperationThreadPoolSize: Int = 1
|
||||||
val quasarExcludePackages: List<String> = emptyList()
|
val quasarExcludePackages: List<String> = emptyList()
|
||||||
|
val reloadCheckpointAfterSuspend: Boolean = System.getProperty("reloadCheckpointAfterSuspend", "false")!!.toBoolean()
|
||||||
|
|
||||||
fun cordappsDirectories(baseDirectory: Path) = listOf(baseDirectory / CORDAPPS_DIR_NAME_DEFAULT)
|
fun cordappsDirectories(baseDirectory: Path) = listOf(baseDirectory / CORDAPPS_DIR_NAME_DEFAULT)
|
||||||
|
|
||||||
|
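The new Defaults entry above reads reloadCheckpointAfterSuspend from a JVM system property, so the feature can be switched on for a whole test run without editing node.conf. A minimal sketch of how that default resolves, assuming the same property name:

```kotlin
fun main() {
    // Set before the node configuration defaults are evaluated, e.g. in a test harness.
    System.setProperty("reloadCheckpointAfterSuspend", "true")

    // Mirrors the Defaults line added above.
    val reloadCheckpointAfterSuspend =
        System.getProperty("reloadCheckpointAfterSuspend", "false")!!.toBoolean()

    println("reloadCheckpointAfterSuspend = $reloadCheckpointAfterSuspend") // prints true
}
```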
@ -8,6 +8,7 @@ import net.corda.common.validation.internal.Validated.Companion.invalid
|
|||||||
import net.corda.common.validation.internal.Validated.Companion.valid
|
import net.corda.common.validation.internal.Validated.Companion.valid
|
||||||
import net.corda.node.services.config.*
|
import net.corda.node.services.config.*
|
||||||
import net.corda.node.services.config.NodeConfigurationImpl.Defaults
|
import net.corda.node.services.config.NodeConfigurationImpl.Defaults
|
||||||
|
import net.corda.node.services.config.NodeConfigurationImpl.Defaults.reloadCheckpointAfterSuspend
|
||||||
import net.corda.node.services.config.schema.parsers.*
|
import net.corda.node.services.config.schema.parsers.*
|
||||||
|
|
||||||
internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfiguration>("NodeConfiguration") {
|
internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfiguration>("NodeConfiguration") {
|
||||||
@ -66,6 +67,7 @@ internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfig
|
|||||||
.withDefaultValue(Defaults.networkParameterAcceptanceSettings)
|
.withDefaultValue(Defaults.networkParameterAcceptanceSettings)
|
||||||
private val flowExternalOperationThreadPoolSize by int().optional().withDefaultValue(Defaults.flowExternalOperationThreadPoolSize)
|
private val flowExternalOperationThreadPoolSize by int().optional().withDefaultValue(Defaults.flowExternalOperationThreadPoolSize)
|
||||||
private val quasarExcludePackages by string().list().optional().withDefaultValue(Defaults.quasarExcludePackages)
|
private val quasarExcludePackages by string().list().optional().withDefaultValue(Defaults.quasarExcludePackages)
|
||||||
|
private val reloadCheckpointAfterSuspend by boolean().optional().withDefaultValue(Defaults.reloadCheckpointAfterSuspend)
|
||||||
@Suppress("unused")
|
@Suppress("unused")
|
||||||
private val custom by nestedObject().optional()
|
private val custom by nestedObject().optional()
|
||||||
@Suppress("unused")
|
@Suppress("unused")
|
||||||
@ -133,7 +135,8 @@ internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfig
|
|||||||
networkParameterAcceptanceSettings = config[networkParameterAcceptanceSettings],
|
networkParameterAcceptanceSettings = config[networkParameterAcceptanceSettings],
|
||||||
configurationWithOptions = ConfigurationWithOptions(configuration, Configuration.Options.defaults),
|
configurationWithOptions = ConfigurationWithOptions(configuration, Configuration.Options.defaults),
|
||||||
flowExternalOperationThreadPoolSize = config[flowExternalOperationThreadPoolSize],
|
flowExternalOperationThreadPoolSize = config[flowExternalOperationThreadPoolSize],
|
||||||
quasarExcludePackages = config[quasarExcludePackages]
|
quasarExcludePackages = config[quasarExcludePackages],
|
||||||
|
reloadCheckpointAfterSuspend = config[reloadCheckpointAfterSuspend]
|
||||||
))
|
))
|
||||||
} catch (e: Exception) {
|
} catch (e: Exception) {
|
||||||
return when (e) {
|
return when (e) {
|
||||||
|
@ -2,6 +2,7 @@ package net.corda.node.services.network
|
|||||||
|
|
||||||
import net.corda.core.crypto.SecureHash
|
import net.corda.core.crypto.SecureHash
|
||||||
import net.corda.core.crypto.SignedData
|
import net.corda.core.crypto.SignedData
|
||||||
|
import net.corda.core.crypto.sha256
|
||||||
import net.corda.core.internal.openHttpConnection
|
import net.corda.core.internal.openHttpConnection
|
||||||
import net.corda.core.internal.post
|
import net.corda.core.internal.post
|
||||||
import net.corda.core.internal.responseAs
|
import net.corda.core.internal.responseAs
|
||||||
@ -13,6 +14,7 @@ import net.corda.core.utilities.seconds
|
|||||||
import net.corda.core.utilities.trace
|
import net.corda.core.utilities.trace
|
||||||
import net.corda.node.VersionInfo
|
import net.corda.node.VersionInfo
|
||||||
import net.corda.node.utilities.registration.cacheControl
|
import net.corda.node.utilities.registration.cacheControl
|
||||||
|
import net.corda.node.utilities.registration.cordaServerVersion
|
||||||
import net.corda.nodeapi.internal.SignedNodeInfo
|
import net.corda.nodeapi.internal.SignedNodeInfo
|
||||||
import net.corda.nodeapi.internal.network.NetworkMap
|
import net.corda.nodeapi.internal.network.NetworkMap
|
||||||
import net.corda.nodeapi.internal.network.SignedNetworkMap
|
import net.corda.nodeapi.internal.network.SignedNetworkMap
|
||||||
@ -61,8 +63,9 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
|
|||||||
val signedNetworkMap = connection.responseAs<SignedNetworkMap>()
|
val signedNetworkMap = connection.responseAs<SignedNetworkMap>()
|
||||||
val networkMap = signedNetworkMap.verifiedNetworkMapCert(trustRoot)
|
val networkMap = signedNetworkMap.verifiedNetworkMapCert(trustRoot)
|
||||||
val timeout = connection.cacheControl.maxAgeSeconds().seconds
|
val timeout = connection.cacheControl.maxAgeSeconds().seconds
|
||||||
|
val version = connection.cordaServerVersion
|
||||||
logger.trace { "Fetched network map update from $url successfully: $networkMap" }
|
logger.trace { "Fetched network map update from $url successfully: $networkMap" }
|
||||||
return NetworkMapResponse(networkMap, timeout)
|
return NetworkMapResponse(networkMap, timeout, version)
|
||||||
}
|
}
|
||||||
|
|
||||||
fun getNodeInfo(nodeInfoHash: SecureHash): NodeInfo {
|
fun getNodeInfo(nodeInfoHash: SecureHash): NodeInfo {
|
||||||
@ -81,6 +84,23 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
|
|||||||
return networkParameter
|
return networkParameter
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fun getNodeInfos(): List<NodeInfo> {
|
||||||
|
val url = URL("$networkMapUrl/node-infos")
|
||||||
|
logger.trace { "Fetching node infos from $url." }
|
||||||
|
val verifiedNodeInfo = url.openHttpConnection().responseAs<Pair<SignedNetworkMap, List<SignedNodeInfo>>>()
|
||||||
|
.also {
|
||||||
|
val verifiedNodeInfoHashes = it.first.verifiedNetworkMapCert(trustRoot).nodeInfoHashes
|
||||||
|
val nodeInfoHashes = it.second.map { signedNodeInfo -> signedNodeInfo.verified().serialize().sha256() }
|
||||||
|
require(
|
||||||
|
verifiedNodeInfoHashes.containsAll(nodeInfoHashes) &&
|
||||||
|
verifiedNodeInfoHashes.size == nodeInfoHashes.size
|
||||||
|
)
|
||||||
|
}
|
||||||
|
.second.map { it.verified() }
|
||||||
|
logger.trace { "Fetched node infos successfully. Node Infos size: ${verifiedNodeInfo.size}" }
|
||||||
|
return verifiedNodeInfo
|
||||||
|
}
|
||||||
|
|
||||||
fun myPublicHostname(): String {
|
fun myPublicHostname(): String {
|
||||||
val url = URL("$networkMapUrl/my-hostname")
|
val url = URL("$networkMapUrl/my-hostname")
|
||||||
logger.trace { "Resolving public hostname from '$url'." }
|
logger.trace { "Resolving public hostname from '$url'." }
|
||||||
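The new getNodeInfos() client above only trusts the bulk response if every signed node info hashes to an entry in the certificate-verified network map and the counts line up. A sketch of that check pulled out as a function, using the nodeapi types imported by the file; the function name is illustrative and it takes the already-verified NetworkMap as input:

```kotlin
import net.corda.core.crypto.sha256
import net.corda.core.node.NodeInfo
import net.corda.core.serialization.serialize
import net.corda.nodeapi.internal.SignedNodeInfo
import net.corda.nodeapi.internal.network.NetworkMap

// Verifies a bulk node-info response against the (already certificate-verified) network map.
fun verifiedBulkNodeInfos(verifiedNetworkMap: NetworkMap, signedNodeInfos: List<SignedNodeInfo>): List<NodeInfo> {
    val expectedHashes = verifiedNetworkMap.nodeInfoHashes
    val actualHashes = signedNodeInfos.map { it.verified().serialize().sha256() }
    require(expectedHashes.containsAll(actualHashes) && expectedHashes.size == actualHashes.size) {
        "Bulk node-info response does not match the network map"
    }
    return signedNodeInfos.map { it.verified() }
}
```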
@ -90,4 +110,4 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
data class NetworkMapResponse(val payload: NetworkMap, val cacheMaxAge: Duration)
|
data class NetworkMapResponse(val payload: NetworkMap, val cacheMaxAge: Duration, val serverVersion: String)
|
||||||
|
@ -4,6 +4,7 @@ import com.google.common.util.concurrent.MoreExecutors
|
|||||||
import net.corda.core.CordaRuntimeException
|
import net.corda.core.CordaRuntimeException
|
||||||
import net.corda.core.crypto.SecureHash
|
import net.corda.core.crypto.SecureHash
|
||||||
import net.corda.core.crypto.SignedData
|
import net.corda.core.crypto.SignedData
|
||||||
|
import net.corda.core.crypto.sha256
|
||||||
import net.corda.core.internal.NetworkParametersStorage
|
import net.corda.core.internal.NetworkParametersStorage
|
||||||
import net.corda.core.internal.VisibleForTesting
|
import net.corda.core.internal.VisibleForTesting
|
||||||
import net.corda.core.internal.copyTo
|
import net.corda.core.internal.copyTo
|
||||||
@ -65,6 +66,7 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
|
|||||||
companion object {
|
companion object {
|
||||||
private val logger = contextLogger()
|
private val logger = contextLogger()
|
||||||
private val defaultRetryInterval = 1.minutes
|
private val defaultRetryInterval = 1.minutes
|
||||||
|
private const val bulkNodeInfoFetchThreshold = 50
|
||||||
}
|
}
|
||||||
|
|
||||||
private val parametersUpdatesTrack = PublishSubject.create<ParametersUpdateInfo>()
|
private val parametersUpdatesTrack = PublishSubject.create<ParametersUpdateInfo>()
|
||||||
@ -173,17 +175,9 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
|
|||||||
if (networkMapClient == null) {
|
if (networkMapClient == null) {
|
||||||
throw CordaRuntimeException("Network map cache can be updated only if network map/compatibility zone URL is specified")
|
throw CordaRuntimeException("Network map cache can be updated only if network map/compatibility zone URL is specified")
|
||||||
}
|
}
|
||||||
val (globalNetworkMap, cacheTimeout) = networkMapClient.getNetworkMap()
|
val (globalNetworkMap, cacheTimeout, version) = networkMapClient.getNetworkMap()
|
||||||
globalNetworkMap.parametersUpdate?.let { handleUpdateNetworkParameters(networkMapClient, it) }
|
globalNetworkMap.parametersUpdate?.let { handleUpdateNetworkParameters(networkMapClient, it) }
|
||||||
val additionalHashes = extraNetworkMapKeys.flatMap {
|
val additionalHashes = getPrivateNetworkNodeHashes(version)
|
||||||
try {
|
|
||||||
networkMapClient.getNetworkMap(it).payload.nodeInfoHashes
|
|
||||||
} catch (e: Exception) {
|
|
||||||
// Failure to retrieve one network map using UUID shouldn't stop the whole update.
|
|
||||||
logger.warn("Error encountered when downloading network map with uuid '$it', skipping...", e)
|
|
||||||
emptyList<SecureHash>()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
val allHashesFromNetworkMap = (globalNetworkMap.nodeInfoHashes + additionalHashes).toSet()
|
val allHashesFromNetworkMap = (globalNetworkMap.nodeInfoHashes + additionalHashes).toSet()
|
||||||
if (currentParametersHash != globalNetworkMap.networkParameterHash) {
|
if (currentParametersHash != globalNetworkMap.networkParameterHash) {
|
||||||
exitOnParametersMismatch(globalNetworkMap)
|
exitOnParametersMismatch(globalNetworkMap)
|
||||||
@ -194,6 +188,37 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
|
|||||||
val allNodeHashes = networkMapCache.allNodeHashes
|
val allNodeHashes = networkMapCache.allNodeHashes
|
||||||
val nodeHashesToBeDeleted = (allNodeHashes - allHashesFromNetworkMap - nodeInfoWatcher.processedNodeInfoHashes)
|
val nodeHashesToBeDeleted = (allNodeHashes - allHashesFromNetworkMap - nodeInfoWatcher.processedNodeInfoHashes)
|
||||||
.filter { it != ourNodeInfoHash }
|
.filter { it != ourNodeInfoHash }
|
||||||
|
// use the old per-hash (V1) fetch for legacy servers, or when other nodes are already known and fewer than the threshold are missing; otherwise fetch everything in one bulk request
|
||||||
|
if (version == "1" || (allNodeHashes.size > 1 && (allHashesFromNetworkMap - allNodeHashes).size < bulkNodeInfoFetchThreshold))
|
||||||
|
updateNodeInfosV1(allHashesFromNetworkMap, allNodeHashes, networkMapClient)
|
||||||
|
else
|
||||||
|
updateNodeInfos(allHashesFromNetworkMap)
|
||||||
|
// NOTE: We remove nodes after any new/updates because updated nodes will have a new hash and, therefore, any
|
||||||
|
// nodes that we can actually pull out of the cache (with the old hashes) should be a truly removed node.
|
||||||
|
nodeHashesToBeDeleted.mapNotNull { networkMapCache.getNodeByHash(it) }.forEach(networkMapCache::removeNode)
|
||||||
|
|
||||||
|
// Mark the network map cache as ready on a successful poll of the HTTP network map, even on the odd chance that
|
||||||
|
// it's empty
|
||||||
|
networkMapCache.nodeReady.set(null)
|
||||||
|
return cacheTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun updateNodeInfos(allHashesFromNetworkMap: Set<SecureHash>) {
|
||||||
|
val networkMapDownloadStartTime = System.currentTimeMillis()
|
||||||
|
val nodeInfos = try {
|
||||||
|
networkMapClient!!.getNodeInfos()
|
||||||
|
} catch (e: Exception) {
|
||||||
|
logger.warn("Error encountered when downloading node infos", e)
|
||||||
|
emptyList<NodeInfo>()
|
||||||
|
}
|
||||||
|
(allHashesFromNetworkMap - nodeInfos.map { it.serialize().sha256() }).forEach {
|
||||||
|
logger.warn("Error encountered when downloading node info '$it', skipping...")
|
||||||
|
}
|
||||||
|
networkMapCache.addOrUpdateNodes(nodeInfos)
|
||||||
|
logger.info("Fetched: ${nodeInfos.size} using 1 bulk request in ${System.currentTimeMillis() - networkMapDownloadStartTime}ms")
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun updateNodeInfosV1(allHashesFromNetworkMap: Set<SecureHash>, allNodeHashes: List<SecureHash>, networkMapClient: NetworkMapClient) {
|
||||||
//at the moment we use a blocking HTTP library - but under the covers, the OS will interleave threads waiting for IO
|
//at the moment we use a blocking HTTP library - but under the covers, the OS will interleave threads waiting for IO
|
||||||
//as HTTP GET is mostly IO bound, use more threads than CPU's
|
//as HTTP GET is mostly IO bound, use more threads than CPU's
|
||||||
//maximum threads to use = 24, as if we did not limit this on large machines it could result in 100's of concurrent requests
|
//maximum threads to use = 24, as if we did not limit this on large machines it could result in 100's of concurrent requests
|
||||||
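The branch above chooses between the new single bulk download and the older per-hash (V1) download: per-hash is kept for legacy network map servers and for the case where the node already knows its peers and only a few node infos are missing. A sketch of just that predicate, assuming the threshold of 50 from the companion object:

```kotlin
import net.corda.core.crypto.SecureHash

private const val bulkNodeInfoFetchThreshold = 50

// True when the per-hash (V1) fetch path should be used instead of one bulk request.
fun useIndividualNodeInfoFetch(
    serverVersion: String,
    allHashesFromNetworkMap: Set<SecureHash>,
    allNodeHashes: List<SecureHash>
): Boolean {
    return serverVersion == "1" ||
            (allNodeHashes.size > 1 && (allHashesFromNetworkMap - allNodeHashes).size < bulkNodeInfoFetchThreshold)
}
```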
@ -230,14 +255,25 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
|
|||||||
executorToUseForInsertionIntoDB.shutdown()
|
executorToUseForInsertionIntoDB.shutdown()
|
||||||
}.getOrThrow()
|
}.getOrThrow()
|
||||||
}
|
}
|
||||||
// NOTE: We remove nodes after any new/updates because updated nodes will have a new hash and, therefore, any
|
}
|
||||||
// nodes that we can actually pull out of the cache (with the old hashes) should be a truly removed node.
|
|
||||||
nodeHashesToBeDeleted.mapNotNull { networkMapCache.getNodeByHash(it) }.forEach(networkMapCache::removeNode)
|
|
||||||
|
|
||||||
// Mark the network map cache as ready on a successful poll of the HTTP network map, even on the odd chance that
|
private fun getPrivateNetworkNodeHashes(version: String): List<SecureHash> {
|
||||||
// it's empty
|
// private networks are not supported by latest versions of Network Map
|
||||||
networkMapCache.nodeReady.set(null)
|
// for compatibility reasons, this call is still present for new nodes that communicate with old Network Map service versions
|
||||||
return cacheTimeout
|
// but can be omitted if we know that the version of the Network Map is recent enough
|
||||||
|
return if (version == "1") {
|
||||||
|
extraNetworkMapKeys.flatMap {
|
||||||
|
try {
|
||||||
|
networkMapClient!!.getNetworkMap(it).payload.nodeInfoHashes
|
||||||
|
} catch (e: Exception) {
|
||||||
|
// Failure to retrieve one network map using UUID shouldn't stop the whole update.
|
||||||
|
logger.warn("Error encountered when downloading network map with uuid '$it', skipping...", e)
|
||||||
|
emptyList<SecureHash>()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
emptyList()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private fun exitOnParametersMismatch(networkMap: NetworkMap) {
|
private fun exitOnParametersMismatch(networkMap: NetworkMap) {
|
||||||
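getPrivateNetworkNodeHashes above keeps the per-UUID lookups only for legacy ("1") network map servers, and a failure for one UUID is logged and skipped rather than aborting the whole update. A simplified sketch of that fallback, where the fetch lambda is a purely illustrative stand-in for networkMapClient.getNetworkMap(uuid).payload.nodeInfoHashes:

```kotlin
import java.util.UUID
import net.corda.core.crypto.SecureHash

// Per-UUID private-network lookups, failures tolerated, only against legacy servers.
fun privateNetworkNodeHashes(
    serverVersion: String,
    extraNetworkMapKeys: List<UUID>,
    fetch: (UUID) -> List<SecureHash> // illustrative stand-in for the network map client call
): List<SecureHash> {
    if (serverVersion != "1") return emptyList()
    return extraNetworkMapKeys.flatMap { uuid ->
        try {
            fetch(uuid)
        } catch (e: Exception) {
            // One failed private network must not stop the whole network map update.
            emptyList()
        }
    }
}
```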
|
@ -63,7 +63,6 @@ class NodeSchemaService(private val extraSchemas: Set<MappedSchema> = emptySet()
|
|||||||
NodeCoreV1)
|
NodeCoreV1)
|
||||||
|
|
||||||
val internalSchemas = requiredSchemas + extraSchemas.filter { schema ->
|
val internalSchemas = requiredSchemas + extraSchemas.filter { schema ->
|
||||||
schema::class.qualifiedName == "net.corda.node.services.transactions.NodeNotarySchemaV1" ||
|
|
||||||
schema::class.qualifiedName?.startsWith("net.corda.notary.") ?: false
|
schema::class.qualifiedName?.startsWith("net.corda.notary.") ?: false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -148,12 +148,21 @@ sealed class Event {
|
|||||||
data class AsyncOperationThrows(val throwable: Throwable) : Event()
|
data class AsyncOperationThrows(val throwable: Throwable) : Event()
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Retry a flow from the last checkpoint, or if there is no checkpoint, restart the flow with the same invocation details.
|
* Retry a flow from its last checkpoint, or if there is no checkpoint, restart the flow with the same invocation details.
|
||||||
*/
|
*/
|
||||||
object RetryFlowFromSafePoint : Event() {
|
object RetryFlowFromSafePoint : Event() {
|
||||||
override fun toString() = "RetryFlowFromSafePoint"
|
override fun toString() = "RetryFlowFromSafePoint"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reload a flow from its last checkpoint, or if there is no checkpoint, restart the flow with the same invocation details.
|
||||||
|
* This is separate from [RetryFlowFromSafePoint] which is used for error handling within the state machine.
|
||||||
|
* [ReloadFlowFromCheckpointAfterSuspend] is only used when [NodeConfiguration.reloadCheckpointAfterSuspend] is true.
|
||||||
|
*/
|
||||||
|
object ReloadFlowFromCheckpointAfterSuspend : Event() {
|
||||||
|
override fun toString() = "ReloadFlowFromCheckpointAfterSuspend"
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Keeps a flow for overnight observation. Overnight observation practically sends the fiber to get suspended,
|
* Keeps a flow for overnight observation. Overnight observation practically sends the fiber to get suspended,
|
||||||
* in [FlowStateMachineImpl.processEventsUntilFlowIsResumed]. Since the fiber's channel will have no more events to process,
|
* in [FlowStateMachineImpl.processEventsUntilFlowIsResumed]. Since the fiber's channel will have no more events to process,
|
||||||
|
@ -19,6 +19,7 @@ import net.corda.core.utilities.contextLogger
|
|||||||
import net.corda.node.services.api.CheckpointStorage
|
import net.corda.node.services.api.CheckpointStorage
|
||||||
import net.corda.node.services.api.ServiceHubInternal
|
import net.corda.node.services.api.ServiceHubInternal
|
||||||
import net.corda.node.services.messaging.DeduplicationHandler
|
import net.corda.node.services.messaging.DeduplicationHandler
|
||||||
|
import net.corda.node.services.statemachine.FlowStateMachineImpl.Companion.currentStateMachine
|
||||||
import net.corda.node.services.statemachine.transitions.StateMachine
|
import net.corda.node.services.statemachine.transitions.StateMachine
|
||||||
import net.corda.node.utilities.isEnabledTimedFlow
|
import net.corda.node.utilities.isEnabledTimedFlow
|
||||||
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||||
@ -36,21 +37,23 @@ class NonResidentFlow(val runId: StateMachineRunId, val checkpoint: Checkpoint)
|
|||||||
}
|
}
|
||||||
|
|
||||||
class FlowCreator(
|
class FlowCreator(
|
||||||
val checkpointSerializationContext: CheckpointSerializationContext,
|
private val checkpointSerializationContext: CheckpointSerializationContext,
|
||||||
private val checkpointStorage: CheckpointStorage,
|
private val checkpointStorage: CheckpointStorage,
|
||||||
val scheduler: FiberScheduler,
|
private val scheduler: FiberScheduler,
|
||||||
val database: CordaPersistence,
|
private val database: CordaPersistence,
|
||||||
val transitionExecutor: TransitionExecutor,
|
private val transitionExecutor: TransitionExecutor,
|
||||||
val actionExecutor: ActionExecutor,
|
private val actionExecutor: ActionExecutor,
|
||||||
val secureRandom: SecureRandom,
|
private val secureRandom: SecureRandom,
|
||||||
val serviceHub: ServiceHubInternal,
|
private val serviceHub: ServiceHubInternal,
|
||||||
val unfinishedFibers: ReusableLatch,
|
private val unfinishedFibers: ReusableLatch,
|
||||||
val resetCustomTimeout: (StateMachineRunId, Long) -> Unit) {
|
private val resetCustomTimeout: (StateMachineRunId, Long) -> Unit) {
|
||||||
|
|
||||||
companion object {
|
companion object {
|
||||||
private val logger = contextLogger()
|
private val logger = contextLogger()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private val reloadCheckpointAfterSuspend = serviceHub.configuration.reloadCheckpointAfterSuspend
|
||||||
|
|
||||||
fun createFlowFromNonResidentFlow(nonResidentFlow: NonResidentFlow): Flow<*>? {
|
fun createFlowFromNonResidentFlow(nonResidentFlow: NonResidentFlow): Flow<*>? {
|
||||||
// As for paused flows we don't extract the serialized flow state, so we need to re-extract the checkpoint from the database.
|
// As for paused flows we don't extract the serialized flow state, so we need to re-extract the checkpoint from the database.
|
||||||
val checkpoint = when (nonResidentFlow.checkpoint.status) {
|
val checkpoint = when (nonResidentFlow.checkpoint.status) {
|
||||||
@ -65,13 +68,23 @@ class FlowCreator(
|
|||||||
return createFlowFromCheckpoint(nonResidentFlow.runId, checkpoint)
|
return createFlowFromCheckpoint(nonResidentFlow.runId, checkpoint)
|
||||||
}
|
}
|
||||||
|
|
||||||
fun createFlowFromCheckpoint(runId: StateMachineRunId, oldCheckpoint: Checkpoint): Flow<*>? {
|
fun createFlowFromCheckpoint(
|
||||||
|
runId: StateMachineRunId,
|
||||||
|
oldCheckpoint: Checkpoint,
|
||||||
|
reloadCheckpointAfterSuspendCount: Int? = null
|
||||||
|
): Flow<*>? {
|
||||||
val checkpoint = oldCheckpoint.copy(status = Checkpoint.FlowStatus.RUNNABLE)
|
val checkpoint = oldCheckpoint.copy(status = Checkpoint.FlowStatus.RUNNABLE)
|
||||||
val fiber = checkpoint.getFiberFromCheckpoint(runId) ?: return null
|
val fiber = checkpoint.getFiberFromCheckpoint(runId) ?: return null
|
||||||
val resultFuture = openFuture<Any?>()
|
val resultFuture = openFuture<Any?>()
|
||||||
fiber.logic.stateMachine = fiber
|
fiber.logic.stateMachine = fiber
|
||||||
verifyFlowLogicIsSuspendable(fiber.logic)
|
verifyFlowLogicIsSuspendable(fiber.logic)
|
||||||
val state = createStateMachineState(checkpoint, fiber, true)
|
val state = createStateMachineState(
|
||||||
|
checkpoint = checkpoint,
|
||||||
|
fiber = fiber,
|
||||||
|
anyCheckpointPersisted = true,
|
||||||
|
reloadCheckpointAfterSuspendCount = reloadCheckpointAfterSuspendCount
|
||||||
|
?: if (reloadCheckpointAfterSuspend) checkpoint.checkpointState.numberOfSuspends else null
|
||||||
|
)
|
||||||
fiber.transientValues = createTransientValues(runId, resultFuture)
|
fiber.transientValues = createTransientValues(runId, resultFuture)
|
||||||
fiber.transientState = state
|
fiber.transientState = state
|
||||||
return Flow(fiber, resultFuture)
|
return Flow(fiber, resultFuture)
|
||||||
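When a flow is rebuilt from a checkpoint above, the reload counter is either carried over from the caller or, if checkpoint reloading is enabled, seeded from the number of suspends already recorded. A sketch of that seeding rule on its own; the function name is illustrative:

```kotlin
// Returns null when the reload-after-suspend bookkeeping is disabled.
fun seedReloadCount(
    callerProvidedCount: Int?,
    reloadCheckpointAfterSuspend: Boolean,
    numberOfSuspendsInCheckpoint: Int
): Int? {
    return callerProvidedCount
        ?: if (reloadCheckpointAfterSuspend) numberOfSuspendsInCheckpoint else null
}
```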
@ -108,11 +121,13 @@ class FlowCreator(
|
|||||||
).getOrThrow()
|
).getOrThrow()
|
||||||
|
|
||||||
val state = createStateMachineState(
|
val state = createStateMachineState(
|
||||||
checkpoint,
|
checkpoint = checkpoint,
|
||||||
flowStateMachineImpl,
|
fiber = flowStateMachineImpl,
|
||||||
existingCheckpoint != null,
|
anyCheckpointPersisted = existingCheckpoint != null,
|
||||||
deduplicationHandler,
|
reloadCheckpointAfterSuspendCount = if (reloadCheckpointAfterSuspend) 0 else null,
|
||||||
senderUUID)
|
deduplicationHandler = deduplicationHandler,
|
||||||
|
senderUUID = senderUUID
|
||||||
|
)
|
||||||
flowStateMachineImpl.transientState = state
|
flowStateMachineImpl.transientState = state
|
||||||
return Flow(flowStateMachineImpl, resultFuture)
|
return Flow(flowStateMachineImpl, resultFuture)
|
||||||
}
|
}
|
||||||
@ -125,9 +140,7 @@ class FlowCreator(
|
|||||||
}
|
}
|
||||||
is FlowState.Started -> tryCheckpointDeserialize(this.flowState.frozenFiber, runId) ?: return null
|
is FlowState.Started -> tryCheckpointDeserialize(this.flowState.frozenFiber, runId) ?: return null
|
||||||
// Places calling this function rely on it to return null if the flow cannot be created from the checkpoint.
|
// Places calling this function rely on it to return null if the flow cannot be created from the checkpoint.
|
||||||
else -> {
|
else -> null
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -136,10 +149,18 @@ class FlowCreator(
|
|||||||
return try {
|
return try {
|
||||||
bytes.checkpointDeserialize(context = checkpointSerializationContext)
|
bytes.checkpointDeserialize(context = checkpointSerializationContext)
|
||||||
} catch (e: Exception) {
|
} catch (e: Exception) {
|
||||||
|
if (reloadCheckpointAfterSuspend && currentStateMachine() != null) {
|
||||||
|
logger.error(
|
||||||
|
"Unable to deserialize checkpoint for flow $flowId. [reloadCheckpointAfterSuspend] is turned on, throwing exception",
|
||||||
|
e
|
||||||
|
)
|
||||||
|
throw ReloadFlowFromCheckpointException(e)
|
||||||
|
} else {
|
||||||
logger.error("Unable to deserialize checkpoint for flow $flowId. Something is very wrong and this flow will be ignored.", e)
|
logger.error("Unable to deserialize checkpoint for flow $flowId. Something is very wrong and this flow will be ignored.", e)
|
||||||
null
|
null
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private fun verifyFlowLogicIsSuspendable(logic: FlowLogic<Any?>) {
|
private fun verifyFlowLogicIsSuspendable(logic: FlowLogic<Any?>) {
|
||||||
// Quasar requires (in Java 8) that at least the call method be annotated suspendable. Unfortunately, it's
|
// Quasar requires (in Java 8) that at least the call method be annotated suspendable. Unfortunately, it's
|
||||||
@ -169,12 +190,15 @@ class FlowCreator(
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Suppress("LongParameterList")
|
||||||
private fun createStateMachineState(
|
private fun createStateMachineState(
|
||||||
checkpoint: Checkpoint,
|
checkpoint: Checkpoint,
|
||||||
fiber: FlowStateMachineImpl<*>,
|
fiber: FlowStateMachineImpl<*>,
|
||||||
anyCheckpointPersisted: Boolean,
|
anyCheckpointPersisted: Boolean,
|
||||||
|
reloadCheckpointAfterSuspendCount: Int?,
|
||||||
deduplicationHandler: DeduplicationHandler? = null,
|
deduplicationHandler: DeduplicationHandler? = null,
|
||||||
senderUUID: String? = null): StateMachineState {
|
senderUUID: String? = null
|
||||||
|
): StateMachineState {
|
||||||
return StateMachineState(
|
return StateMachineState(
|
||||||
checkpoint = checkpoint,
|
checkpoint = checkpoint,
|
||||||
pendingDeduplicationHandlers = deduplicationHandler?.let { listOf(it) } ?: emptyList(),
|
pendingDeduplicationHandlers = deduplicationHandler?.let { listOf(it) } ?: emptyList(),
|
||||||
@ -186,6 +210,8 @@ class FlowCreator(
|
|||||||
isRemoved = false,
|
isRemoved = false,
|
||||||
isKilled = false,
|
isKilled = false,
|
||||||
flowLogic = fiber.logic,
|
flowLogic = fiber.logic,
|
||||||
senderUUID = senderUUID)
|
senderUUID = senderUUID,
|
||||||
|
reloadCheckpointAfterSuspendCount = reloadCheckpointAfterSuspendCount
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -29,6 +29,7 @@ import net.corda.core.internal.DeclaredField
|
|||||||
import net.corda.core.internal.FlowIORequest
|
import net.corda.core.internal.FlowIORequest
|
||||||
import net.corda.core.internal.FlowStateMachine
|
import net.corda.core.internal.FlowStateMachine
|
||||||
import net.corda.core.internal.IdempotentFlow
|
import net.corda.core.internal.IdempotentFlow
|
||||||
|
import net.corda.core.internal.VisibleForTesting
|
||||||
import net.corda.core.internal.concurrent.OpenFuture
|
import net.corda.core.internal.concurrent.OpenFuture
|
||||||
import net.corda.core.internal.isIdempotentFlow
|
import net.corda.core.internal.isIdempotentFlow
|
||||||
import net.corda.core.internal.isRegularFile
|
import net.corda.core.internal.isRegularFile
|
||||||
@ -87,6 +88,9 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
|
|||||||
private val log: Logger = LoggerFactory.getLogger("net.corda.flow")
|
private val log: Logger = LoggerFactory.getLogger("net.corda.flow")
|
||||||
|
|
||||||
private val SERIALIZER_BLOCKER = Fiber::class.java.getDeclaredField("SERIALIZER_BLOCKER").apply { isAccessible = true }.get(null)
|
private val SERIALIZER_BLOCKER = Fiber::class.java.getDeclaredField("SERIALIZER_BLOCKER").apply { isAccessible = true }.get(null)
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
var onReloadFlowFromCheckpoint: ((id: StateMachineRunId) -> Unit)? = null
|
||||||
}
|
}
|
||||||
|
|
||||||
data class TransientValues(
|
data class TransientValues(
|
||||||
@ -529,6 +533,18 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
|
|||||||
unpark(SERIALIZER_BLOCKER)
|
unpark(SERIALIZER_BLOCKER)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
transientState.reloadCheckpointAfterSuspendCount?.let { count ->
|
||||||
|
if (count < transientState.checkpoint.checkpointState.numberOfSuspends) {
|
||||||
|
onReloadFlowFromCheckpoint?.invoke(id)
|
||||||
|
processEventImmediately(
|
||||||
|
Event.ReloadFlowFromCheckpointAfterSuspend,
|
||||||
|
isDbTransactionOpenOnEntry = false,
|
||||||
|
isDbTransactionOpenOnExit = false
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return uncheckedCast(processEventsUntilFlowIsResumed(
|
return uncheckedCast(processEventsUntilFlowIsResumed(
|
||||||
isDbTransactionOpenOnEntry = false,
|
isDbTransactionOpenOnEntry = false,
|
||||||
isDbTransactionOpenOnExit = true
|
isDbTransactionOpenOnExit = true
|
||||||
|
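The block added above makes a suspending flow compare its reload counter with the checkpoint's suspend counter and schedule a ReloadFlowFromCheckpointAfterSuspend event whenever it is behind. A sketch of that comparison as a pure predicate; the name is illustrative:

```kotlin
// A flow is reloaded after a suspend when its reload count lags the checkpoint's suspend count.
// A null count means reloadCheckpointAfterSuspend is disabled.
fun shouldReloadAfterSuspend(reloadCheckpointAfterSuspendCount: Int?, numberOfSuspends: Int): Boolean {
    return reloadCheckpointAfterSuspendCount != null && reloadCheckpointAfterSuspendCount < numberOfSuspends
}
```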
@ -30,11 +30,9 @@ import net.corda.core.utilities.debug
|
|||||||
import net.corda.node.internal.InitiatedFlowFactory
|
import net.corda.node.internal.InitiatedFlowFactory
|
||||||
import net.corda.node.services.api.CheckpointStorage
|
import net.corda.node.services.api.CheckpointStorage
|
||||||
import net.corda.node.services.api.ServiceHubInternal
|
import net.corda.node.services.api.ServiceHubInternal
|
||||||
import net.corda.node.services.config.shouldCheckCheckpoints
|
|
||||||
import net.corda.node.services.messaging.DeduplicationHandler
|
import net.corda.node.services.messaging.DeduplicationHandler
|
||||||
|
import net.corda.node.services.statemachine.FlowStateMachineImpl.Companion.currentStateMachine
|
||||||
import net.corda.node.services.statemachine.interceptors.DumpHistoryOnErrorInterceptor
|
import net.corda.node.services.statemachine.interceptors.DumpHistoryOnErrorInterceptor
|
||||||
import net.corda.node.services.statemachine.interceptors.FiberDeserializationChecker
|
|
||||||
import net.corda.node.services.statemachine.interceptors.FiberDeserializationCheckingInterceptor
|
|
||||||
import net.corda.node.services.statemachine.interceptors.HospitalisingInterceptor
|
import net.corda.node.services.statemachine.interceptors.HospitalisingInterceptor
|
||||||
import net.corda.node.services.statemachine.interceptors.PrintingInterceptor
|
import net.corda.node.services.statemachine.interceptors.PrintingInterceptor
|
||||||
import net.corda.node.utilities.AffinityExecutor
|
import net.corda.node.utilities.AffinityExecutor
|
||||||
@ -89,7 +87,6 @@ internal class SingleThreadedStateMachineManager(
|
|||||||
private val flowMessaging: FlowMessaging = FlowMessagingImpl(serviceHub)
|
private val flowMessaging: FlowMessaging = FlowMessagingImpl(serviceHub)
|
||||||
private val actionFutureExecutor = ActionFutureExecutor(innerState, serviceHub, scheduledFutureExecutor)
|
private val actionFutureExecutor = ActionFutureExecutor(innerState, serviceHub, scheduledFutureExecutor)
|
||||||
private val flowTimeoutScheduler = FlowTimeoutScheduler(innerState, scheduledFutureExecutor, serviceHub)
|
private val flowTimeoutScheduler = FlowTimeoutScheduler(innerState, scheduledFutureExecutor, serviceHub)
|
||||||
private val fiberDeserializationChecker = if (serviceHub.configuration.shouldCheckCheckpoints()) FiberDeserializationChecker() else null
|
|
||||||
private val ourSenderUUID = serviceHub.networkService.ourSenderUUID
|
private val ourSenderUUID = serviceHub.networkService.ourSenderUUID
|
||||||
|
|
||||||
private var checkpointSerializationContext: CheckpointSerializationContext? = null
|
private var checkpointSerializationContext: CheckpointSerializationContext? = null
|
||||||
@ -97,6 +94,7 @@ internal class SingleThreadedStateMachineManager(
|
|||||||
|
|
||||||
override val flowHospital: StaffedFlowHospital = makeFlowHospital()
|
override val flowHospital: StaffedFlowHospital = makeFlowHospital()
|
||||||
private val transitionExecutor = makeTransitionExecutor()
|
private val transitionExecutor = makeTransitionExecutor()
|
||||||
|
private val reloadCheckpointAfterSuspend = serviceHub.configuration.reloadCheckpointAfterSuspend
|
||||||
|
|
||||||
override val allStateMachines: List<FlowLogic<*>>
|
override val allStateMachines: List<FlowLogic<*>>
|
||||||
get() = innerState.withLock { flows.values.map { it.fiber.logic } }
|
get() = innerState.withLock { flows.values.map { it.fiber.logic } }
|
||||||
@ -124,7 +122,6 @@ internal class SingleThreadedStateMachineManager(
|
|||||||
)
|
)
|
||||||
this.checkpointSerializationContext = checkpointSerializationContext
|
this.checkpointSerializationContext = checkpointSerializationContext
|
||||||
val actionExecutor = makeActionExecutor(checkpointSerializationContext)
|
val actionExecutor = makeActionExecutor(checkpointSerializationContext)
|
||||||
fiberDeserializationChecker?.start(checkpointSerializationContext)
|
|
||||||
when (startMode) {
|
when (startMode) {
|
||||||
StateMachineManager.StartMode.ExcludingPaused -> {}
|
StateMachineManager.StartMode.ExcludingPaused -> {}
|
||||||
StateMachineManager.StartMode.Safe -> markAllFlowsAsPaused()
|
StateMachineManager.StartMode.Safe -> markAllFlowsAsPaused()
|
||||||
@ -207,10 +204,6 @@ internal class SingleThreadedStateMachineManager(
|
|||||||
// Account for any expected Fibers in a test scenario.
|
// Account for any expected Fibers in a test scenario.
|
||||||
liveFibers.countDown(allowedUnsuspendedFiberCount)
|
liveFibers.countDown(allowedUnsuspendedFiberCount)
|
||||||
liveFibers.await()
|
liveFibers.await()
|
||||||
fiberDeserializationChecker?.let {
|
|
||||||
val foundUnrestorableFibers = it.stop()
|
|
||||||
check(!foundUnrestorableFibers) { "Unrestorable checkpoints were created, please check the logs for details." }
|
|
||||||
}
|
|
||||||
flowHospital.close()
|
flowHospital.close()
|
||||||
scheduledFutureExecutor.shutdown()
|
scheduledFutureExecutor.shutdown()
|
||||||
scheduler.shutdown()
|
scheduler.shutdown()
|
||||||
@ -397,7 +390,7 @@ internal class SingleThreadedStateMachineManager(
|
|||||||
|
|
||||||
val checkpoint = tryDeserializeCheckpoint(serializedCheckpoint, flowId) ?: return
|
val checkpoint = tryDeserializeCheckpoint(serializedCheckpoint, flowId) ?: return
|
||||||
// Resurrect flow
|
// Resurrect flow
|
||||||
flowCreator.createFlowFromCheckpoint(flowId, checkpoint) ?: return
|
flowCreator.createFlowFromCheckpoint(flowId, checkpoint, currentState.reloadCheckpointAfterSuspendCount) ?: return
|
||||||
} else {
|
} else {
|
||||||
// Just flow initiation message
|
// Just flow initiation message
|
||||||
null
|
null
|
||||||
@ -632,10 +625,18 @@ internal class SingleThreadedStateMachineManager(
|
|||||||
return try {
|
return try {
|
||||||
serializedCheckpoint.deserialize(checkpointSerializationContext!!)
|
serializedCheckpoint.deserialize(checkpointSerializationContext!!)
|
||||||
} catch (e: Exception) {
|
} catch (e: Exception) {
|
||||||
|
if (reloadCheckpointAfterSuspend && currentStateMachine() != null) {
|
||||||
|
logger.error(
|
||||||
|
"Unable to deserialize checkpoint for flow $flowId. [reloadCheckpointAfterSuspend] is turned on, throwing exception",
|
||||||
|
e
|
||||||
|
)
|
||||||
|
throw ReloadFlowFromCheckpointException(e)
|
||||||
|
} else {
|
||||||
logger.error("Unable to deserialize checkpoint for flow $flowId. Something is very wrong and this flow will be ignored.", e)
|
logger.error("Unable to deserialize checkpoint for flow $flowId. Something is very wrong and this flow will be ignored.", e)
|
||||||
null
|
null
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private fun addAndStartFlow(id: StateMachineRunId, flow: Flow<*>) {
|
private fun addAndStartFlow(id: StateMachineRunId, flow: Flow<*>) {
|
||||||
val checkpoint = flow.fiber.snapshot().checkpoint
|
val checkpoint = flow.fiber.snapshot().checkpoint
|
||||||
@ -700,9 +701,6 @@ internal class SingleThreadedStateMachineManager(
|
|||||||
if (serviceHub.configuration.devMode) {
|
if (serviceHub.configuration.devMode) {
|
||||||
interceptors.add { DumpHistoryOnErrorInterceptor(it) }
|
interceptors.add { DumpHistoryOnErrorInterceptor(it) }
|
||||||
}
|
}
|
||||||
if (serviceHub.configuration.shouldCheckCheckpoints()) {
|
|
||||||
interceptors.add { FiberDeserializationCheckingInterceptor(fiberDeserializationChecker!!, it) }
|
|
||||||
}
|
|
||||||
if (logger.isDebugEnabled) {
|
if (logger.isDebugEnabled) {
|
||||||
interceptors.add { PrintingInterceptor(it) }
|
interceptors.add { PrintingInterceptor(it) }
|
||||||
}
|
}
|
||||||
|
@ -589,6 +589,7 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging,
|
|||||||
return if (newError.mentionsThrowable(StateTransitionException::class.java)) {
|
return if (newError.mentionsThrowable(StateTransitionException::class.java)) {
|
||||||
when {
|
when {
|
||||||
newError.mentionsThrowable(InterruptedException::class.java) -> Diagnosis.TERMINAL
|
newError.mentionsThrowable(InterruptedException::class.java) -> Diagnosis.TERMINAL
|
||||||
|
newError.mentionsThrowable(ReloadFlowFromCheckpointException::class.java) -> Diagnosis.OVERNIGHT_OBSERVATION
|
||||||
newError.mentionsThrowable(AsyncOperationTransitionException::class.java) -> Diagnosis.NOT_MY_SPECIALTY
|
newError.mentionsThrowable(AsyncOperationTransitionException::class.java) -> Diagnosis.NOT_MY_SPECIALTY
|
||||||
history.notDischargedForTheSameThingMoreThan(2, this, currentState) -> Diagnosis.DISCHARGE
|
history.notDischargedForTheSameThingMoreThan(2, this, currentState) -> Diagnosis.DISCHARGE
|
||||||
else -> Diagnosis.OVERNIGHT_OBSERVATION
|
else -> Diagnosis.OVERNIGHT_OBSERVATION
|
||||||
|
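The extra clause above teaches the transition-error staff member to send ReloadFlowFromCheckpointException straight to overnight observation instead of retrying it. A condensed sketch of that decision order, with a small local Diagnosis enum and cause-chain helper so the snippet stands alone (the real ones live inside StaffedFlowHospital), and with the discharge-history check omitted:

```kotlin
import net.corda.node.services.statemachine.AsyncOperationTransitionException
import net.corda.node.services.statemachine.ReloadFlowFromCheckpointException

enum class Diagnosis { TERMINAL, OVERNIGHT_OBSERVATION, NOT_MY_SPECIALTY, DISCHARGE }

// Walks the cause chain, standing in for StaffedFlowHospital's mentionsThrowable helper.
fun Throwable.mentions(type: Class<out Throwable>): Boolean =
    generateSequence(this) { it.cause }.any { type.isInstance(it) }

// Condensed version of the decision order added above.
fun diagnoseStateTransitionError(error: Throwable): Diagnosis = when {
    error.mentions(InterruptedException::class.java) -> Diagnosis.TERMINAL
    error.mentions(ReloadFlowFromCheckpointException::class.java) -> Diagnosis.OVERNIGHT_OBSERVATION
    error.mentions(AsyncOperationTransitionException::class.java) -> Diagnosis.NOT_MY_SPECIALTY
    else -> Diagnosis.OVERNIGHT_OBSERVATION
}
```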
@ -59,7 +59,8 @@ data class StateMachineState(
|
|||||||
val isRemoved: Boolean,
|
val isRemoved: Boolean,
|
||||||
@Volatile
|
@Volatile
|
||||||
var isKilled: Boolean,
|
var isKilled: Boolean,
|
||||||
val senderUUID: String?
|
val senderUUID: String?,
|
||||||
|
val reloadCheckpointAfterSuspendCount: Int?
|
||||||
) : KryoSerializable {
|
) : KryoSerializable {
|
||||||
override fun write(kryo: Kryo?, output: Output?) {
|
override fun write(kryo: Kryo?, output: Output?) {
|
||||||
throw IllegalStateException("${StateMachineState::class.qualifiedName} should never be serialized")
|
throw IllegalStateException("${StateMachineState::class.qualifiedName} should never be serialized")
|
||||||
|
@@ -1,6 +1,6 @@
 package net.corda.node.services.statemachine
 
-import net.corda.core.CordaException
+import net.corda.core.CordaRuntimeException
 import net.corda.core.serialization.ConstructorForDeserialization
 
 // CORDA-3353 - These exceptions should not be propagated up to rpc as they suppress the real exceptions
@@ -9,12 +9,17 @@ class StateTransitionException(
     val transitionAction: Action?,
     val transitionEvent: Event?,
     val exception: Exception
-) : CordaException(exception.message, exception) {
+) : CordaRuntimeException(exception.message, exception) {
 
     @ConstructorForDeserialization
     constructor(exception: Exception): this(null, null, exception)
 }
 
-class AsyncOperationTransitionException(exception: Exception) : CordaException(exception.message, exception)
+class AsyncOperationTransitionException(exception: Exception) : CordaRuntimeException(exception.message, exception)
 
-class ErrorStateTransitionException(val exception: Exception) : CordaException(exception.message, exception)
+class ErrorStateTransitionException(val exception: Exception) : CordaRuntimeException(exception.message, exception)
 
+class ReloadFlowFromCheckpointException(cause: Exception) : CordaRuntimeException(
+    "Could not reload flow from checkpoint. This is likely due to a discrepancy " +
+            "between the serialization and deserialization of an object in the flow's checkpoint", cause
+)
@@ -1,101 +0,0 @@ (whole file deleted)
package net.corda.node.services.statemachine.interceptors

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.internal.CheckpointSerializationContext
import net.corda.core.serialization.internal.checkpointDeserialize
import net.corda.core.utilities.contextLogger
import net.corda.node.services.statemachine.ActionExecutor
import net.corda.node.services.statemachine.Event
import net.corda.node.services.statemachine.FlowFiber
import net.corda.node.services.statemachine.FlowState
import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.services.statemachine.StateMachineState
import net.corda.node.services.statemachine.TransitionExecutor
import net.corda.node.services.statemachine.transitions.FlowContinuation
import net.corda.node.services.statemachine.transitions.TransitionResult
import java.util.concurrent.LinkedBlockingQueue
import kotlin.concurrent.thread

/**
 * This interceptor checks whether a checkpointed fiber state can be deserialised in a separate thread.
 */
class FiberDeserializationCheckingInterceptor(
    val fiberDeserializationChecker: FiberDeserializationChecker,
    val delegate: TransitionExecutor
) : TransitionExecutor {

    @Suspendable
    override fun executeTransition(
        fiber: FlowFiber,
        previousState: StateMachineState,
        event: Event,
        transition: TransitionResult,
        actionExecutor: ActionExecutor
    ): Pair<FlowContinuation, StateMachineState> {
        val (continuation, nextState) = delegate.executeTransition(fiber, previousState, event, transition, actionExecutor)
        val previousFlowState = previousState.checkpoint.flowState
        val nextFlowState = nextState.checkpoint.flowState
        if (nextFlowState is FlowState.Started) {
            if (previousFlowState !is FlowState.Started || previousFlowState.frozenFiber != nextFlowState.frozenFiber) {
                fiberDeserializationChecker.submitCheck(nextFlowState.frozenFiber)
            }
        }
        return Pair(continuation, nextState)
    }
}

/**
 * A fiber deserialisation checker thread. It checks the queued up serialised checkpoints to see if they can be
 * deserialised. This is only run in development mode to allow detecting of corrupt serialised checkpoints before they
 * are actually used.
 */
class FiberDeserializationChecker {
    companion object {
        val log = contextLogger()
    }

    private sealed class Job {
        class Check(val serializedFiber: SerializedBytes<FlowStateMachineImpl<*>>) : Job()
        object Finish : Job()
    }

    private var checkerThread: Thread? = null
    private val jobQueue = LinkedBlockingQueue<Job>()
    private var foundUnrestorableFibers: Boolean = false

    fun start(checkpointSerializationContext: CheckpointSerializationContext) {
        require(checkerThread == null){"Checking thread must not already be started"}
        checkerThread = thread(name = "FiberDeserializationChecker") {
            while (true) {
                val job = jobQueue.take()
                when (job) {
                    is Job.Check -> {
                        try {
                            job.serializedFiber.checkpointDeserialize(context = checkpointSerializationContext)
                        } catch (exception: Exception) {
                            log.error("Encountered unrestorable checkpoint!", exception)
                            foundUnrestorableFibers = true
                        }
                    }
                    Job.Finish -> {
                        return@thread
                    }
                }
            }
        }
    }

    fun submitCheck(serializedFiber: SerializedBytes<FlowStateMachineImpl<*>>) {
        jobQueue.add(Job.Check(serializedFiber))
    }

    /**
     * Returns true if some unrestorable checkpoints were encountered, false otherwise
     */
    fun stop(): Boolean {
        jobQueue.add(Job.Finish)
        checkerThread?.join()
        return foundUnrestorableFibers
    }
}
@@ -58,7 +58,8 @@ class TopLevelTransition(
             is Event.InitiateFlow -> initiateFlowTransition(event)
             is Event.AsyncOperationCompletion -> asyncOperationCompletionTransition(event)
             is Event.AsyncOperationThrows -> asyncOperationThrowsTransition(event)
-            is Event.RetryFlowFromSafePoint -> retryFlowFromSafePointTransition(startingState)
+            is Event.RetryFlowFromSafePoint -> retryFlowFromSafePointTransition()
+            is Event.ReloadFlowFromCheckpointAfterSuspend -> reloadFlowFromCheckpointAfterSuspendTransition()
             is Event.OvernightObservation -> overnightObservationTransition()
             is Event.WakeUpFromSleep -> wakeUpFromSleepTransition()
         }
@@ -315,10 +316,18 @@ class TopLevelTransition(
         }
     }
 
-    private fun retryFlowFromSafePointTransition(startingState: StateMachineState): TransitionResult {
+    private fun retryFlowFromSafePointTransition(): TransitionResult {
         return builder {
             // Need to create a flow from the prior checkpoint or flow initiation.
-            actions.add(Action.RetryFlowFromSafePoint(startingState))
+            actions.add(Action.RetryFlowFromSafePoint(currentState))
+            FlowContinuation.Abort
+        }
+    }
+
+    private fun reloadFlowFromCheckpointAfterSuspendTransition(): TransitionResult {
+        return builder {
+            currentState = currentState.copy(reloadCheckpointAfterSuspendCount = currentState.reloadCheckpointAfterSuspendCount!! + 1)
+            actions.add(Action.RetryFlowFromSafePoint(currentState))
             FlowContinuation.Abort
         }
     }
@@ -1,49 +0,0 @@ (whole file deleted)
package net.corda.node.services.transactions

import net.corda.core.flows.FlowSession
import net.corda.core.internal.notary.SinglePartyNotaryService
import net.corda.core.internal.notary.NotaryServiceFlow
import net.corda.core.schemas.MappedSchema
import net.corda.core.utilities.seconds
import net.corda.node.services.api.ServiceHubInternal
import java.security.PublicKey

/** An embedded notary service that uses the node's database to store committed states. */
class SimpleNotaryService(override val services: ServiceHubInternal, override val notaryIdentityKey: PublicKey) : SinglePartyNotaryService() {
    private val notaryConfig = services.configuration.notary
            ?: throw IllegalArgumentException("Failed to register ${this::class.java}: notary configuration not present")

    init {
        val mode = if (notaryConfig.validating) "validating" else "non-validating"
        log.info("Starting notary in $mode mode")
    }

    override val uniquenessProvider = PersistentUniquenessProvider(
            services.clock,
            services.database,
            services.cacheFactory,
            ::signTransaction)

    override fun createServiceFlow(otherPartySession: FlowSession): NotaryServiceFlow {
        return if (notaryConfig.validating) {
            ValidatingNotaryFlow(otherPartySession, this, notaryConfig.etaMessageThresholdSeconds.seconds)
        } else {
            NonValidatingNotaryFlow(otherPartySession, this, notaryConfig.etaMessageThresholdSeconds.seconds)
        }
    }

    override fun start() {}
    override fun stop() {}
}

// Entities used by a Notary
object NodeNotarySchema

object NodeNotarySchemaV1 : MappedSchema(schemaFamily = NodeNotarySchema.javaClass, version = 1,
        mappedTypes = listOf(PersistentUniquenessProvider.BaseComittedState::class.java,
                PersistentUniquenessProvider.Request::class.java,
                PersistentUniquenessProvider.CommittedState::class.java,
                PersistentUniquenessProvider.CommittedTransaction::class.java
        )) {
    override val migrationResource = "node-notary.changelog-master"
}
@@ -9,10 +9,10 @@ import net.corda.node.VersionInfo
 import net.corda.node.internal.cordapp.VirtualCordapp
 import net.corda.node.services.api.ServiceHubInternal
 import net.corda.node.services.config.NotaryConfig
-import net.corda.node.services.transactions.SimpleNotaryService
 import net.corda.nodeapi.internal.cordapp.CordappLoader
 import net.corda.notary.experimental.bftsmart.BFTSmartNotaryService
 import net.corda.notary.experimental.raft.RaftNotaryService
+import net.corda.notary.jpa.JPANotaryService
 import java.lang.reflect.InvocationTargetException
 import java.security.PublicKey
 
@@ -44,8 +44,8 @@ class NotaryLoader(
                 RaftNotaryService::class.java
             }
             else -> {
-                builtInNotary = VirtualCordapp.generateSimpleNotary(versionInfo)
-                SimpleNotaryService::class.java
+                builtInNotary = VirtualCordapp.generateJPANotary(versionInfo)
+                JPANotaryService::class.java
             }
         }
     } else {
@@ -69,3 +69,8 @@ val HttpURLConnection.cacheControl: CacheControl
     get() {
         return CacheControl.parse(Headers.of(headerFields.filterKeys { it != null }.mapValues { it.value[0] }))
     }
+
+val HttpURLConnection.cordaServerVersion: String
+    get() {
+        return headerFields["X-Corda-Server-Version"]?.singleOrNull() ?: "1"
+    }
node/src/main/kotlin/net/corda/notary/common/BatchSigning.kt (new file)
@@ -0,0 +1,54 @@
package net.corda.notary.common

import net.corda.core.crypto.Crypto
import net.corda.core.crypto.MerkleTree
import net.corda.core.crypto.PartialMerkleTree
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.SignableData
import net.corda.core.crypto.SignatureMetadata
import net.corda.core.crypto.TransactionSignature
import net.corda.core.crypto.sha256
import net.corda.core.flows.NotaryError
import net.corda.core.node.ServiceHub
import java.security.PublicKey

typealias BatchSigningFunction = (Iterable<SecureHash>) -> BatchSignature

/** Generates a signature over the batch of [txIds]. */
fun signBatch(
    txIds: Iterable<SecureHash>,
    notaryIdentityKey: PublicKey,
    services: ServiceHub
): BatchSignature {
    val merkleTree = MerkleTree.getMerkleTree(txIds.map { it.sha256() })
    val merkleTreeRoot = merkleTree.hash
    val signableData = SignableData(
        merkleTreeRoot,
        SignatureMetadata(
            services.myInfo.platformVersion,
            Crypto.findSignatureScheme(notaryIdentityKey).schemeNumberID
        )
    )
    val sig = services.keyManagementService.sign(signableData, notaryIdentityKey)
    return BatchSignature(sig, merkleTree)
}

/** The outcome of just committing a transaction. */
sealed class InternalResult {
    object Success : InternalResult()
    data class Failure(val error: NotaryError) : InternalResult()
}

data class BatchSignature(
    val rootSignature: TransactionSignature,
    val fullMerkleTree: MerkleTree) {
    /** Extracts a signature with a partial Merkle tree for the specified leaf in the batch signature. */
    fun forParticipant(txId: SecureHash): TransactionSignature {
        return TransactionSignature(
            rootSignature.bytes,
            rootSignature.by,
            rootSignature.signatureMetadata,
            PartialMerkleTree.build(fullMerkleTree, listOf(txId.sha256()))
        )
    }
}
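The helpers above sign one Merkle root over many transaction ids and then hand each requester a signature scoped to its own transaction via a partial Merkle proof. A minimal usage sketch, assuming the imports of the file above and that a ServiceHub and the notary's key are already in hand (the helper name signAndDistribute is hypothetical, not part of the commit):

// Hypothetical usage sketch of signBatch/forParticipant from the file above.
fun signAndDistribute(
    txIds: List<SecureHash>,
    notaryIdentityKey: PublicKey,
    services: ServiceHub
): Map<SecureHash, TransactionSignature> {
    // One signature over the Merkle root of the whole batch...
    val batchSignature = signBatch(txIds, notaryIdentityKey, services)
    // ...then a per-transaction signature carrying a partial Merkle proof for each leaf.
    return txIds.map { it to batchSignature.forParticipant(it) }.toMap()
}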
@@ -0,0 +1,9 @@ (new file)
package net.corda.notary.jpa

data class JPANotaryConfiguration(
    val batchSize: Int = 32,
    val batchTimeoutMs: Long = 200L,
    val maxInputStates: Int = 2000,
    val maxDBTransactionRetryCount: Int = 10,
    val backOffBaseMs: Long = 20L
)
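These defaults are what the JPA notary falls back to when no extra configuration is supplied. A sketch of overriding a couple of fields in code; the values are arbitrary examples, not recommendations:

// Arbitrary example values; only the named parameters differ from the defaults above.
val tunedConfig = JPANotaryConfiguration(batchSize = 64, maxInputStates = 1000)
// The remaining fields keep their defaults, e.g. tunedConfig.backOffBaseMs == 20L.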
@@ -0,0 +1,55 @@ (new file)
package net.corda.notary.jpa

import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowSession
import net.corda.core.internal.notary.NotaryServiceFlow
import net.corda.core.internal.notary.SinglePartyNotaryService
import net.corda.core.utilities.seconds
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.transactions.NonValidatingNotaryFlow
import net.corda.node.services.transactions.ValidatingNotaryFlow
import net.corda.nodeapi.internal.config.parseAs
import net.corda.notary.common.signBatch
import java.security.PublicKey

/** Notary service backed by a relational database. */
class JPANotaryService(
    override val services: ServiceHubInternal,
    override val notaryIdentityKey: PublicKey) : SinglePartyNotaryService() {

    private val notaryConfig = services.configuration.notary
            ?: throw IllegalArgumentException("Failed to register ${this::class.java}: notary configuration not present")


    @Suppress("TooGenericExceptionCaught")
    override val uniquenessProvider = with(services) {
        val jpaNotaryConfig = try {
            notaryConfig.extraConfig?.parseAs() ?: JPANotaryConfiguration()
        } catch (e: Exception) {
            throw IllegalArgumentException("Failed to register ${JPANotaryService::class.java}: extra notary configuration parameters invalid")
        }
        JPAUniquenessProvider(
            clock,
            database,
            jpaNotaryConfig,
            configuration.myLegalName,
            ::signTransactionBatch
        )
    }

    private fun signTransactionBatch(txIds: Iterable<SecureHash>)
            = signBatch(txIds, notaryIdentityKey, services)

    override fun createServiceFlow(otherPartySession: FlowSession): NotaryServiceFlow {
        return if (notaryConfig.validating) {
            ValidatingNotaryFlow(otherPartySession, this, notaryConfig.etaMessageThresholdSeconds.seconds)
        } else NonValidatingNotaryFlow(otherPartySession, this, notaryConfig.etaMessageThresholdSeconds.seconds)
    }

    override fun start() {
    }

    override fun stop() {
        uniquenessProvider.stop()
    }
}
@@ -0,0 +1,408 @@ (new file)
package net.corda.notary.jpa

import com.google.common.collect.Queues
import net.corda.core.concurrent.CordaFuture
import net.corda.core.contracts.StateRef
import net.corda.core.contracts.TimeWindow
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.sha256
import net.corda.core.flows.NotarisationRequestSignature
import net.corda.core.flows.NotaryError
import net.corda.core.flows.StateConsumptionDetails
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.OpenFuture
import net.corda.core.internal.concurrent.openFuture
import net.corda.notary.common.BatchSigningFunction
import net.corda.core.internal.notary.NotaryInternalException
import net.corda.core.internal.notary.UniquenessProvider
import net.corda.core.internal.notary.isConsumedByTheSameTx
import net.corda.core.internal.notary.validateTimeWindow
import net.corda.core.schemas.PersistentStateRef
import net.corda.core.serialization.CordaSerializable
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.serialize
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.debug
import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
import net.corda.notary.common.InternalResult
import net.corda.serialization.internal.CordaSerializationEncoding
import org.hibernate.Session
import java.sql.SQLException
import java.time.Clock
import java.time.Instant
import java.util.*
import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.TimeUnit
import javax.annotation.concurrent.ThreadSafe
import javax.persistence.Column
import javax.persistence.EmbeddedId
import javax.persistence.Entity
import javax.persistence.Id
import javax.persistence.Lob
import javax.persistence.NamedQuery
import kotlin.concurrent.thread

/** A JPA backed Uniqueness provider */
@Suppress("MagicNumber") // database column length
@ThreadSafe
class JPAUniquenessProvider(
    val clock: Clock,
    val database: CordaPersistence,
    val config: JPANotaryConfiguration = JPANotaryConfiguration(),
    val notaryWorkerName: CordaX500Name,
    val signBatch: BatchSigningFunction
) : UniquenessProvider, SingletonSerializeAsToken() {

    // This is the prefix of the ID in the request log table, to allow running multiple instances that access the
    // same table.
    private val instanceId = UUID.randomUUID()

    @Entity
    @javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}notary_request_log")
    @CordaSerializable
    class Request(
        @Id
        @Column(nullable = true, length = 76)
        var id: String? = null,

        @Column(name = "consuming_transaction_id", nullable = true, length = 64)
        val consumingTxHash: String?,

        @Column(name = "requesting_party_name", nullable = true, length = 255)
        var partyName: String?,

        @Lob
        @Column(name = "request_signature", nullable = false)
        val requestSignature: ByteArray,

        @Column(name = "request_timestamp", nullable = false)
        var requestDate: Instant,

        @Column(name = "worker_node_x500_name", nullable = true, length = 255)
        val workerNodeX500Name: String?
    )

    private data class CommitRequest(
        val states: List<StateRef>,
        val txId: SecureHash,
        val callerIdentity: Party,
        val requestSignature: NotarisationRequestSignature,
        val timeWindow: TimeWindow?,
        val references: List<StateRef>,
        val future: OpenFuture<UniquenessProvider.Result>,
        val requestEntity: Request,
        val committedStatesEntities: List<CommittedState>)

    @Entity
    @javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}notary_committed_states")
    @NamedQuery(name = "CommittedState.select", query = "SELECT c from JPAUniquenessProvider\$CommittedState c WHERE c.id in :ids")
    class CommittedState(
        @EmbeddedId
        val id: PersistentStateRef,
        @Column(name = "consuming_transaction_id", nullable = false, length = 64)
        val consumingTxHash: String)

    @Entity
    @javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}notary_committed_txs")
    class CommittedTransaction(
        @Id
        @Column(name = "transaction_id", nullable = false, length = 64)
        val transactionId: String
    )

    private val requestQueue = LinkedBlockingQueue<CommitRequest>(requestQueueSize)

    /** A requestEntity processor thread. */
    private val processorThread = thread(name = "Notary request queue processor", isDaemon = true) {
        try {
            val buffer = LinkedList<CommitRequest>()
            while (!Thread.interrupted()) {
                val drainedSize = Queues.drain(requestQueue, buffer, config.batchSize, config.batchTimeoutMs, TimeUnit.MILLISECONDS)
                if (drainedSize == 0) continue
                processRequests(buffer)
                buffer.clear()
            }
        } catch (_: InterruptedException) {
            log.debug { "Process interrupted."}
        }
        log.debug { "Shutting down with ${requestQueue.size} in-flight requests unprocessed." }
    }

    fun stop() {
        processorThread.interrupt()
    }

    companion object {
        private const val requestQueueSize = 100_000
        private const val jdbcBatchSize = 100_000
        private val log = contextLogger()

        fun encodeStateRef(s: StateRef): PersistentStateRef {
            return PersistentStateRef(s.txhash.toString(), s.index)
        }

        fun decodeStateRef(s: PersistentStateRef): StateRef {
            return StateRef(txhash = SecureHash.parse(s.txId), index = s.index)
        }
    }

    /**
     * Generates and adds a [CommitRequest] to the requestEntity queue. If the requestEntity queue is full, this method will block
     * until space is available.
     *
     * Returns a future that will complete once the requestEntity is processed, containing the commit [Result].
     */
    override fun commit(
        states: List<StateRef>,
        txId: SecureHash,
        callerIdentity: Party,
        requestSignature: NotarisationRequestSignature,
        timeWindow: TimeWindow?,
        references: List<StateRef>
    ): CordaFuture<UniquenessProvider.Result> {
        val future = openFuture<UniquenessProvider.Result>()
        val requestEntities = Request(consumingTxHash = txId.toString(),
            partyName = callerIdentity.name.toString(),
            requestSignature = requestSignature.serialize(context = SerializationDefaults.STORAGE_CONTEXT.withEncoding(CordaSerializationEncoding.SNAPPY)).bytes,
            requestDate = clock.instant(),
            workerNodeX500Name = notaryWorkerName.toString())
        val stateEntities = states.map {
            CommittedState(
                encodeStateRef(it),
                txId.toString()
            )
        }
        val request = CommitRequest(states, txId, callerIdentity, requestSignature, timeWindow, references, future, requestEntities, stateEntities)

        requestQueue.put(request)

        return future
    }

    // Safe up to 100k requests per second.
    private var nextRequestId = System.currentTimeMillis() * 100

    private fun logRequests(requests: List<CommitRequest>) {
        database.transaction {
            for (request in requests) {
                request.requestEntity.id = "$instanceId:${(nextRequestId++).toString(16)}"
                session.persist(request.requestEntity)
            }
        }
    }

    private fun commitRequests(session: Session, requests: List<CommitRequest>) {
        for (request in requests) {
            for (cs in request.committedStatesEntities) {
                session.persist(cs)
            }
            session.persist(CommittedTransaction(request.txId.toString()))
        }
    }

    private fun findAlreadyCommitted(session: Session, states: List<StateRef>, references: List<StateRef>): Map<StateRef, StateConsumptionDetails> {
        val persistentStateRefs = (states + references).map { encodeStateRef(it) }.toSet()
        val committedStates = mutableListOf<CommittedState>()

        for (idsBatch in persistentStateRefs.chunked(config.maxInputStates)) {
            @Suppress("UNCHECKED_CAST")
            val existing = session
                .createNamedQuery("CommittedState.select")
                .setParameter("ids", idsBatch)
                .resultList as List<CommittedState>
            committedStates.addAll(existing)
        }

        return committedStates.map {
            val stateRef = StateRef(txhash = SecureHash.parse(it.id.txId), index = it.id.index)
            val consumingTxId = SecureHash.parse(it.consumingTxHash)
            if (stateRef in references) {
                stateRef to StateConsumptionDetails(consumingTxId.sha256(), type = StateConsumptionDetails.ConsumedStateType.REFERENCE_INPUT_STATE)
            } else {
                stateRef to StateConsumptionDetails(consumingTxId.sha256())
            }
        }.toMap()
    }

    private fun<T> withRetry(block: () -> T): T {
        var retryCount = 0
        var backOff = config.backOffBaseMs
        var exceptionCaught: SQLException? = null
        while (retryCount <= config.maxDBTransactionRetryCount) {
            try {
                val res = block()
                return res
            } catch (e: SQLException) {
                retryCount++
                Thread.sleep(backOff)
                backOff *= 2
                exceptionCaught = e
            }
        }
        throw exceptionCaught!!
    }

    private fun findAllConflicts(session: Session, requests: List<CommitRequest>): MutableMap<StateRef, StateConsumptionDetails> {
        log.info("Processing notarization requests with ${requests.sumBy { it.states.size }} input states and ${requests.sumBy { it.references.size }} references")

        val allStates = requests.flatMap { it.states }
        val allReferences = requests.flatMap { it.references }
        return findAlreadyCommitted(session, allStates, allReferences).toMutableMap()
    }

    private fun processRequest(
        session: Session,
        request: CommitRequest,
        consumedStates: MutableMap<StateRef, StateConsumptionDetails>,
        processedTxIds: MutableMap<SecureHash, InternalResult>,
        toCommit: MutableList<CommitRequest>
    ): InternalResult {
        val conflicts = (request.states + request.references).mapNotNull {
            if (consumedStates.containsKey(it)) it to consumedStates[it]!!
            else null
        }.toMap()

        return if (conflicts.isNotEmpty()) {
            handleStateConflicts(request, conflicts, session)
        } else {
            handleNoStateConflicts(request, toCommit, consumedStates, processedTxIds, session)
        }
    }

    /**
     * Process the [request] given there are conflicting states already present in the DB or current batch.
     *
     * To ensure idempotency, if the request's transaction matches a previously consumed transaction then the
     * same result (success) can be returned without committing it to the DB. Failure is only returned in the
     * case where the request is not a duplicate of a previously processed request and hence it is a genuine
     * double spend attempt.
     */
    private fun handleStateConflicts(
        request: CommitRequest,
        stateConflicts: Map<StateRef, StateConsumptionDetails>,
        session: Session
    ): InternalResult {
        return when {
            isConsumedByTheSameTx(request.txId.sha256(), stateConflicts) -> {
                InternalResult.Success
            }
            request.states.isEmpty() && isPreviouslyNotarised(session, request.txId) -> {
                InternalResult.Success
            }
            else -> {
                InternalResult.Failure(NotaryError.Conflict(request.txId, stateConflicts))
            }
        }
    }

    /**
     * Process the [request] given there are no conflicting states already present in the DB or current batch.
     *
     * This method performs time window validation and adds the request to the commit list if applicable.
     * It also checks the [processedTxIds] map to ensure that any time-window only duplicates within the batch
     * are only committed once.
     */
    private fun handleNoStateConflicts(
        request: CommitRequest,
        toCommit: MutableList<CommitRequest>,
        consumedStates: MutableMap<StateRef, StateConsumptionDetails>,
        processedTxIds: MutableMap<SecureHash, InternalResult>,
        session: Session
    ): InternalResult {
        return when {
            request.states.isEmpty() && isPreviouslyNotarised(session, request.txId) -> {
                InternalResult.Success
            }
            processedTxIds.containsKey(request.txId) -> {
                processedTxIds[request.txId]!!
            }
            else -> {
                val outsideTimeWindowError = validateTimeWindow(clock.instant(), request.timeWindow)
                val internalResult = if (outsideTimeWindowError != null) {
                    InternalResult.Failure(outsideTimeWindowError)
                } else {
                    // Mark states as consumed to capture conflicting transactions in the same batch
                    request.states.forEach {
                        consumedStates[it] = StateConsumptionDetails(request.txId.sha256())
                    }
                    toCommit.add(request)
                    InternalResult.Success
                }
                // Store transaction result to capture conflicting time-window only transactions in the same batch
                processedTxIds[request.txId] = internalResult
                internalResult
            }
        }
    }

    private fun isPreviouslyNotarised(session: Session, txId: SecureHash): Boolean {
        return session.find(CommittedTransaction::class.java, txId.toString()) != null
    }

    @Suppress("TooGenericExceptionCaught")
    private fun processRequests(requests: List<CommitRequest>) {
        try {
            // Note that there is an additional retry mechanism within the transaction itself.
            val res = withRetry {
                database.transaction {
                    val em = session.entityManagerFactory.createEntityManager()
                    em.unwrap(Session::class.java).jdbcBatchSize = jdbcBatchSize

                    val toCommit = mutableListOf<CommitRequest>()
                    val consumedStates = findAllConflicts(session, requests)
                    val processedTxIds = mutableMapOf<SecureHash, InternalResult>()

                    val results = requests.map { request ->
                        processRequest(session, request, consumedStates, processedTxIds, toCommit)
                    }

                    logRequests(requests)
                    commitRequests(session, toCommit)

                    results
                }
            }
            completeResponses(requests, res)
        } catch (e: Exception) {
            log.warn("Error processing commit requests", e)
            for (request in requests) {
                respondWithError(request, e)
            }
        }
    }

    private fun completeResponses(requests: List<CommitRequest>, results: List<InternalResult>): Int {
        val zippedResults = requests.zip(results)
        val successfulRequests = zippedResults
            .filter { it.second is InternalResult.Success }
            .map { it.first.txId }
            .distinct()
        val signature = if (successfulRequests.isNotEmpty())
            signBatch(successfulRequests)
        else null

        var inputStateCount = 0
        for ((request, result) in zippedResults) {
            val resultToSet = when {
                result is InternalResult.Failure -> UniquenessProvider.Result.Failure(result.error)
                signature != null -> UniquenessProvider.Result.Success(signature.forParticipant(request.txId))
                else -> throw IllegalStateException("Signature is required but not found")
            }

            request.future.set(resultToSet)
            inputStateCount += request.states.size
        }
        return inputStateCount
    }

    private fun respondWithError(request: CommitRequest, exception: Exception) {
        if (exception is NotaryInternalException) {
            request.future.set(UniquenessProvider.Result.Failure(exception.error))
        } else {
            request.future.setException(NotaryInternalException(NotaryError.General(Exception("Internal service error."))))
        }
    }
}
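The withRetry helper above retries a failed database transaction with exponential back-off starting from backOffBaseMs and doubling after every SQLException. A small sketch of the delay sequence it implies under the default JPANotaryConfiguration; this is derived only from the code above, not from separate documentation, and the helper name is hypothetical:

// First maxDBTransactionRetryCount sleep intervals implied by withRetry's doubling back-off.
fun backOffDelaysMs(baseMs: Long = 20L, retries: Int = 10): List<Long> =
    generateSequence(baseMs) { it * 2 }.take(retries).toList()
// backOffDelaysMs() == [20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 10240]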
node/src/main/kotlin/net/corda/notary/jpa/Schema.kt (new file)
@@ -0,0 +1,18 @@
package net.corda.notary.jpa

import net.corda.core.schemas.MappedSchema

object JPANotarySchema

object JPANotarySchemaV1 : MappedSchema(
    schemaFamily = JPANotarySchema.javaClass,
    version = 1,
    mappedTypes = listOf(
        JPAUniquenessProvider.CommittedState::class.java,
        JPAUniquenessProvider.Request::class.java,
        JPAUniquenessProvider.CommittedTransaction::class.java
    )
) {
    override val migrationResource: String?
        get() = "node-notary.changelog-master"
}
@@ -9,5 +9,7 @@
     <include file="migration/node-notary.changelog-v2.xml"/>
     <include file="migration/node-notary.changelog-pkey.xml"/>
     <include file="migration/node-notary.changelog-committed-transactions-table.xml" />
+    <include file="migration/node-notary.changelog-v3.xml" />
+    <include file="migration/node-notary.changelog-worker-logging.xml" />
 
 </databaseChangeLog>
@@ -0,0 +1,48 @@ (new file)
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
                   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                   xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd">

    <changeSet author="R3.Corda" id="create-notary-committed-transactions-table" logicalFilePath="migration/node-notary.changelog-committed-transactions-table.xml">
        <createTable tableName="node_notary_committed_txs">
            <column name="transaction_id" type="NVARCHAR(64)">
                <constraints nullable="false"/>
            </column>
        </createTable>
        <addPrimaryKey columnNames="transaction_id" constraintName="node_notary_transactions_pkey" tableName="node_notary_committed_txs"/>
    </changeSet>

    <changeSet id="notary-request-log-change-id-type-oracle" author="R3.Corda">
        <preConditions onFail="MARK_RAN">
            <dbms type="oracle"/>
        </preConditions>
        <!--
            For Oracle it's not possible to modify the data type for a column with existing values.
            So we create a new column with the right type, copy over the values, drop the original one and rename the new one.
        -->
        <addColumn tableName="node_notary_request_log">
            <column name="id_temp" type="NVARCHAR(76)"/>
        </addColumn>
        <!-- Copy old values from the table to the new column -->
        <sql>
            UPDATE node_notary_request_log SET id_temp = id
        </sql>
        <dropPrimaryKey tableName="node_notary_request_log" constraintName="node_notary_request_log_pkey"/>
        <dropColumn tableName="node_notary_request_log" columnName="id"/>
        <renameColumn tableName="node_notary_request_log" oldColumnName="id_temp" newColumnName="id"/>
        <addNotNullConstraint tableName="node_notary_request_log" columnName="id" columnDataType="NVARCHAR(76)"/>
        <addPrimaryKey columnNames="id" constraintName="node_notary_request_log_pkey" tableName="node_notary_request_log"/>
    </changeSet>

    <changeSet id="notary-request-log-change-id-type-others" author="R3.Corda">
        <preConditions onFail="MARK_RAN">
            <not>
                <dbms type="oracle"/>
            </not>
        </preConditions>
        <dropPrimaryKey tableName="node_notary_request_log" constraintName="node_notary_request_log_pkey"/>
        <modifyDataType tableName="node_notary_request_log" columnName="id" newDataType="NVARCHAR(76) NOT NULL"/>
        <addPrimaryKey columnNames="id" constraintName="node_notary_request_log_pkey" tableName="node_notary_request_log"/>
    </changeSet>

</databaseChangeLog>
@@ -0,0 +1,14 @@ (new file)
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
                   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                   xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd">

    <changeSet author="R3.Corda" id="worker-logging">
        <addColumn tableName="node_notary_request_log">
            <column name="worker_node_x500_name" type="NVARCHAR(255)">
                <constraints nullable="true"/>
            </column>
        </addColumn>
    </changeSet>

</databaseChangeLog>
@@ -60,14 +60,6 @@ class NodeConfigurationImplTest {
         assertThat(configValidationResult.first()).contains("crlCheckSoftFail")
     }
-
-    @Test(timeout=3_000)
-    fun `check devModeOptions flag helper`() {
-        assertTrue { configDebugOptions(true, null).shouldCheckCheckpoints() }
-        assertTrue { configDebugOptions(true, DevModeOptions()).shouldCheckCheckpoints() }
-        assertTrue { configDebugOptions(true, DevModeOptions(false)).shouldCheckCheckpoints() }
-        assertFalse { configDebugOptions(true, DevModeOptions(true)).shouldCheckCheckpoints() }
-    }
 
     @Test(timeout=3_000)
     fun `check crashShell flags helper`() {
         assertFalse { testConfiguration.copy(sshd = null).shouldStartSSHDaemon() }
@@ -72,6 +72,29 @@ class NetworkMapClientTest {
         assertEquals(nodeInfo2, networkMapClient.getNodeInfo(nodeInfoHash2))
     }
 
+    @Test(timeout=300_000)
+    fun `registered node is added to the network map v2`() {
+        server.version = "2"
+        val (nodeInfo, signedNodeInfo) = createNodeInfoAndSigned(ALICE_NAME)
+
+        networkMapClient.publish(signedNodeInfo)
+
+        val nodeInfoHash = nodeInfo.serialize().sha256()
+
+        assertThat(networkMapClient.getNetworkMap().payload.nodeInfoHashes).containsExactly(nodeInfoHash)
+        assertEquals(nodeInfo, networkMapClient.getNodeInfos().single())
+
+        val (nodeInfo2, signedNodeInfo2) = createNodeInfoAndSigned(BOB_NAME)
+
+        networkMapClient.publish(signedNodeInfo2)
+
+        val nodeInfoHash2 = nodeInfo2.serialize().sha256()
+        assertThat(networkMapClient.getNetworkMap().payload.nodeInfoHashes).containsExactly(nodeInfoHash, nodeInfoHash2)
+        assertEquals(cacheTimeout, networkMapClient.getNetworkMap().cacheMaxAge)
+        assertEquals("2", networkMapClient.getNetworkMap().serverVersion)
+        assertThat(networkMapClient.getNodeInfos()).containsExactlyInAnyOrder(nodeInfo, nodeInfo2)
+    }
+
     @Test(timeout=300_000)
     fun `negative test - registered invalid node is added to the network map`() {
         val invalidLongNodeName = CordaX500Name(
@@ -3,6 +3,7 @@ package net.corda.node.services.network
 import com.google.common.jimfs.Configuration.unix
 import com.google.common.jimfs.Jimfs
 import com.nhaarman.mockito_kotlin.any
+import com.nhaarman.mockito_kotlin.atLeast
 import com.nhaarman.mockito_kotlin.mock
 import com.nhaarman.mockito_kotlin.never
 import com.nhaarman.mockito_kotlin.times
@@ -10,6 +11,7 @@ import com.nhaarman.mockito_kotlin.verify
 import net.corda.core.crypto.Crypto
 import net.corda.core.crypto.SecureHash
 import net.corda.core.crypto.generateKeyPair
+import net.corda.core.crypto.sha256
 import net.corda.core.crypto.sign
 import net.corda.core.identity.CordaX500Name
 import net.corda.core.identity.Party
@@ -383,6 +385,75 @@ class NetworkMapUpdaterTest {
         assertEquals(aliceInfo, networkMapClient.getNodeInfo(aliceHash))
     }
 
+    @Test(timeout=300_000)
+    fun `update nodes is successful for network map supporting bulk operations but with only a few nodes requested`() {
+        server.version = "2"
+        setUpdater()
+        // on first update, bulk request is used
+        val (nodeInfo1, signedNodeInfo1) = createNodeInfoAndSigned("info1")
+        val nodeInfoHash1 = nodeInfo1.serialize().sha256()
+        val (nodeInfo2, signedNodeInfo2) = createNodeInfoAndSigned("info2")
+        val nodeInfoHash2 = nodeInfo2.serialize().sha256()
+        networkMapClient.publish(signedNodeInfo1)
+        networkMapClient.publish(signedNodeInfo2)
+
+        startUpdater()
+
+        Thread.sleep(2L * cacheExpiryMs)
+        verify(networkMapCache, times(1)).addOrUpdateNodes(listOf(nodeInfo1, nodeInfo2))
+        assertThat(networkMapCache.allNodeHashes).containsExactlyInAnyOrder(nodeInfoHash1, nodeInfoHash2)
+
+        // on subsequent updates, single requests are used
+        val (nodeInfo3, signedNodeInfo3) = createNodeInfoAndSigned("info3")
+        val nodeInfoHash3 = nodeInfo3.serialize().sha256()
+        val (nodeInfo4, signedNodeInfo4) = createNodeInfoAndSigned("info4")
+        val nodeInfoHash4 = nodeInfo4.serialize().sha256()
+        networkMapClient.publish(signedNodeInfo3)
+        networkMapClient.publish(signedNodeInfo4)
+
+        Thread.sleep(2L * cacheExpiryMs)
+        verify(networkMapCache, times(1)).addOrUpdateNodes(listOf(nodeInfo3))
+        verify(networkMapCache, times(1)).addOrUpdateNodes(listOf(nodeInfo4))
+        assertThat(networkMapCache.allNodeHashes).containsExactlyInAnyOrder(nodeInfoHash1, nodeInfoHash2, nodeInfoHash3, nodeInfoHash4)
+    }
+
+    @Test(timeout=300_000)
+    @SuppressWarnings("SpreadOperator")
+    fun `update nodes is successful for network map supporting bulk operations when high number of nodes is requested`() {
+        server.version = "2"
+        setUpdater()
+        val nodeInfos = (1..51).map { createNodeInfoAndSigned("info$it")
+                .also { nodeInfoAndSigned -> networkMapClient.publish(nodeInfoAndSigned.signed) }
+                .nodeInfo
+        }
+        val nodeInfoHashes = nodeInfos.map { it.serialize().sha256() }
+
+        startUpdater()
+        Thread.sleep(2L * cacheExpiryMs)
+
+        verify(networkMapCache, times(1)).addOrUpdateNodes(nodeInfos)
+        assertThat(networkMapCache.allNodeHashes).containsExactlyInAnyOrder(*(nodeInfoHashes.toTypedArray()))
+    }
+
+    @Test(timeout=300_000)
+    @SuppressWarnings("SpreadOperator")
+    fun `update nodes is successful for network map not supporting bulk operations`() {
+        setUpdater()
+        val nodeInfos = (1..51).map { createNodeInfoAndSigned("info$it")
+                .also { nodeInfoAndSigned -> networkMapClient.publish(nodeInfoAndSigned.signed) }
+                .nodeInfo
+        }
+        val nodeInfoHashes = nodeInfos.map { it.serialize().sha256() }
+
+        startUpdater()
+        Thread.sleep(2L * cacheExpiryMs)
+
+        // we can't be sure about the number of requests (and updates), as it depends on the machine and the threads created
+        // but if they are more than 1 it's enough to deduce that the parallel way was favored
+        verify(networkMapCache, atLeast(2)).addOrUpdateNodes(any())
+        assertThat(networkMapCache.allNodeHashes).containsExactlyInAnyOrder(*(nodeInfoHashes.toTypedArray()))
+    }
+
     @Test(timeout=300_000)
     fun `remove node from filesystem deletes it from network map cache`() {
         setUpdater(netMapClient = null)
@@ -21,6 +21,7 @@ import net.corda.core.flows.StartableByService
 import net.corda.core.flows.StateMachineRunId
 import net.corda.core.identity.Party
 import net.corda.core.internal.PLATFORM_VERSION
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.internal.uncheckedCast
 import net.corda.core.messaging.startFlow
 import net.corda.core.node.AppServiceHub
@@ -74,8 +75,10 @@ class FlowMetadataRecordingTest {
     fun `rpc started flows have metadata recorded`() {
         driver(DriverParameters(startNodesInProcess = true)) {
 
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                .transpose()
+                .getOrThrow()
 
             var flowId: StateMachineRunId? = null
             var context: InvocationContext? = null
@@ -162,8 +165,10 @@ class FlowMetadataRecordingTest {
     fun `rpc started flows have their arguments removed from in-memory checkpoint after zero'th checkpoint`() {
         driver(DriverParameters(startNodesInProcess = true)) {
 
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                .transpose()
+                .getOrThrow()
 
             var context: InvocationContext? = null
             var metadata: DBCheckpointStorage.DBFlowMetadata? = null
@@ -214,8 +219,10 @@ class FlowMetadataRecordingTest {
     fun `initiated flows have metadata recorded`() {
         driver(DriverParameters(startNodesInProcess = true)) {
 
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                .transpose()
+                .getOrThrow()
 
             var flowId: StateMachineRunId? = null
             var context: InvocationContext? = null
@@ -260,8 +267,10 @@ class FlowMetadataRecordingTest {
     fun `service started flows have metadata recorded`() {
         driver(DriverParameters(startNodesInProcess = true)) {
 
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                .transpose()
+                .getOrThrow()
 
             var flowId: StateMachineRunId? = null
             var context: InvocationContext? = null
@@ -306,8 +315,10 @@ class FlowMetadataRecordingTest {
     fun `scheduled flows have metadata recorded`() {
         driver(DriverParameters(startNodesInProcess = true)) {
 
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                .transpose()
+                .getOrThrow()
 
             val lock = Semaphore(0)
 
@@ -361,8 +372,10 @@ class FlowMetadataRecordingTest {
     fun `flows have their finish time recorded when completed`() {
         driver(DriverParameters(startNodesInProcess = true)) {
 
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                .transpose()
+                .getOrThrow()
 
             var flowId: StateMachineRunId? = null
             var metadata: DBCheckpointStorage.DBFlowMetadata? = null
@ -4,6 +4,7 @@ import com.codahale.metrics.MetricRegistry
|
|||||||
import net.corda.core.contracts.TimeWindow
|
import net.corda.core.contracts.TimeWindow
|
||||||
import net.corda.core.crypto.Crypto
|
import net.corda.core.crypto.Crypto
|
||||||
import net.corda.core.crypto.DigitalSignature
|
import net.corda.core.crypto.DigitalSignature
|
||||||
|
import net.corda.core.crypto.MerkleTree
|
||||||
import net.corda.core.crypto.NullKeys
|
import net.corda.core.crypto.NullKeys
|
||||||
import net.corda.core.crypto.SecureHash
|
import net.corda.core.crypto.SecureHash
|
||||||
import net.corda.core.crypto.SignableData
|
import net.corda.core.crypto.SignableData
|
||||||
@@ -21,9 +22,13 @@ import net.corda.node.services.schema.NodeSchemaService
 import net.corda.nodeapi.internal.crypto.X509Utilities
 import net.corda.nodeapi.internal.persistence.CordaPersistence
 import net.corda.nodeapi.internal.persistence.DatabaseConfig
+import net.corda.notary.common.BatchSignature
 import net.corda.notary.experimental.raft.RaftConfig
 import net.corda.notary.experimental.raft.RaftNotarySchemaV1
 import net.corda.notary.experimental.raft.RaftUniquenessProvider
+import net.corda.notary.jpa.JPANotaryConfiguration
+import net.corda.notary.jpa.JPANotarySchemaV1
+import net.corda.notary.jpa.JPAUniquenessProvider
 import net.corda.testing.core.SerializationEnvironmentRule
 import net.corda.testing.core.TestIdentity
 import net.corda.testing.core.generateStateRef
@@ -52,7 +57,7 @@ class UniquenessProviderTests(
         @JvmStatic
         @Parameterized.Parameters(name = "{0}")
         fun data(): Collection<UniquenessProviderFactory> = listOf(
-                PersistentUniquenessProviderFactory(),
+                JPAUniquenessProviderFactory(),
                 RaftUniquenessProviderFactory()
         )
     }
@@ -599,20 +604,6 @@ interface UniquenessProviderFactory {
     fun cleanUp() {}
 }
 
-class PersistentUniquenessProviderFactory : UniquenessProviderFactory {
-    private var database: CordaPersistence? = null
-
-    override fun create(clock: Clock): UniquenessProvider {
-        database?.close()
-        database = configureDatabase(makeTestDataSourceProperties(), DatabaseConfig(), { null }, { null }, NodeSchemaService(extraSchemas = setOf(NodeNotarySchemaV1)))
-        return PersistentUniquenessProvider(clock, database!!, TestingNamedCacheFactory(), ::signSingle)
-    }
-
-    override fun cleanUp() {
-        database?.close()
-    }
-}
-
 class RaftUniquenessProviderFactory : UniquenessProviderFactory {
     private var database: CordaPersistence? = null
     private var provider: RaftUniquenessProvider? = null
@@ -645,6 +636,36 @@ class RaftUniquenessProviderFactory : UniquenessProviderFactory {
     }
 }
 
+fun signBatch(it: Iterable<SecureHash>): BatchSignature {
+    val root = MerkleTree.getMerkleTree(it.map { it.sha256() })
+
+    val signableMetadata = SignatureMetadata(4, Crypto.findSignatureScheme(pubKey).schemeNumberID)
+    val signature = keyService.sign(SignableData(root.hash, signableMetadata), pubKey)
+    return BatchSignature(signature, root)
+}
+
+class JPAUniquenessProviderFactory : UniquenessProviderFactory {
+    private var database: CordaPersistence? = null
+    private val notaryConfig = JPANotaryConfiguration(maxInputStates = 10)
+    private val notaryWorkerName = CordaX500Name.parse("CN=NotaryWorker, O=Corda, L=London, C=GB")
+
+    override fun create(clock: Clock): UniquenessProvider {
+        database?.close()
+        database = configureDatabase(makeTestDataSourceProperties(), DatabaseConfig(), { null }, { null }, NodeSchemaService(extraSchemas = setOf(JPANotarySchemaV1)))
+        return JPAUniquenessProvider(
+                clock,
+                database!!,
+                notaryConfig,
+                notaryWorkerName,
+                ::signBatch
+        )
+    }
+
+    override fun cleanUp() {
+        database?.close()
+    }
+}
+
 var ourKeyPair: KeyPair = Crypto.generateKeyPair(X509Utilities.DEFAULT_TLS_SIGNATURE_SCHEME)
 val keyService = MockKeyManagementService(makeTestIdentityService(), ourKeyPair)
 val pubKey = keyService.freshKey()
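Note: two things change in this test file. The parameterised data() set swaps PersistentUniquenessProviderFactory for the new JPAUniquenessProviderFactory, and signBatch gives the JPA provider a batch signing function: it hashes every transaction id, builds a Merkle tree over the hashes and signs only the root, so one signature can cover a whole batch of notarisation requests. A small sketch of just the Merkle-root step, using only calls that appear above (the function name is illustrative):

import net.corda.core.crypto.MerkleTree
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.sha256

// One signature per batch: each transaction id is re-hashed, the hashes become the
// leaves of a Merkle tree, and only the root needs to be signed by the notary key.
fun merkleRootOf(txIds: Iterable<SecureHash>): SecureHash =
        MerkleTree.getMerkleTree(txIds.map { it.sha256() }).hash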
@@ -16,7 +16,9 @@ import net.corda.testing.common.internal.ProjectStructure.projectRootDir
 import net.corda.testing.core.BOB_NAME
 import net.corda.testing.core.DUMMY_BANK_A_NAME
 import net.corda.testing.core.DUMMY_BANK_B_NAME
+import net.corda.testing.core.DUMMY_NOTARY_NAME
 import net.corda.testing.http.HttpApi
+import net.corda.testing.node.NotarySpec
 import net.corda.testing.node.internal.addressMustBeBound
 import net.corda.testing.node.internal.addressMustNotBeBound
 import org.assertj.core.api.Assertions.assertThat
@@ -118,7 +120,7 @@ class DriverTests {
     fun `started node, which is not waited for in the driver, is shutdown when the driver exits`() {
         // First check that the process-id file is created by the node on startup, so that we can be sure our check that
         // it's deleted on shutdown isn't a false-positive.
-        val baseDirectory = driver {
+        val baseDirectory = driver(DriverParameters(notarySpecs = listOf(NotarySpec(DUMMY_NOTARY_NAME, startInProcess = false)))) {
             val baseDirectory = defaultNotaryNode.getOrThrow().baseDirectory
             assertThat(baseDirectory / "process-id").exists()
             baseDirectory
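Note: the process-id file asserted on here is only written by a node running in its own process. Since NotarySpec now defaults to an in-process notary (see the NotarySpec hunk further down), the test pins the notary out of process with startInProcess = false so the check still exercises a real child process.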
@@ -26,6 +26,7 @@ import net.corda.testing.node.internal.genericDriver
 import net.corda.testing.node.internal.getTimestampAsDirectoryName
 import net.corda.testing.node.internal.newContext
 import rx.Observable
+import java.io.File
 import java.nio.file.Path
 import java.nio.file.Paths
 import java.util.concurrent.atomic.AtomicInteger
@@ -66,6 +67,8 @@ interface NodeHandle : AutoCloseable {
     fun stop()
 }
 
+fun NodeHandle.logFile(): File = (baseDirectory / "logs").toFile().walk().filter { it.name.startsWith("node-") && it.extension == "log" }.single()
+
 /** Interface which represents an out of process node and exposes its process handle. **/
 @DoNotImplement
 interface OutOfProcess : NodeHandle {
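Note: the new logFile() extension gives driver tests a one-liner for finding a node's main log file; single() deliberately fails if zero or several node-*.log files are present. A hedged usage sketch, reusing the driver-test imports from the sketch above and assuming an out-of-process node (in-process nodes typically log through the test JVM instead):

driver(DriverParameters(startNodesInProcess = false)) {
    val alice = startNode(providedName = ALICE_NAME).getOrThrow()
    // Walks <baseDirectory>/logs for the single file named node-*.log.
    val log = alice.logFile()
    println("Alice logs to ${log.absolutePath}")
}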
@@ -13,24 +13,55 @@ import net.corda.testing.driver.VerifierType
  * @property rpcUsers A list of users able to instigate RPC for this node or cluster of nodes.
  * @property verifierType How the notary will verify transactions.
  * @property cluster [ClusterSpec] if this is a distributed cluster notary. If null then this is a single-node notary.
+ * @property startInProcess Should the notary be started in process.
  */
 data class NotarySpec(
         val name: CordaX500Name,
         val validating: Boolean = true,
         val rpcUsers: List<User> = emptyList(),
         val verifierType: VerifierType = VerifierType.InMemory,
-        val cluster: ClusterSpec? = null
+        val cluster: ClusterSpec? = null,
+        val startInProcess: Boolean = true
 ) {
+    constructor(name: CordaX500Name,
+                validating: Boolean = true,
+                rpcUsers: List<User> = emptyList(),
+                verifierType: VerifierType = VerifierType.InMemory,
+                cluster: ClusterSpec? = null): this(name, validating, rpcUsers, verifierType, cluster, "512m", true)
+
+    constructor(name: CordaX500Name,
+                validating: Boolean = true,
+                rpcUsers: List<User> = emptyList(),
+                verifierType: VerifierType = VerifierType.InMemory,
+                cluster: ClusterSpec? = null,
+                maximumHeapSize: String): this(name, validating, rpcUsers, verifierType, cluster, maximumHeapSize, true)
+
     // These extra fields are handled this way to preserve Kotlin wire compatibility wrt additional parameters with default values.
     constructor(name: CordaX500Name,
                 validating: Boolean = true,
                 rpcUsers: List<User> = emptyList(),
                 verifierType: VerifierType = VerifierType.InMemory,
                 cluster: ClusterSpec? = null,
-                maximumHeapSize: String = "512m"): this(name, validating, rpcUsers, verifierType, cluster) {
+                maximumHeapSize: String = "512m",
+                startInProcess: Boolean = true): this(name, validating, rpcUsers, verifierType, cluster, startInProcess) {
         this.maximumHeapSize = maximumHeapSize
     }
 
+    fun copy(
+            name: CordaX500Name,
+            validating: Boolean = true,
+            rpcUsers: List<User> = emptyList(),
+            verifierType: VerifierType = VerifierType.InMemory,
+            cluster: ClusterSpec? = null
+    ) = this.copy(
+            name = name,
+            validating = validating,
+            rpcUsers = rpcUsers,
+            verifierType = verifierType,
+            cluster = cluster,
+            startInProcess = true
+    )
+
     var maximumHeapSize: String = "512m"
 }
 
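Note: the NotarySpec hunk is the usual pattern for appending a property to an already-published Kotlin data class without breaking existing callers or its serialised form: the new startInProcess parameter gets a default, every previously published constructor shape is kept as a secondary constructor that delegates with explicit values, and the old generated copy(...) arity is pinned with an explicit overload. A hypothetical, non-Corda class showing the same shape:

// Illustration only: appending newFlag to a published data class while keeping the
// old constructor and copy() signatures callable from previously compiled clients.
data class Spec(
        val name: String,
        val retries: Int = 3,
        val newFlag: Boolean = true // newly appended property
) {
    // The constructor signature that existed before newFlag was added.
    constructor(name: String, retries: Int) : this(name, retries, true)

    // Pins the pre-existing two-parameter copy() shape.
    fun copy(name: String, retries: Int = 3): Spec = copy(name = name, retries = retries, newFlag = true)
}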
@@ -591,7 +591,11 @@ class DriverDSLImpl(
         return startRegisteredNode(
                 config,
                 localNetworkMap,
-                NodeParameters(rpcUsers = spec.rpcUsers, verifierType = spec.verifierType, customOverrides = notaryConfig + customOverrides, maximumHeapSize = spec.maximumHeapSize)
+                NodeParameters(rpcUsers = spec.rpcUsers,
+                        verifierType = spec.verifierType,
+                        startInSameProcess = spec.startInProcess,
+                        customOverrides = notaryConfig + customOverrides,
+                        maximumHeapSize = spec.maximumHeapSize)
         ).map { listOf(it) }
     }
 
@@ -646,6 +646,7 @@ private fun mockNodeConfiguration(certificatesDirectory: Path): NodeConfiguratio
         doReturn(NetworkParameterAcceptanceSettings()).whenever(it).networkParameterAcceptanceSettings
         doReturn(rigorousMock<ConfigurationWithOptions>()).whenever(it).configurationWithOptions
         doReturn(2).whenever(it).flowExternalOperationThreadPoolSize
+        doReturn(false).whenever(it).reloadCheckpointAfterSuspend
     }
 }
 
@@ -49,6 +49,8 @@ class NetworkMapServer(private val pollInterval: Duration,
     private val service = InMemoryNetworkMapService()
     private var parametersUpdate: ParametersUpdate? = null
     private var nextNetworkParameters: NetworkParameters? = null
+    // version toggle allowing to easily test behaviour of different version without spinning up a whole new server
+    var version: String = "1"
 
     init {
         server = Server(InetSocketAddress(hostAndPort.host, hostAndPort.port)).apply {
|
|||||||
private fun networkMapResponse(nodeInfoHashes: List<SecureHash>): Response {
|
private fun networkMapResponse(nodeInfoHashes: List<SecureHash>): Response {
|
||||||
val networkMap = NetworkMap(nodeInfoHashes, signedNetParams.raw.hash, parametersUpdate)
|
val networkMap = NetworkMap(nodeInfoHashes, signedNetParams.raw.hash, parametersUpdate)
|
||||||
val signedNetworkMap = networkMapCertAndKeyPair.sign(networkMap)
|
val signedNetworkMap = networkMapCertAndKeyPair.sign(networkMap)
|
||||||
return Response.ok(signedNetworkMap.serialize().bytes).header("Cache-Control", "max-age=${pollInterval.seconds}").build()
|
return Response.ok(signedNetworkMap.serialize().bytes)
|
||||||
|
.header("Cache-Control", "max-age=${pollInterval.seconds}")
|
||||||
|
.apply { if (version != "1") this.header("X-Corda-Server-Version", version)}
|
||||||
|
.build()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove nodeInfo for testing.
|
// Remove nodeInfo for testing.
|
||||||
@ -205,6 +210,15 @@ class NetworkMapServer(private val pollInterval: Duration,
|
|||||||
}.build()
|
}.build()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@GET
|
||||||
|
@Path("node-infos")
|
||||||
|
@Produces(MediaType.APPLICATION_OCTET_STREAM)
|
||||||
|
fun getNodeInfos(): Response {
|
||||||
|
val networkMap = NetworkMap(nodeInfoMap.keys.toList(), signedNetParams.raw.hash, parametersUpdate)
|
||||||
|
val signedNetworkMap = networkMapCertAndKeyPair.sign(networkMap)
|
||||||
|
return Response.ok(Pair(signedNetworkMap, nodeInfoMap.values.toList()).serialize().bytes).build()
|
||||||
|
}
|
||||||
|
|
||||||
@GET
|
@GET
|
||||||
@Path("network-parameters/{var}")
|
@Path("network-parameters/{var}")
|
||||||
@Produces(MediaType.APPLICATION_OCTET_STREAM)
|
@Produces(MediaType.APPLICATION_OCTET_STREAM)
|
||||||
|
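Note: both NetworkMapServer additions are test conveniences. The mutable version field is echoed back as an X-Corda-Server-Version header whenever it is set to something other than "1", and the node-infos endpoint returns the signed network map together with every registered node info in a single response. A client-side sketch of reading the header with plain JDK HTTP (host, port and path are placeholders for wherever the test server is bound):

import java.net.HttpURLConnection
import java.net.URL

fun readServerVersion(baseUrl: String): String {
    val connection = URL("$baseUrl/network-map").openConnection() as HttpURLConnection
    connection.requestMethod = "GET"
    // The header is only present when the test server's version field is not "1".
    return connection.getHeaderField("X-Corda-Server-Version") ?: "1"
}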
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<Configuration status="info" packages="net.corda.common.logging">
+<Configuration status="info">
 
 <Properties>
     <Property name="log-path">${sys:log-path:-logs}</Property>
@@ -63,17 +63,14 @@
 
         <Rewrite name="Console-ErrorCode-Appender">
             <AppenderRef ref="Console-Appender"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>
 
         <Rewrite name="Console-ErrorCode-Appender-Println">
             <AppenderRef ref="Console-Appender-Println"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>
 
         <Rewrite name="RollingFile-ErrorCode-Appender">
             <AppenderRef ref="RollingFile-Appender"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>
     </Appenders>
 
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<Configuration status="info" packages="net.corda.common.logging">
+<Configuration status="info">
 
 <Properties>
     <Property name="log-path">${sys:log-path:-logs}</Property>
@@ -65,17 +65,14 @@
 
         <Rewrite name="Console-ErrorCode-Appender">
             <AppenderRef ref="Console-Appender"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>
 
         <Rewrite name="Console-ErrorCode-Appender-Println">
             <AppenderRef ref="Console-Appender-Println"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>
 
         <Rewrite name="RollingFile-ErrorCode-Appender">
             <AppenderRef ref="RollingFile-Appender"/>
-            <ErrorCodeRewritePolicy/>
         </Rewrite>
     </Appenders>
 
@@ -16,6 +16,7 @@ import net.corda.core.crypto.SecureHash
 import net.corda.core.flows.*
 import net.corda.core.identity.AbstractParty
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.internal.createDirectories
 import net.corda.core.internal.div
 import net.corda.core.internal.inputStream
@@ -364,8 +365,10 @@ class InteractiveShellIntegrationTest {
     fun `dumpCheckpoints creates zip with json file for suspended flow`() {
         val user = User("u", "p", setOf(all()))
         driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()))) {
-            val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+            val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
             bobNode.stop()
 
             // Create logs directory since the driver is not creating it
@@ -111,6 +111,8 @@ object InteractiveShell {
         YAML
     }
 
+    private fun isShutdownCmd(cmd: String) = cmd == "shutdown" || cmd == "gracefulShutdown" || cmd == "terminate"
+
     fun startShell(configuration: ShellConfiguration, classLoader: ClassLoader? = null, standalone: Boolean = false) {
         makeRPCConnection = { username: String, password: String ->
             val connection = if (standalone) {
|
|||||||
throw e.rootCause
|
throw e.rootCause
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (isShutdownCmd(cmd)) {
|
||||||
|
out.println("Called 'shutdown' on the node.\nQuitting the shell now.").also { out.flush() }
|
||||||
|
onExit.invoke()
|
||||||
|
}
|
||||||
} catch (e: StringToMethodCallParser.UnparseableCallException) {
|
} catch (e: StringToMethodCallParser.UnparseableCallException) {
|
||||||
out.println(e.message, Decoration.bold, Color.red)
|
out.println(e.message, Decoration.bold, Color.red)
|
||||||
if (e !is StringToMethodCallParser.UnparseableCallException.NoSuchFile) {
|
if (e !is StringToMethodCallParser.UnparseableCallException.NoSuchFile) {
|
||||||
@ -634,10 +640,6 @@ object InteractiveShell {
|
|||||||
InputStreamSerializer.invokeContext = null
|
InputStreamSerializer.invokeContext = null
|
||||||
InputStreamDeserializer.closeAll()
|
InputStreamDeserializer.closeAll()
|
||||||
}
|
}
|
||||||
if (cmd == "shutdown") {
|
|
||||||
out.println("Called 'shutdown' on the node.\nQuitting the shell now.").also { out.flush() }
|
|
||||||
onExit.invoke()
|
|
||||||
}
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
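Note: taken together, the last two InteractiveShell hunks move the quit-the-shell handling from after the result block into the command execution path and widen it from the literal shutdown command to gracefulShutdown and terminate as well, via the new isShutdownCmd helper.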