Merge pull request #6529 from corda/os_4.6-feature_pass_in_client_id_when_starting_a_flow-merge

NOTICK - OS 4.6 to feature/pass_in_client_id_when_starting_a_flow merge
Dan Newton 2020-07-30 12:30:21 +01:00 committed by GitHub
commit 35bfa6945f
175 changed files with 4725 additions and 1819 deletions


@ -5398,6 +5398,10 @@ public interface net.corda.core.schemas.QueryableState extends net.corda.core.co
##
public interface net.corda.core.schemas.StatePersistable
##
public interface net.corda.core.serialization.CheckpointCustomSerializer
public abstract OBJ fromProxy(PROXY)
public abstract PROXY toProxy(OBJ)
##
public interface net.corda.core.serialization.ClassWhitelist
public abstract boolean hasListed(Class<?>)
##
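The CheckpointCustomSerializer entry above is the new public API introduced by this merge: a proxy-based serializer that a CorDapp can provide for types the flow checkpointing mechanism cannot handle directly (it is surfaced on the Cordapp interface as checkpointCustomSerializers further down in this diff). A minimal Kotlin sketch of an implementation, using a hypothetical BrokenMap type that is not part of this change:

    import net.corda.core.serialization.CheckpointCustomSerializer

    // Hypothetical type that the default checkpoint serializer cannot handle.
    class BrokenMap(val entries: Map<String, String>)

    // Proxy-based serializer: toProxy converts to a checkpoint-friendly form,
    // fromProxy rebuilds the original object when the flow is restored.
    class BrokenMapSerializer : CheckpointCustomSerializer<BrokenMap, List<Pair<String, String>>> {
        override fun toProxy(obj: BrokenMap): List<Pair<String, String>> = obj.entries.toList()
        override fun fromProxy(proxy: List<Pair<String, String>>): BrokenMap = BrokenMap(proxy.toMap())
    }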


@ -1,3 +1,14 @@
#!groovy
/**
* Jenkins pipeline to build Corda OS release with JDK11
*/
/**
* Kill already started job.
* Assume new commit takes precedence and results from previous
* unfinished builds are not required.
* This feature doesn't play well with disableConcurrentBuilds() option
*/
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@ -19,16 +30,16 @@ if (isReleaseTag) {
switch (env.TAG_NAME) {
case ~/.*-RC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
case ~/.*-HC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
default: nexusIqStage = "operate"
default: nexusIqStage = "release"
}
}
pipeline {
agent {
label 'k8s'
}
agent { label 'k8s' }
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {
@ -37,6 +48,8 @@ pipeline {
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish JDK 11 Release to Artifactory".replaceAll("/", "::")
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
@ -45,14 +58,15 @@ pipeline {
sh "./gradlew --no-daemon clean jar"
script {
sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: //'").trim()
/* every build related to Corda X.Y (GA, RC, HC, patch or snapshot) uses the same NexusIQ application */
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-jdk11-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: manualApplication(nexusAppId),
iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
)
@ -68,6 +82,8 @@ pipeline {
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\" " +
"-Ddocker.buildbase.tag=11latest " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Ddocker.dockerfile=DockerfileJDK11Azul" +
" clean pushBuildImage preAllocateForParallelRegressionTest preAllocateForAllParallelSlowIntegrationTest --stacktrace"
}
@ -126,7 +142,7 @@ pipeline {
rtGradleDeployer(
id: 'deployer',
serverId: 'R3-Artifactory',
repo: 'r3-corda-releases'
repo: 'corda-releases'
)
rtGradleRun(
usesPlugin: true,
@ -147,7 +163,7 @@ pipeline {
post {
always {
archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
junit '**/build/test-results-xml/**/*.xml'
junit testResults: '**/build/test-results-xml/**/*.xml', keepLongStdio: true
}
cleanup {
deleteDir() /* clean up our workspace */


@ -14,6 +14,7 @@ pipeline {
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
stages {
@ -35,4 +36,4 @@ pipeline {
deleteDir() /* clean up our workspace */
}
}
}
}


@ -28,6 +28,7 @@ pipeline {
ansiColor('xterm')
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
/*
* a bit awkward to read
@ -64,7 +65,7 @@ pipeline {
post {
always {
archiveArtifacts allowEmptyArchive: true, artifacts: '**/logs/**/*.log'
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true
bat '.ci/kill_corda_procs.cmd'
}
cleanup {
@ -86,7 +87,7 @@ pipeline {
post {
always {
archiveArtifacts allowEmptyArchive: true, artifacts: '**/logs/**/*.log'
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true
bat '.ci/kill_corda_procs.cmd'
}
cleanup {


@ -9,6 +9,7 @@ pipeline {
timestamps()
overrideIndexTriggers(false)
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
triggers {
pollSCM ignorePostCommitHooks: true, scmpoll_spec: '@midnight'
@ -19,6 +20,8 @@ pipeline {
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
@ -35,6 +38,8 @@ pipeline {
"-Dkubenetize=true " +
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean pushBuildImage --stacktrace"
}
@ -74,15 +79,13 @@ pipeline {
}
}
post {
always {
archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
junit testResults: '**/build/test-results-xml/**/*.xml', allowEmptyResults: true, keepLongStdio: true
junit testResults: '**/build/test-results-xml/**/*.xml', keepLongStdio: true
}
cleanup {
deleteDir() /* clean up our workspace */
}
}
}


@ -4,10 +4,11 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
pipeline {
agent { label 'k8s' }
agent { label 'standard' }
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {

.ci/dev/publish-api-docs/Jenkinsfile (new file)

@ -0,0 +1,46 @@
#!groovy
/**
* Jenkins pipeline to build Corda OS KDoc & Javadoc archive
*/
/**
* Kill already started job.
* Assume new commit takes precedence and results from previous
* unfinished builds are not required.
* This feature doesn't play well with disableConcurrentBuilds() option
*/
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
pipeline {
agent { label 'standard' }
options {
ansiColor('xterm')
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
stage('Publish Archived API Docs to Artifactory') {
when { tag pattern: /^docs-release-os-V(\d+\.\d+)(\.\d+){0,1}(-GA){0,1}(-\d{4}-\d\d-\d\d-\d{4}){0,1}$/, comparator: 'REGEXP' }
steps {
sh "./gradlew :clean :docs:artifactoryPublish -DpublishApiDocs"
}
}
}
post {
cleanup {
deleteDir() /* clean up our workspace */
}
}
}


@ -1,17 +1,34 @@
#!groovy
/**
* Jenkins pipeline to build Corda OS nightly snapshots
*/
/**
* Kill already started job.
* Assume new commit takes precedence and results from previous
* unfinished builds are not required.
* This feature doesn't play well with disableConcurrentBuilds() option
*/
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
/*
** calculate the stage for NexusIQ evaluation
** * build for snapshots
*/
def nexusIqStage = "build"
pipeline {
agent { label 'k8s' }
agent { label 'standard' }
options {
timestamps()
ansiColor('xterm')
overrideIndexTriggers(false)
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
triggers {
@ -27,6 +44,26 @@ pipeline {
}
stages {
stage('Sonatype Check') {
steps {
sh "./gradlew --no-daemon clean jar"
script {
sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
/* every build related to Corda X.Y (GA, RC, HC, patch or snapshot) uses the same NexusIQ application */
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
)
}
}
stage('Publish to Artifactory') {
steps {
rtServer (


@ -12,6 +12,7 @@ pipeline {
ansiColor('xterm')
overrideIndexTriggers(false)
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {


@ -18,6 +18,7 @@ killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
* Sense environment
*/
boolean isReleaseTag = (env.TAG_NAME =~ /^release-.*(?<!_JDK11)$/)
boolean isInternalRelease = (env.TAG_NAME =~ /^internal-release-.*$/)
/*
** calculate the stage for NexusIQ evaluation
** * build for snapshots
@ -29,7 +30,7 @@ if (isReleaseTag) {
switch (env.TAG_NAME) {
case ~/.*-RC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
case ~/.*-HC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
default: nexusIqStage = "operate"
default: nexusIqStage = "release"
}
}
@ -39,6 +40,7 @@ pipeline {
timestamps()
disableConcurrentBuilds()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {
@ -48,6 +50,8 @@ pipeline {
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish Release to Artifactory".replaceAll("/", "::")
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
@ -56,14 +60,15 @@ pipeline {
sh "./gradlew --no-daemon clean jar"
script {
sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: //'").trim()
/* every build related to Corda X.Y (GA, RC, HC, patch or snapshot) uses the same NexusIQ application */
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: manualApplication(nexusAppId),
iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
)
@ -83,6 +88,8 @@ pipeline {
"-Dkubenetize=true " +
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean preAllocateForParallelRegressionTest preAllocateForAllParallelSlowIntegrationTest pushBuildImage --stacktrace"
}
@ -134,7 +141,7 @@ pipeline {
rtGradleDeployer(
id: 'deployer',
serverId: 'R3-Artifactory',
repo: 'r3-corda-releases'
repo: 'corda-releases'
)
rtGradleRun(
usesPlugin: true,
@ -153,7 +160,7 @@ pipeline {
stage('Publish Release to Docker Hub') {
when {
expression { isReleaseTag }
expression { !isInternalRelease && isReleaseTag }
}
steps {
withCredentials([
@ -166,7 +173,6 @@ pipeline {
}
}
post {
always {
archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false

Jenkinsfile

@ -9,6 +9,7 @@ pipeline {
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {
@ -16,6 +17,8 @@ pipeline {
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
@ -26,6 +29,8 @@ pipeline {
"-Dkubenetize=true " +
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean preAllocateForAllParallelUnitTest preAllocateForAllParallelIntegrationTest pushBuildImage --stacktrace"
}


@ -62,14 +62,14 @@ buildscript {
ext.asm_version = '7.1'
ext.artemis_version = '2.6.2'
// TODO Upgrade Jackson only when corda is using kotlin 1.3.10
ext.jackson_version = '2.9.7'
// TODO Upgrade to Jackson 2.10+ only when corda is using kotlin 1.3.10
ext.jackson_version = '2.9.8'
ext.jetty_version = '9.4.19.v20190610'
ext.jersey_version = '2.25'
ext.servlet_version = '4.0.1'
ext.assertj_version = '3.12.2'
ext.slf4j_version = '1.7.26'
ext.log4j_version = '2.11.2'
ext.slf4j_version = '1.7.30'
ext.log4j_version = '2.13.3'
ext.bouncycastle_version = constants.getProperty("bouncycastleVersion")
ext.guava_version = constants.getProperty("guavaVersion")
ext.caffeine_version = constants.getProperty("caffeineVersion")
@ -155,22 +155,39 @@ buildscript {
ext.corda_docs_link = "https://docs.corda.net/docs/corda-os/$baseVersion"
repositories {
mavenLocal()
mavenCentral()
jcenter()
maven {
url 'https://kotlin.bintray.com/kotlinx'
}
maven {
url "$artifactory_contextUrl/corda-dependencies-dev"
}
maven {
url "$artifactory_contextUrl/corda-releases"
// Use system environment to activate caching with Artifactory,
// because it is actually easier to pass that during parallel build.
// NOTE: it has to be a name of a virtual repository with all
// required remote or local repositories!
if (System.getenv("CORDA_USE_CACHE")) {
maven {
name "R3 Maven remote repositories"
url "${artifactory_contextUrl}/${System.getenv("CORDA_USE_CACHE")}"
authentication {
basic(BasicAuthentication)
}
credentials {
username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
}
}
} else {
mavenCentral()
jcenter()
maven {
url 'https://kotlin.bintray.com/kotlinx'
}
maven {
url "${artifactory_contextUrl}/corda-dependencies-dev"
}
maven {
url "${artifactory_contextUrl}/corda-releases"
}
}
}
dependencies {
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
classpath "org.jetbrains.kotlin:kotlin-allopen:$kotlin_version"
classpath 'com.jfrog.bintray.gradle:gradle-bintray-plugin:1.4'
classpath "net.corda.plugins:publish-utils:$gradle_plugins_version"
classpath "net.corda.plugins:quasar-utils:$gradle_plugins_version"
classpath "net.corda.plugins:cordformation:$gradle_plugins_version"
@ -204,7 +221,6 @@ plugins {
apply plugin: 'project-report'
apply plugin: 'com.github.ben-manes.versions'
apply plugin: 'net.corda.plugins.publish-utils'
apply plugin: 'maven-publish'
apply plugin: 'com.jfrog.artifactory'
apply plugin: "com.bmuschko.docker-remote-api"
apply plugin: "com.r3.dependx.dependxies"
@ -357,11 +373,29 @@ allprojects {
repositories {
mavenLocal()
mavenCentral()
jcenter()
maven { url "$artifactory_contextUrl/corda-dependencies" }
maven { url 'https://repo.gradle.org/gradle/libs-releases' }
maven { url "$artifactory_contextUrl/corda-dev" }
// Use system environment to activate caching with Artifactory,
// because it is actually easier to pass that during parallel build.
// NOTE: it has to be a name of a virtual repository with all
// required remote or local repositories!
if (System.getenv("CORDA_USE_CACHE")) {
maven {
name "R3 Maven remote repositories"
url "${artifactory_contextUrl}/${System.getenv("CORDA_USE_CACHE")}"
authentication {
basic(BasicAuthentication)
}
credentials {
username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
}
}
} else {
mavenCentral()
jcenter()
maven { url "${artifactory_contextUrl}/corda-dependencies" }
maven { url 'https://repo.gradle.org/gradle/libs-releases' }
maven { url "${artifactory_contextUrl}/corda-dev" }
}
}
configurations {
@ -626,7 +660,7 @@ dependxiesModule {
skipTasks = "test,integrationTest,smokeTest,slowIntegrationTest"
}
tasks.register('generateApi', net.corda.plugins.GenerateApi) {
tasks.register('generateApi', net.corda.plugins.apiscanner.GenerateApi) {
baseName = "api-corda"
}


@ -292,6 +292,7 @@ class ReconnectingCordaRPCOps private constructor(
}
private class ErrorInterceptingHandler(val reconnectingRPCConnection: ReconnectingRPCConnection) : InvocationHandler {
private fun Method.isStartFlow() = name.startsWith("startFlow") || name.startsWith("startTrackedFlow")
private fun Method.isShutdown() = name == "shutdown" || name == "gracefulShutdown" || name == "terminate"
private fun checkIfIsStartFlow(method: Method, e: InvocationTargetException) {
if (method.isStartFlow()) {
@ -306,7 +307,7 @@ class ReconnectingCordaRPCOps private constructor(
*
* A negative number for [maxNumberOfAttempts] means an unlimited number of retries will be performed.
*/
@Suppress("ThrowsCount", "ComplexMethod")
@Suppress("ThrowsCount", "ComplexMethod", "NestedBlockDepth")
private fun doInvoke(method: Method, args: Array<out Any>?, maxNumberOfAttempts: Int): Any? {
checkIfClosed()
var remainingAttempts = maxNumberOfAttempts
@ -318,20 +319,20 @@ class ReconnectingCordaRPCOps private constructor(
log.debug { "RPC $method invoked successfully." }
}
} catch (e: InvocationTargetException) {
if (method.name.equals("shutdown", true)) {
log.debug("Shutdown invoked, stop reconnecting.", e)
reconnectingRPCConnection.notifyServerAndClose()
break
}
when (e.targetException) {
is RejectedCommandException -> {
log.warn("Node is being shutdown. Operation ${method.name} rejected. Shutting down...", e)
throw e.targetException
}
is ConnectionFailureException -> {
log.warn("Failed to perform operation ${method.name}. Connection dropped. Retrying....", e)
reconnectingRPCConnection.reconnectOnError(e)
checkIfIsStartFlow(method, e)
if (method.isShutdown()) {
log.debug("Shutdown invoked, stop reconnecting.", e)
reconnectingRPCConnection.notifyServerAndClose()
} else {
log.warn("Failed to perform operation ${method.name}. Connection dropped. Retrying....", e)
reconnectingRPCConnection.reconnectOnError(e)
checkIfIsStartFlow(method, e)
}
}
is RPCException -> {
rethrowIfUnrecoverable(e.targetException as RPCException)


@ -1,28 +0,0 @@
package net.corda.common.logging
import org.apache.logging.log4j.core.Core
import org.apache.logging.log4j.core.LogEvent
import org.apache.logging.log4j.core.appender.rewrite.RewritePolicy
import org.apache.logging.log4j.core.config.plugins.Plugin
import org.apache.logging.log4j.core.config.plugins.PluginFactory
import org.apache.logging.log4j.core.impl.Log4jLogEvent
@Plugin(name = "ErrorCodeRewritePolicy", category = Core.CATEGORY_NAME, elementType = "rewritePolicy", printObject = false)
class ErrorCodeRewritePolicy : RewritePolicy {
override fun rewrite(source: LogEvent): LogEvent? {
val newMessage = source.message?.withErrorCodeFor(source.thrown, source.level)
return if (newMessage == source.message) {
source
} else {
Log4jLogEvent.Builder(source).setMessage(newMessage).build()
}
}
companion object {
@JvmStatic
@PluginFactory
fun createPolicy(): ErrorCodeRewritePolicy {
return ErrorCodeRewritePolicy()
}
}
}


@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="info" packages="net.corda.common.logging" shutdownHook="disable">
<Configuration status="info" shutdownHook="disable">
<Properties>
<Property name="log-path">${sys:log-path:-logs}</Property>
@ -172,21 +172,17 @@
<Rewrite name="Console-ErrorCode-Selector">
<AppenderRef ref="Console-Selector"/>
<ErrorCodeRewritePolicy/>
</Rewrite>
<Rewrite name="Console-ErrorCode-Appender-Println">
<AppenderRef ref="Console-Appender-Println"/>
<ErrorCodeRewritePolicy/>
</Rewrite>
<Rewrite name="RollingFile-ErrorCode-Appender">
<AppenderRef ref="RollingFile-Appender"/>
<ErrorCodeRewritePolicy/>
</Rewrite>
<Rewrite name="Diagnostic-RollingFile-ErrorCode-Appender">
<AppenderRef ref="Diagnostic-RollingFile-Appender"/>
<ErrorCodeRewritePolicy/>
</Rewrite>
</Appenders>


@ -4,14 +4,14 @@
cordaVersion=4.6
versionSuffix=SNAPSHOT
gradlePluginsVersion=5.0.9
gradlePluginsVersion=5.0.11
kotlinVersion=1.2.71
java8MinUpdateVersion=171
# ***************************************************************#
# When incrementing platformVersion make sure to update #
# net.corda.core.internal.CordaUtilsKt.PLATFORM_VERSION as well. #
# ***************************************************************#
platformVersion=7
platformVersion=8
guavaVersion=28.0-jre
# Quasar version to use with Java 8:
quasarVersion=0.7.12_r3
@ -20,12 +20,12 @@ quasarClassifier=jdk8
quasarVersion11=0.8.0_r3
jdkClassifier11=jdk11
proguardVersion=6.1.1
bouncycastleVersion=1.60
bouncycastleVersion=1.66
classgraphVersion=4.8.78
disruptorVersion=3.4.2
typesafeConfigVersion=1.3.4
jsr305Version=3.0.2
artifactoryPluginVersion=4.7.3
artifactoryPluginVersion=4.16.1
snakeYamlVersion=1.19
caffeineVersion=2.7.0
metricsVersion=4.1.0


@ -54,8 +54,8 @@ tasks.named('jar', Jar) {
enabled = false
}
def coreJarTask = tasks.getByPath(':core:jar')
def originalJar = coreJarTask.outputs.files.singleFile
def coreJarTask = project(':core').tasks.named('jar', Jar)
def originalJar = coreJarTask.map { it.outputs.files.singleFile }
def patchCore = tasks.register('patchCore', Zip) {
dependsOn coreJarTask
@ -132,7 +132,7 @@ def jarFilter = tasks.register('jarFilter', JarFilterTask) {
}
}
task determinise(type: ProGuardTask) {
def determinise = tasks.register('determinise', ProGuardTask) {
injars jarFilter
outjars file("$buildDir/proguard/$jarBaseName-${project.version}.jar")
@ -166,17 +166,20 @@ task determinise(type: ProGuardTask) {
keepclassmembers 'class net.corda.core.** { public synthetic <methods>; }'
}
task metafix(type: MetaFixerTask) {
def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask)
def metafix = tasks.register('metafix', MetaFixerTask) {
outputDir file("$buildDir/libs")
jars determinise
suffix ""
// Strip timestamps from the JAR to make it reproducible.
preserveTimestamps = false
finalizedBy checkDeterminism
}
// DOCSTART 01
def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask) {
checkDeterminism.configure {
dependsOn jdkTask
injars metafix
@ -197,14 +200,17 @@ def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask) {
// DOCEND 01
defaultTasks "determinise"
determinise.finalizedBy metafix
metafix.finalizedBy checkDeterminism
assemble.dependsOn checkDeterminism
determinise.configure {
finalizedBy metafix
}
tasks.named('assemble') {
dependsOn checkDeterminism
}
def deterministicJar = metafix.outputs.files.singleFile
def deterministicJar = metafix.map { it.outputs.files.singleFile }
artifacts {
deterministicArtifacts file: deterministicJar, name: jarBaseName, type: 'jar', extension: 'jar', builtBy: metafix
publish file: deterministicJar, name: jarBaseName, type: 'jar', extension: 'jar', builtBy: metafix
deterministicArtifacts deterministicJar
publish deterministicJar
}
tasks.named('sourceJar', Jar) {


@ -8,6 +8,7 @@ import net.corda.core.identity.Party;
import net.corda.core.utilities.KotlinUtilsKt;
import net.corda.testing.core.TestConstants;
import net.corda.testing.core.TestUtils;
import net.corda.testing.driver.DriverDSL;
import net.corda.testing.driver.DriverParameters;
import net.corda.testing.driver.NodeHandle;
import net.corda.testing.driver.NodeParameters;
@ -19,8 +20,11 @@ import org.slf4j.LoggerFactory;
import java.io.Serializable;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import static net.corda.testing.driver.Driver.driver;
@ -29,14 +33,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
@Test
public void awaitFlowExternalOperationInJava() {
driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
NodeHandle alice = KotlinUtilsKt.getOrThrow(
driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
Duration.of(1, ChronoUnit.MINUTES)
);
NodeHandle bob = KotlinUtilsKt.getOrThrow(
driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
Duration.of(1, ChronoUnit.MINUTES)
);
List<NodeHandle> aliceAndBob = aliceAndBob(driver);
NodeHandle alice = aliceAndBob.get(0);
NodeHandle bob = aliceAndBob.get(1);
return KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
FlowWithExternalOperationInJava.class,
TestUtils.singleIdentity(bob.getNodeInfo())
@ -47,14 +46,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
@Test
public void awaitFlowExternalAsyncOperationInJava() {
driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
NodeHandle alice = KotlinUtilsKt.getOrThrow(
driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
Duration.of(1, ChronoUnit.MINUTES)
);
NodeHandle bob = KotlinUtilsKt.getOrThrow(
driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
Duration.of(1, ChronoUnit.MINUTES)
);
List<NodeHandle> aliceAndBob = aliceAndBob(driver);
NodeHandle alice = aliceAndBob.get(0);
NodeHandle bob = aliceAndBob.get(1);
return KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
FlowWithExternalAsyncOperationInJava.class,
TestUtils.singleIdentity(bob.getNodeInfo())
@ -65,14 +59,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
@Test
public void awaitFlowExternalOperationInJavaCanBeRetried() {
driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
NodeHandle alice = KotlinUtilsKt.getOrThrow(
driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
Duration.of(1, ChronoUnit.MINUTES)
);
NodeHandle bob = KotlinUtilsKt.getOrThrow(
driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
Duration.of(1, ChronoUnit.MINUTES)
);
List<NodeHandle> aliceAndBob = aliceAndBob(driver);
NodeHandle alice = aliceAndBob.get(0);
NodeHandle bob = aliceAndBob.get(1);
KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
FlowWithExternalOperationThatGetsRetriedInJava.class,
TestUtils.singleIdentity(bob.getNodeInfo())
@ -190,4 +179,15 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
return operation.apply(futureService, deduplicationId);
}
}
private List<NodeHandle> aliceAndBob(DriverDSL driver) {
return Arrays.asList(TestConstants.ALICE_NAME, TestConstants.BOB_NAME)
.stream()
.map(nm -> driver.startNode(new NodeParameters().withProvidedName(nm)))
.collect(Collectors.toList())
.stream()
.map(future -> KotlinUtilsKt.getOrThrow(future,
Duration.of(1, ChronoUnit.MINUTES)))
.collect(Collectors.toList());
}
}


@ -4,6 +4,7 @@ import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.HospitalizeFlowException
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.minutes
@ -24,8 +25,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.startFlow(::FlowWithExternalAsyncOperation, bob.nodeInfo.singleIdentity())
.returnValue.getOrThrow(1.minutes)
assertHospitalCounters(0, 0)
@ -35,8 +38,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation that checks deduplicationId is not rerun when flow is retried`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
assertFailsWith<DuplicatedProcessException> {
alice.rpc.startFlow(
::FlowWithExternalAsyncOperationWithDeduplication,
@ -50,8 +55,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation propagates exception to calling flow`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
assertFailsWith<MyCordaException> {
alice.rpc.startFlow(
::FlowWithExternalAsyncOperationPropagatesException,
@ -66,8 +73,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation exception can be caught in flow`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
val result = alice.rpc.startFlow(
::FlowWithExternalAsyncOperationThatThrowsExceptionAndCaughtInFlow,
bob.nodeInfo.singleIdentity()
@ -80,8 +89,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation with exception that hospital keeps for observation does not fail`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
blockUntilFlowKeptInForObservation {
alice.rpc.startFlow(
::FlowWithExternalAsyncOperationPropagatesException,
@ -96,8 +107,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation with exception that hospital discharges is retried and runs the future again`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
blockUntilFlowKeptInForObservation {
alice.rpc.startFlow(
::FlowWithExternalAsyncOperationPropagatesException,
@ -112,8 +125,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation that throws exception rather than completing future exceptionally fails with internal exception`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
assertFailsWith<StateTransitionException> {
alice.rpc.startFlow(::FlowWithExternalAsyncOperationUnhandledException, bob.nodeInfo.singleIdentity())
.returnValue.getOrThrow(1.minutes)
@ -125,8 +140,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation that passes serviceHub into process can be retried`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
blockUntilFlowKeptInForObservation {
alice.rpc.startFlow(
::FlowWithExternalAsyncOperationThatPassesInServiceHubCanRetry,
@ -140,8 +157,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation that accesses serviceHub from flow directly will fail when retried`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
assertFailsWith<DirectlyAccessedServiceHubException> {
alice.rpc.startFlow(
::FlowWithExternalAsyncOperationThatDirectlyAccessesServiceHubFailsRetry,
@ -155,8 +174,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `starting multiple futures and joining on their results`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.startFlow(::FlowThatStartsMultipleFuturesAndJoins, bob.nodeInfo.singleIdentity()).returnValue.getOrThrow(1.minutes)
assertHospitalCounters(0, 0)
}
@ -167,7 +188,7 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
@Suspendable
override fun testCode(): Any =
await(ExternalAsyncOperation(serviceHub) { _, _ ->
await(ExternalAsyncOperation(serviceHub) { serviceHub, _ ->
serviceHub.cordaService(FutureService::class.java).createFuture()
})
}


@ -3,6 +3,7 @@ package net.corda.coretests.flows
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.minutes
@ -18,8 +19,10 @@ class FlowExternalOperationStartFlowTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `starting a flow inside of a flow that starts a future will succeed`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.startFlow(::FlowThatStartsAnotherFlowInAnExternalOperation, bob.nodeInfo.singleIdentity())
.returnValue.getOrThrow(1.minutes)
assertHospitalCounters(0, 0)
@ -29,8 +32,10 @@ class FlowExternalOperationStartFlowTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `multiple flows can be started and their futures joined from inside a flow`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.startFlow(::ForkJoinFlows, bob.nodeInfo.singleIdentity())
.returnValue.getOrThrow(1.minutes)
assertHospitalCounters(0, 0)


@ -5,6 +5,7 @@ import net.corda.core.flows.FlowLogic
import net.corda.core.flows.HospitalizeFlowException
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.internal.packageName
import net.corda.core.messaging.startFlow
import net.corda.core.node.services.queryBy
@ -29,8 +30,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external operation`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.startFlow(::FlowWithExternalOperation, bob.nodeInfo.singleIdentity())
.returnValue.getOrThrow(1.minutes)
assertHospitalCounters(0, 0)
@ -40,8 +43,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external operation that checks deduplicationId is not rerun when flow is retried`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
assertFailsWith<DuplicatedProcessException> {
alice.rpc.startFlow(
::FlowWithExternalOperationWithDeduplication,
@ -55,8 +60,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external operation propagates exception to calling flow`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
assertFailsWith<MyCordaException> {
alice.rpc.startFlow(
::FlowWithExternalOperationPropagatesException,
@ -71,8 +78,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external operation exception can be caught in flow`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.startFlow(::FlowWithExternalOperationThatThrowsExceptionAndCaughtInFlow, bob.nodeInfo.singleIdentity())
.returnValue.getOrThrow(1.minutes)
assertHospitalCounters(0, 0)
@ -82,8 +91,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external operation with exception that hospital keeps for observation does not fail`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
blockUntilFlowKeptInForObservation {
alice.rpc.startFlow(
::FlowWithExternalOperationPropagatesException,
@ -98,8 +109,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external operation with exception that hospital discharges is retried and runs the external operation again`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
blockUntilFlowKeptInForObservation {
alice.rpc.startFlow(
::FlowWithExternalOperationPropagatesException,
@ -114,8 +127,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation that passes serviceHub into process can be retried`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
blockUntilFlowKeptInForObservation {
alice.rpc.startFlow(
::FlowWithExternalOperationThatPassesInServiceHubCanRetry,
@ -129,8 +144,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external async operation that accesses serviceHub from flow directly will fail when retried`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
assertFailsWith<DirectlyAccessedServiceHubException> {
alice.rpc.startFlow(
::FlowWithExternalOperationThatDirectlyAccessesServiceHubFailsRetry,
@ -199,8 +216,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
@Test(timeout = 300_000)
fun `external operation can be retried when an error occurs inside of database transaction`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
val success = alice.rpc.startFlow(
::FlowWithExternalOperationThatErrorsInsideOfDatabaseTransaction,
bob.nodeInfo.singleIdentity()


@ -10,6 +10,7 @@ import net.corda.core.flows.StartableByRPC
import net.corda.core.flows.StateMachineRunId
import net.corda.core.flows.UnexpectedFlowEndException
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.minutes
@ -56,9 +57,10 @@ class FlowIsKilledTest {
@Test(timeout = 300_000)
fun `manually handled killed flows propagate error to counter parties`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.let { rpc ->
val handle = rpc.startFlow(
::AFlowThatWantsToDieAndKillsItsFriends,
@ -85,8 +87,11 @@ class FlowIsKilledTest {
@Test(timeout = 300_000)
fun `a manually killed initiated flow will propagate the killed error to the initiator and its counter parties`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
val handle = alice.rpc.startFlow(
::AFlowThatGetsMurderedByItsFriend,
bob.nodeInfo.singleIdentity()


@ -7,6 +7,7 @@ import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.minutes
@ -53,8 +54,10 @@ class FlowSleepTest {
fun `flow can sleep and perform other suspending functions`() {
// ensures that events received while the flow is sleeping are not processed
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
val (start, finish) = alice.rpc.startFlow(
::SleepAndInteractWithPartyFlow,
bob.nodeInfo.singleIdentity()


@ -53,7 +53,7 @@ class ReceiveFinalityFlowTest {
val paymentReceiverId = paymentReceiverFuture.getOrThrow()
assertThat(bob.services.vaultService.queryBy<FungibleAsset<*>>().states).isEmpty()
bob.assertFlowSentForObservationDueToConstraintError(paymentReceiverId)
bob.assertFlowSentForObservationDueToUntrustedAttachmentsException(paymentReceiverId)
// Restart Bob with the contracts CorDapp so that it can recover from the error
bob = mockNet.restartNode(bob, parameters = InternalMockNodeParameters(additionalCordapps = listOf(FINANCE_CONTRACTS_CORDAPP)))
@ -69,7 +69,7 @@ class ReceiveFinalityFlowTest {
.ofType(R::class.java)
}
private fun TestStartedNode.assertFlowSentForObservationDueToConstraintError(runId: StateMachineRunId) {
private fun TestStartedNode.assertFlowSentForObservationDueToUntrustedAttachmentsException(runId: StateMachineRunId) {
val observation = medicalRecordsOfType<Flow>()
.filter { it.flowId == runId }
.toBlocking()
@ -77,6 +77,6 @@ class ReceiveFinalityFlowTest {
assertThat(observation.outcome).isEqualTo(Outcome.OVERNIGHT_OBSERVATION)
assertThat(observation.by).contains(FinalityDoctor)
val error = observation.errors.single()
assertThat(error).isInstanceOf(TransactionVerificationException.ContractConstraintRejection::class.java)
assertThat(error).isInstanceOf(TransactionVerificationException.UntrustedAttachmentsException::class.java)
}
}


@ -55,7 +55,7 @@ class AttachmentsClassLoaderSerializationTests {
arrayOf(isolatedId, att1, att2).map { storage.openAttachment(it)!! },
testNetworkParameters(),
SecureHash.zeroHash,
{ attachmentTrustCalculator.calculate(it) }) { classLoader ->
{ attachmentTrustCalculator.calculate(it) }, attachmentsClassLoaderCache = null) { classLoader ->
val contractClass = Class.forName(ISOLATED_CONTRACT_CLASS_NAME, true, classLoader)
val contract = contractClass.getDeclaredConstructor().newInstance() as Contract
assertEquals("helloworld", contract.declaredField<Any?>("magicString").value)


@ -23,6 +23,7 @@ import net.corda.core.internal.inputStream
import net.corda.core.node.NetworkParameters
import net.corda.core.node.services.AttachmentId
import net.corda.core.serialization.internal.AttachmentsClassLoader
import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
import net.corda.testing.common.internal.testNetworkParameters
import net.corda.node.services.attachments.NodeAttachmentTrustCalculator
import net.corda.testing.contracts.DummyContract
@ -521,6 +522,7 @@ class AttachmentsClassLoaderTests {
val id = SecureHash.randomSHA256()
val timeWindow: TimeWindow? = null
val privacySalt = PrivacySalt()
val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(cacheFactory)
val transaction = createLedgerTransaction(
inputs,
outputs,
@ -532,7 +534,8 @@ class AttachmentsClassLoaderTests {
privacySalt,
testNetworkParameters(),
emptyList(),
isAttachmentTrusted = { true }
isAttachmentTrusted = { true },
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
transaction.verify()
}


@ -10,6 +10,7 @@ import net.corda.core.internal.AbstractAttachment
import net.corda.core.internal.TESTDSL_UPLOADER
import net.corda.core.internal.createLedgerTransaction
import net.corda.core.node.NotaryInfo
import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
import net.corda.core.transactions.SignedTransaction
import net.corda.core.transactions.WireTransaction
import net.corda.testing.common.internal.testNetworkParameters
@ -18,6 +19,7 @@ import net.corda.testing.core.*
import net.corda.testing.internal.createWireTransaction
import net.corda.testing.internal.fakeAttachment
import net.corda.coretesting.internal.rigorousMock
import net.corda.testing.internal.TestingNamedCacheFactory
import org.junit.Rule
import org.junit.Test
import java.math.BigInteger
@ -131,6 +133,7 @@ class TransactionTests {
val id = SecureHash.randomSHA256()
val timeWindow: TimeWindow? = null
val privacySalt = PrivacySalt()
val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(TestingNamedCacheFactory())
val transaction = createLedgerTransaction(
inputs,
outputs,
@ -142,7 +145,8 @@ class TransactionTests {
privacySalt,
testNetworkParameters(),
emptyList(),
isAttachmentTrusted = { true }
isAttachmentTrusted = { true },
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
transaction.verify()
@ -183,6 +187,7 @@ class TransactionTests {
val id = SecureHash.randomSHA256()
val timeWindow: TimeWindow? = null
val privacySalt = PrivacySalt()
val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(TestingNamedCacheFactory())
fun buildTransaction() = createLedgerTransaction(
inputs,
@ -195,7 +200,8 @@ class TransactionTests {
privacySalt,
testNetworkParameters(notaries = listOf(NotaryInfo(DUMMY_NOTARY, true))),
emptyList(),
isAttachmentTrusted = { true }
isAttachmentTrusted = { true },
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
assertFailsWith<TransactionVerificationException.NotaryChangeInWrongTransactionType> { buildTransaction().verify() }


@ -89,6 +89,7 @@ interface OwnableState : ContractState {
// DOCEND 3
/** Something which is scheduled to happen at a point in time. */
@KeepForDJVM
interface Scheduled {
val scheduledAt: Instant
}
@ -101,6 +102,7 @@ interface Scheduled {
* lifecycle processing needs to take place. e.g. a fixing or a late payment etc.
*/
@CordaSerializable
@KeepForDJVM
data class ScheduledStateRef(val ref: StateRef, override val scheduledAt: Instant) : Scheduled
/**
@ -115,7 +117,7 @@ data class ScheduledStateRef(val ref: StateRef, override val scheduledAt: Instan
* for a particular [ContractState] have been processed/fired etc. If the activity is not "on ledger" then the
* scheduled activity shouldn't be either.
*/
@DeleteForDJVM
@KeepForDJVM
data class ScheduledActivity(val logicRef: FlowLogicRef, override val scheduledAt: Instant) : Scheduled
// DOCSTART 2
@ -134,7 +136,7 @@ interface LinearState : ContractState {
val linearId: UniqueIdentifier
}
// DOCEND 2
@DeleteForDJVM
@KeepForDJVM
interface SchedulableState : ContractState {
/**
* Indicate whether there is some activity to be performed at some future point in time with respect to this


@ -7,6 +7,7 @@ import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowLogic
import net.corda.core.internal.cordapp.CordappImpl.Companion.UNKNOWN_VALUE
import net.corda.core.schemas.MappedSchema
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.SerializationCustomSerializer
import net.corda.core.serialization.SerializationWhitelist
import net.corda.core.serialization.SerializeAsToken
@ -29,6 +30,7 @@ import java.net.URL
* @property services List of RPC services
* @property serializationWhitelists List of Corda plugin registries
* @property serializationCustomSerializers List of serializers
* @property checkpointCustomSerializers List of serializers for checkpoints
* @property customSchemas List of custom schemas
* @property allFlows List of all flow classes
* @property jarPath The path to the JAR for this CorDapp
@ -49,6 +51,7 @@ interface Cordapp {
val services: List<Class<out SerializeAsToken>>
val serializationWhitelists: List<SerializationWhitelist>
val serializationCustomSerializers: List<SerializationCustomSerializer<*, *>>
val checkpointCustomSerializers: List<CheckpointCustomSerializer<*, *>>
val customSchemas: Set<MappedSchema>
val allFlows: List<Class<out FlowLogic<*>>>
val jarPath: URL


@ -25,6 +25,7 @@ import net.corda.core.node.NodeInfo
import net.corda.core.node.ServiceHub
import net.corda.core.serialization.CordaSerializable
import net.corda.core.transactions.SignedTransaction
import net.corda.core.utilities.NonEmptySet
import net.corda.core.utilities.ProgressTracker
import net.corda.core.utilities.UntrustworthyData
import net.corda.core.utilities.debug
@ -378,6 +379,22 @@ abstract class FlowLogic<out T> {
stateMachine.suspend(request, maySkipCheckpoint)
}
/**
* Closes the provided sessions and performs cleanup of any resources tied to these sessions.
*
* Note that sessions are closed automatically when the corresponding top-level flow terminates.
* So, it's beneficial to eagerly close them in long-lived flows that might have many open sessions that are not needed anymore and consume resources (e.g. memory, disk etc.).
* A closed session cannot be used anymore, e.g. to send or receive messages. So, you have to ensure you are calling this method only when the provided sessions are not going to be used anymore.
* As a result, any operations on a closed session will fail with an [UnexpectedFlowEndException].
* When a session is closed, the other side is informed and the session is closed there too eventually.
* To prevent misuse of the API, if there is an attempt to close an uninitialised session the invocation will fail with an [IllegalStateException].
*/
@Suspendable
fun close(sessions: NonEmptySet<FlowSession>) {
val request = FlowIORequest.CloseSessions(sessions)
stateMachine.suspend(request, false)
}
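To make the new API concrete, here is a minimal Kotlin sketch (illustrative only, not part of this change) of a long-lived initiating flow that eagerly releases its sessions once the counterparties are no longer needed; GatherStatusFlow and its message payloads are hypothetical names:

    import co.paralleluniverse.fibers.Suspendable
    import net.corda.core.flows.FlowLogic
    import net.corda.core.flows.InitiatingFlow
    import net.corda.core.flows.StartableByRPC
    import net.corda.core.identity.Party
    import net.corda.core.utilities.NonEmptySet

    @StartableByRPC
    @InitiatingFlow
    class GatherStatusFlow(private val counterparties: List<Party>) : FlowLogic<Unit>() {
        @Suspendable
        override fun call() {
            val sessions = counterparties.map { initiateFlow(it) }
            sessions.forEach { it.send("status?") }       // hypothetical request
            sessions.forEach { it.receive<String>() }     // hypothetical acknowledgement
            // The counterparties are no longer needed, so release session resources now
            // rather than waiting for the flow to terminate. Further send/receive on a
            // closed session fails with UnexpectedFlowEndException; closing an
            // uninitialised session throws IllegalStateException (see KDoc above).
            close(NonEmptySet.copyOf(sessions))
            // ... remaining long-running work that does not involve the counterparties ...
        }
    }

The single-session FlowSession.close() added further down in this diff has the same semantics for one session.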
/**
* Invokes the given subflow. This function returns once the subflow completes successfully with the result
* returned by that subflow's [call] method. If the subflow has a progress tracker, it is attached to the


@ -1,7 +1,9 @@
package net.corda.core.flows
import net.corda.core.CordaInternal
import net.corda.core.DeleteForDJVM
import net.corda.core.DoNotImplement
import net.corda.core.KeepForDJVM
import net.corda.core.serialization.CordaSerializable
/**
@ -11,11 +13,13 @@ import net.corda.core.serialization.CordaSerializable
* the flow to run at the scheduled time.
*/
@DoNotImplement
@KeepForDJVM
interface FlowLogicRefFactory {
/**
* Construct a FlowLogicRef. This is intended for cases where the calling code has the relevant class already
* and can provide it directly.
*/
@DeleteForDJVM
fun create(flowClass: Class<out FlowLogic<*>>, vararg args: Any?): FlowLogicRef
/**
@ -30,12 +34,14 @@ interface FlowLogicRefFactory {
* [SchedulableFlow] annotation.
*/
@CordaInternal
@DeleteForDJVM
fun createForRPC(flowClass: Class<out FlowLogic<*>>, vararg args: Any?): FlowLogicRef
/**
* Converts a [FlowLogicRef] object that was obtained from the calls above into a [FlowLogic], after doing some
* validation to ensure it points to a legitimate flow class.
*/
@DeleteForDJVM
fun toFlowLogic(ref: FlowLogicRef): FlowLogic<*>
}
@ -59,4 +65,5 @@ class IllegalFlowLogicException(val type: String, msg: String) :
// TODO: align this with the existing [FlowRef] in the bank-side API (probably replace some of the API classes)
@CordaSerializable
@DoNotImplement
@KeepForDJVM
interface FlowLogicRef

View File

@ -191,6 +191,19 @@ abstract class FlowSession {
*/
@Suspendable
abstract fun send(payload: Any)
/**
* Closes this session and performs cleanup of any resources tied to this session.
*
* Note that sessions are closed automatically when the corresponding top-level flow terminates.
* Eagerly closing a session is therefore mainly beneficial in long-lived flows that might have many open sessions that are no longer needed and consume resources (e.g. memory, disk etc.).
* A closed session cannot be used anymore, e.g. to send or receive messages, and any such operation on it will fail with an [UnexpectedFlowEndException],
* so make sure you only call this method when the session is not going to be used again.
* When a session is closed, the counterparty is informed and the session is eventually closed on its side as well.
* To prevent misuse of the API, if there is an attempt to close an uninitialised session the invocation will fail with an [IllegalStateException].
*/
@Suspendable
abstract fun close()
}
/**

View File

@ -28,7 +28,7 @@ import java.util.jar.JarInputStream
// *Internal* Corda-specific utilities.
const val PLATFORM_VERSION = 7
const val PLATFORM_VERSION = 8
fun ServicesForResolution.ensureMinimumPlatformVersion(requiredMinPlatformVersion: Int, feature: String) {
checkMinimumPlatformVersion(networkParameters.minimumPlatformVersion, requiredMinPlatformVersion, feature)

View File

@ -55,6 +55,13 @@ sealed class FlowIORequest<out R : Any> {
}}, shouldRetrySend=$shouldRetrySend)"
}
/**
* Closes the specified sessions.
*
* @property sessions the sessions to be closed.
*/
data class CloseSessions(val sessions: NonEmptySet<FlowSession>): FlowIORequest<Unit>()
/**
* Wait for a transaction to be committed to the database.
*

View File

@ -5,6 +5,7 @@ import net.corda.core.DeleteForDJVM
import net.corda.core.internal.notary.NotaryService
import net.corda.core.node.ServiceHub
import net.corda.core.node.StatesToRecord
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import java.util.concurrent.ExecutorService
// TODO: This should really be called ServiceHubInternal but that name is already taken by net.corda.node.services.api.ServiceHubInternal.
@ -21,6 +22,8 @@ interface ServiceHubCoreInternal : ServiceHub {
val notaryService: NotaryService?
fun createTransactionsResolver(flow: ResolveTransactionsFlow): TransactionsResolver
val attachmentsClassLoaderCache: AttachmentsClassLoaderCache
}
interface TransactionsResolver {

View File

@ -9,6 +9,7 @@ import net.corda.core.internal.VisibleForTesting
import net.corda.core.internal.notary.NotaryService
import net.corda.core.internal.toPath
import net.corda.core.schemas.MappedSchema
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.SerializationCustomSerializer
import net.corda.core.serialization.SerializationWhitelist
import net.corda.core.serialization.SerializeAsToken
@ -25,6 +26,7 @@ data class CordappImpl(
override val services: List<Class<out SerializeAsToken>>,
override val serializationWhitelists: List<SerializationWhitelist>,
override val serializationCustomSerializers: List<SerializationCustomSerializer<*, *>>,
override val checkpointCustomSerializers: List<CheckpointCustomSerializer<*, *>>,
override val customSchemas: Set<MappedSchema>,
override val allFlows: List<Class<out FlowLogic<*>>>,
override val jarPath: URL,
@ -79,6 +81,7 @@ data class CordappImpl(
services = emptyList(),
serializationWhitelists = emptyList(),
serializationCustomSerializers = emptyList(),
checkpointCustomSerializers = emptyList(),
customSchemas = emptySet(),
jarPath = Paths.get("").toUri().toURL(),
info = UNKNOWN_INFO,

View File

@ -25,3 +25,26 @@ interface SerializationCustomSerializer<OBJ, PROXY> {
*/
fun fromProxy(proxy: PROXY): OBJ
}
/**
* Allows CorDapps to provide custom serializers for classes that do not serialize successfully during a checkpoint.
* For such a class, a proxy serializer implementing this interface can be written; its purpose is to convert between the
* unserializable type and a serializable intermediate representation.
*
* NOTE: Only implement this interface if you have a class that triggers an error during normal checkpoint
* serialization/deserialization.
*/
@KeepForDJVM
interface CheckpointCustomSerializer<OBJ, PROXY> {
/**
* Converts the third-party object into an instance of the serializable local class specified by [PROXY].
*/
fun toProxy(obj: OBJ): PROXY
/**
* Converts the proxy object back into a new instance of the unserializable type.
*/
fun fromProxy(proxy: PROXY): OBJ
}
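As an illustration, a proxy serializer for a hypothetical third-party type might look like the sketch below (the ThirdPartyRateCalculator class and serializer name are assumptions made for the example, not part of this change):

import net.corda.core.serialization.CheckpointCustomSerializer

// Hypothetical third-party class that cannot be checkpointed as-is
// (e.g. no accessible no-arg constructor, non-serializable internals).
class ThirdPartyRateCalculator(val baseRate: Double) {
    fun rateFor(units: Int): Double = baseRate * units
}

// Proxy-based checkpoint serializer: capture just enough state (the base rate)
// to rebuild an equivalent instance on deserialization.
class RateCalculatorSerializer : CheckpointCustomSerializer<ThirdPartyRateCalculator, Double> {
    override fun toProxy(obj: ThirdPartyRateCalculator): Double = obj.baseRate
    override fun fromProxy(proxy: Double): ThirdPartyRateCalculator = ThirdPartyRateCalculator(proxy)
}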

View File

@ -1,5 +1,8 @@
package net.corda.core.serialization.internal
import com.github.benmanes.caffeine.cache.Cache
import com.github.benmanes.caffeine.cache.Caffeine
import net.corda.core.DeleteForDJVM
import net.corda.core.contracts.Attachment
import net.corda.core.contracts.ContractAttachment
import net.corda.core.contracts.TransactionVerificationException
@ -21,6 +24,7 @@ import java.lang.ref.WeakReference
import java.net.*
import java.security.Permission
import java.util.*
import java.util.function.Function
/**
* A custom ClassLoader that knows how to load classes from a set of attachments. The attachments themselves only
@ -289,31 +293,27 @@ class AttachmentsClassLoader(attachments: List<Attachment>,
*/
@VisibleForTesting
object AttachmentsClassLoaderBuilder {
private const val CACHE_SIZE = 1000
const val CACHE_SIZE = 16
// We use a set here because the ordering of attachments doesn't affect code execution, due to the no
// overlap rule, and attachments don't have any particular ordering enforced by the builders. So we
// can just do unordered comparisons here. But the same attachments run with different network parameters
// may behave differently, so that has to be a part of the cache key.
private data class Key(val hashes: Set<SecureHash>, val params: NetworkParameters)
// This runs in the DJVM so it can't use caffeine.
private val cache: MutableMap<Key, SerializationContext> = createSimpleCache<Key, SerializationContext>(CACHE_SIZE).toSynchronised()
private val fallBackCache: AttachmentsClassLoaderCache = AttachmentsClassLoaderSimpleCacheImpl(CACHE_SIZE)
/**
* Runs the given block with serialization execution context set up with a (possibly cached) attachments classloader.
*
* @param txId The transaction ID that triggered this request; it's unused except for error messages and exceptions that can occur during setup.
*/
@Suppress("LongParameterList")
fun <T> withAttachmentsClassloaderContext(attachments: List<Attachment>,
params: NetworkParameters,
txId: SecureHash,
isAttachmentTrusted: (Attachment) -> Boolean,
parent: ClassLoader = ClassLoader.getSystemClassLoader(),
attachmentsClassLoaderCache: AttachmentsClassLoaderCache?,
block: (ClassLoader) -> T): T {
val attachmentIds = attachments.map(Attachment::id).toSet()
val serializationContext = cache.computeIfAbsent(Key(attachmentIds, params)) {
val cache = attachmentsClassLoaderCache ?: fallBackCache
val serializationContext = cache.computeIfAbsent(AttachmentsClassLoaderKey(attachmentIds, params), Function {
// Create classloader and load serializers, whitelisted classes
val transactionClassLoader = AttachmentsClassLoader(attachments, params, txId, isAttachmentTrusted, parent)
val serializers = try {
@ -336,7 +336,7 @@ object AttachmentsClassLoaderBuilder {
.withWhitelist(whitelistedClasses)
.withCustomSerializers(serializers)
.withoutCarpenter()
}
})
// Deserialize all relevant classes in the transaction classloader.
return SerializationFactory.defaultFactory.withCurrentContext(serializationContext) {
@ -420,6 +420,36 @@ private class AttachmentsHolderImpl : AttachmentsHolder {
}
}
interface AttachmentsClassLoaderCache {
fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext
}
@DeleteForDJVM
class AttachmentsClassLoaderCacheImpl(cacheFactory: NamedCacheFactory) : SingletonSerializeAsToken(), AttachmentsClassLoaderCache {
private val cache: Cache<AttachmentsClassLoaderKey, SerializationContext> = cacheFactory.buildNamed(Caffeine.newBuilder(), "AttachmentsClassLoader_cache")
override fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext {
return cache.get(key, mappingFunction) ?: throw NullPointerException("null returned from cache mapping function")
}
}
class AttachmentsClassLoaderSimpleCacheImpl(cacheSize: Int) : AttachmentsClassLoaderCache {
private val cache: MutableMap<AttachmentsClassLoaderKey, SerializationContext>
= createSimpleCache<AttachmentsClassLoaderKey, SerializationContext>(cacheSize).toSynchronised()
override fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext {
return cache.computeIfAbsent(key, mappingFunction)
}
}
// We use a set here because the ordering of attachments doesn't affect code execution, due to the no
// overlap rule, and attachments don't have any particular ordering enforced by the builders. So we
// can just do unordered comparisons here. But the same attachments run with different network parameters
// may behave differently, so that has to be a part of the cache key.
data class AttachmentsClassLoaderKey(val hashes: Set<SecureHash>, val params: NetworkParameters)
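A small sketch of the key's equality semantics, which is what lets the cache hit for the same attachments regardless of ordering (the hashes and parameters are supplied by the caller in this illustration):

import net.corda.core.crypto.SecureHash
import net.corda.core.node.NetworkParameters
import net.corda.core.serialization.internal.AttachmentsClassLoaderKey

fun demonstrateKeyEquality(hash1: SecureHash, hash2: SecureHash, params: NetworkParameters) {
    // Attachment ordering does not matter: the key holds an unordered Set of hashes.
    val keyA = AttachmentsClassLoaderKey(setOf(hash1, hash2), params)
    val keyB = AttachmentsClassLoaderKey(setOf(hash2, hash1), params)
    check(keyA == keyB) // same cache entry, so the SerializationContext is reused
}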
private class AttachmentURLConnection(url: URL, private val attachment: Attachment) : URLConnection(url) {
override fun getContentLengthLong(): Long = attachment.size.toLong()
override fun getInputStream(): InputStream = attachment.open()

View File

@ -56,6 +56,10 @@ interface CheckpointSerializationContext {
* otherwise they appear as new copies of the object.
*/
val objectReferencesEnabled: Boolean
/**
* User defined custom serializers for use in checkpoint serialization.
*/
val checkpointCustomSerializers: Iterable<CheckpointCustomSerializer<*,*>>
/**
* Helper method to return a new context based on this context with the property added.
@ -86,6 +90,11 @@ interface CheckpointSerializationContext {
* A shallow copy of this context but with the given encoding whitelist.
*/
fun withEncodingWhitelist(encodingWhitelist: EncodingWhitelist): CheckpointSerializationContext
/**
* A shallow copy of this context but with the given custom serializers.
*/
fun withCheckpointCustomSerializers(checkpointCustomSerializers: Iterable<CheckpointCustomSerializer<*, *>>): CheckpointSerializationContext
}
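For orientation, a hedged sketch of how node-side wiring could attach the CorDapp-provided serializers to a checkpoint context via the new method (the helper name is hypothetical; the actual node wiring is not shown in this hunk):

import net.corda.core.cordapp.Cordapp
import net.corda.core.serialization.internal.CheckpointSerializationContext

// Hypothetical helper: collect every CheckpointCustomSerializer declared by the
// loaded CorDapps and return a context copy that knows about them.
fun CheckpointSerializationContext.withCordappCheckpointSerializers(
        cordapps: List<Cordapp>
): CheckpointSerializationContext =
        withCheckpointCustomSerializers(cordapps.flatMap { it.checkpointCustomSerializers })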
/*

View File

@ -153,7 +153,8 @@ data class ContractUpgradeWireTransaction(
listOf(legacyAttachment, upgradedAttachment),
params,
id,
{ (services as ServiceHubCoreInternal).attachmentTrustCalculator.calculate(it) }) { transactionClassLoader ->
{ (services as ServiceHubCoreInternal).attachmentTrustCalculator.calculate(it) },
attachmentsClassLoaderCache = (services as ServiceHubCoreInternal).attachmentsClassLoaderCache) { transactionClassLoader ->
val resolvedInput = binaryInput.deserialize()
val upgradedContract = upgradedContract(upgradedContractClassName, transactionClassLoader)
val outputState = calculateUpgradedState(resolvedInput, upgradedContract, upgradedAttachment)

View File

@ -26,6 +26,7 @@ import net.corda.core.internal.deserialiseComponentGroup
import net.corda.core.internal.isUploaderTrusted
import net.corda.core.internal.uncheckedCast
import net.corda.core.node.NetworkParameters
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.internal.AttachmentsClassLoaderBuilder
import net.corda.core.utilities.contextLogger
import java.util.Collections.unmodifiableList
@ -87,7 +88,8 @@ private constructor(
private val serializedInputs: List<SerializedStateAndRef>?,
private val serializedReferences: List<SerializedStateAndRef>?,
private val isAttachmentTrusted: (Attachment) -> Boolean,
private val verifierFactory: (LedgerTransaction, ClassLoader) -> Verifier
private val verifierFactory: (LedgerTransaction, ClassLoader) -> Verifier,
private val attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
) : FullTransaction() {
init {
@ -124,7 +126,8 @@ private constructor(
componentGroups: List<ComponentGroup>? = null,
serializedInputs: List<SerializedStateAndRef>? = null,
serializedReferences: List<SerializedStateAndRef>? = null,
isAttachmentTrusted: (Attachment) -> Boolean
isAttachmentTrusted: (Attachment) -> Boolean,
attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
): LedgerTransaction {
return LedgerTransaction(
inputs = inputs,
@ -141,7 +144,8 @@ private constructor(
serializedInputs = protect(serializedInputs),
serializedReferences = protect(serializedReferences),
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = ::BasicVerifier
verifierFactory = ::BasicVerifier,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
}
@ -176,7 +180,8 @@ private constructor(
serializedInputs = null,
serializedReferences = null,
isAttachmentTrusted = { true },
verifierFactory = ::BasicVerifier
verifierFactory = ::BasicVerifier,
attachmentsClassLoaderCache = null
)
}
}
@ -218,7 +223,8 @@ private constructor(
txAttachments,
getParamsWithGoo(),
id,
isAttachmentTrusted = isAttachmentTrusted) { transactionClassLoader ->
isAttachmentTrusted = isAttachmentTrusted,
attachmentsClassLoaderCache = attachmentsClassLoaderCache) { transactionClassLoader ->
// Create a copy of the outer LedgerTransaction which deserializes all fields inside the [transactionClassLoader].
// Only the copy will be used for verification, and the outer shell will be discarded.
// This artifice is required to preserve backwards compatibility.
@ -254,7 +260,8 @@ private constructor(
serializedInputs = serializedInputs,
serializedReferences = serializedReferences,
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = alternateVerifier
verifierFactory = alternateVerifier,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
// Read network parameters with backwards compatibility goo.
@ -320,7 +327,8 @@ private constructor(
serializedInputs = serializedInputs,
serializedReferences = serializedReferences,
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = verifierFactory
verifierFactory = verifierFactory,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
} else {
// This branch is only present for backwards compatibility.
@ -704,7 +712,8 @@ private constructor(
serializedInputs = null,
serializedReferences = null,
isAttachmentTrusted = { it.isUploaderTrusted() },
verifierFactory = ::BasicVerifier
verifierFactory = ::BasicVerifier,
attachmentsClassLoaderCache = null
)
@Deprecated("LedgerTransaction should not be created directly, use WireTransaction.toLedgerTransaction instead.")
@ -733,7 +742,8 @@ private constructor(
serializedInputs = null,
serializedReferences = null,
isAttachmentTrusted = { it.isUploaderTrusted() },
verifierFactory = ::BasicVerifier
verifierFactory = ::BasicVerifier,
attachmentsClassLoaderCache = null
)
@Deprecated("LedgerTransactions should not be created directly, use WireTransaction.toLedgerTransaction instead.")
@ -761,7 +771,8 @@ private constructor(
serializedInputs = serializedInputs,
serializedReferences = serializedReferences,
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = verifierFactory
verifierFactory = verifierFactory,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
}
@ -791,7 +802,8 @@ private constructor(
serializedInputs = serializedInputs,
serializedReferences = serializedReferences,
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = verifierFactory
verifierFactory = verifierFactory,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
}
}

View File

@ -15,6 +15,7 @@ import net.corda.core.node.ServicesForResolution
import net.corda.core.node.services.AttachmentId
import net.corda.core.serialization.CordaSerializable
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.serialize
import net.corda.core.utilities.OpaqueBytes
import java.security.PublicKey
@ -109,7 +110,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
services.networkParametersService.lookup(hashToResolve)
},
// `as?` is used due to [MockServices] not implementing [ServiceHubCoreInternal]
isAttachmentTrusted = { (services as? ServiceHubCoreInternal)?.attachmentTrustCalculator?.calculate(it) ?: true }
isAttachmentTrusted = { (services as? ServiceHubCoreInternal)?.attachmentTrustCalculator?.calculate(it) ?: true },
attachmentsClassLoaderCache = (services as? ServiceHubCoreInternal)?.attachmentsClassLoaderCache
)
)
}
@ -145,7 +147,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
resolveAttachment,
{ stateRef -> resolveStateRef(stateRef)?.serialize() },
{ null },
{ it.isUploaderTrusted() }
{ it.isUploaderTrusted() },
null
)
}
@ -161,16 +164,19 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
resolveAttachment,
{ stateRef -> resolveStateRef(stateRef)?.serialize() },
resolveParameters,
{ true } // Any attachment loaded through the DJVM should be trusted
{ true }, // Any attachment loaded through the DJVM should be trusted
null
)
}
@Suppress("LongParameterList", "ThrowsCount")
private fun toLedgerTransactionInternal(
resolveIdentity: (PublicKey) -> Party?,
resolveAttachment: (SecureHash) -> Attachment?,
resolveStateRefAsSerialized: (StateRef) -> SerializedBytes<TransactionState<ContractState>>?,
resolveParameters: (SecureHash?) -> NetworkParameters?,
isAttachmentTrusted: (Attachment) -> Boolean
isAttachmentTrusted: (Attachment) -> Boolean,
attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
): LedgerTransaction {
// Look up public keys to authenticated identities.
val authenticatedCommands = commands.lazyMapped { cmd, _ ->
@ -206,7 +212,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
componentGroups,
serializedResolvedInputs,
serializedResolvedReferences,
isAttachmentTrusted
isAttachmentTrusted,
attachmentsClassLoaderCache
)
checkTransactionSize(ltx, resolvedNetworkParameters.maxTransactionSize, serializedResolvedInputs, serializedResolvedReferences)

View File

@ -4,6 +4,7 @@ import net.corda.core.contracts.*
import net.corda.core.crypto.SecureHash
import net.corda.core.identity.Party
import net.corda.core.node.NetworkParameters
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.transactions.ComponentGroup
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.transactions.WireTransaction
@ -17,6 +18,7 @@ fun WireTransaction.accessGroupHashes() = this.groupHashes
fun WireTransaction.accessGroupMerkleRoots() = this.groupsMerkleRoots
fun WireTransaction.accessAvailableComponentHashes() = this.availableComponentHashes
@Suppress("LongParameterList")
fun createLedgerTransaction(
inputs: List<StateAndRef<ContractState>>,
outputs: List<TransactionState<ContractState>>,
@ -31,8 +33,9 @@ fun createLedgerTransaction(
componentGroups: List<ComponentGroup>? = null,
serializedInputs: List<SerializedStateAndRef>? = null,
serializedReferences: List<SerializedStateAndRef>? = null,
isAttachmentTrusted: (Attachment) -> Boolean
): LedgerTransaction = LedgerTransaction.create(inputs, outputs, commands, attachments, id, notary, timeWindow, privacySalt, networkParameters, references, componentGroups, serializedInputs, serializedReferences, isAttachmentTrusted)
isAttachmentTrusted: (Attachment) -> Boolean,
attachmentsClassLoaderCache: AttachmentsClassLoaderCache
): LedgerTransaction = LedgerTransaction.create(inputs, outputs, commands, attachments, id, notary, timeWindow, privacySalt, networkParameters, references, componentGroups, serializedInputs, serializedReferences, isAttachmentTrusted, attachmentsClassLoaderCache)
fun createContractCreationError(txId: SecureHash, contractClass: String, cause: Throwable) = TransactionVerificationException.ContractCreationError(txId, contractClass, cause)
fun createContractRejection(txId: SecureHash, contract: Contract, cause: Throwable) = TransactionVerificationException.ContractRejection(txId, contract, cause)

View File

@ -1398,7 +1398,7 @@
<ID>ThrowsCount:JarScanningCordappLoader.kt$JarScanningCordappLoader$private fun parseVersion(versionStr: String?, attributeName: String): Int</ID>
<ID>ThrowsCount:LedgerDSLInterpreter.kt$Verifies$ fun failsWith(expectedMessage: String?): EnforceVerifyOrFail</ID>
<ID>ThrowsCount:MockServices.kt$ fun &lt;T : SerializeAsToken&gt; createMockCordaService(serviceHub: MockServices, serviceConstructor: (AppServiceHub) -&gt; T): T</ID>
<ID>ThrowsCount:NetworkRegistrationHelper.kt$NetworkRegistrationHelper$private fun validateCertificates(registeringPublicKey: PublicKey, certificates: List&lt;X509Certificate&gt;)</ID>
<ID>ThrowsCount:NetworkRegistrationHelper.kt$NetworkRegistrationHelper$private fun validateCertificates( registeringPublicKey: PublicKey, registeringLegalName: CordaX500Name, expectedCertRole: CertRole, certificates: List&lt;X509Certificate&gt; )</ID>
<ID>ThrowsCount:NodeInfoFilesCopier.kt$NodeInfoFilesCopier$private fun atomicCopy(source: Path, destination: Path)</ID>
<ID>ThrowsCount:NodeVaultService.kt$NodeVaultService$@Throws(VaultQueryException::class) private fun &lt;T : ContractState&gt; _queryBy(criteria: QueryCriteria, paging_: PageSpecification, sorting: Sort, contractStateType: Class&lt;out T&gt;, skipPagingChecks: Boolean): Vault.Page&lt;T&gt;</ID>
<ID>ThrowsCount:NodeVaultService.kt$NodeVaultService$private fun makeUpdates(batch: Iterable&lt;CoreTransaction&gt;, statesToRecord: StatesToRecord, previouslySeen: Boolean): List&lt;Vault.Update&lt;ContractState&gt;&gt;</ID>

View File

@ -11,7 +11,7 @@ evaluationDependsOn(':jdk8u-deterministic')
def jdk8uDeterministic = project(':jdk8u-deterministic')
ext {
jdkTask = jdk8uDeterministic.assemble
jdkTask = jdk8uDeterministic.tasks.named('assemble')
deterministic_jdk_home = jdk8uDeterministic.jdk_home
deterministic_rt_jar = jdk8uDeterministic.rt_jar
}

docs/build.gradle (new file)
View File

@ -0,0 +1,122 @@
import org.apache.tools.ant.taskdefs.condition.Os
apply plugin: 'org.jetbrains.dokka'
apply plugin: 'net.corda.plugins.publish-utils'
apply plugin: 'maven-publish'
apply plugin: 'com.jfrog.artifactory'
def internalPackagePrefixes(sourceDirs) {
def prefixes = []
// Kotlin allows packages to deviate from the directory structure, but let's assume they don't:
sourceDirs.collect { sourceDir ->
sourceDir.traverse(type: groovy.io.FileType.DIRECTORIES) {
if (it.name == 'internal') {
prefixes.add sourceDir.toPath().relativize(it.toPath()).toString().replace(File.separator, '.')
}
}
}
prefixes
}
ext {
// TODO: Add '../client/jfx/src/main/kotlin' and '../client/mock/src/main/kotlin' if we decide to make them into public API
dokkaSourceDirs = files('../core/src/main/kotlin', '../client/rpc/src/main/kotlin', '../finance/workflows/src/main/kotlin', '../finance/contracts/src/main/kotlin', '../client/jackson/src/main/kotlin',
'../testing/test-utils/src/main/kotlin', '../testing/node-driver/src/main/kotlin')
internalPackagePrefixes = internalPackagePrefixes(dokkaSourceDirs)
archivedApiDocsBaseFilename = 'api-docs'
}
dokka {
outputDirectory = file("${rootProject.rootDir}/docs/build/html/api/kotlin")
}
task dokkaJavadoc(type: org.jetbrains.dokka.gradle.DokkaTask) {
outputFormat = "javadoc"
outputDirectory = file("${rootProject.rootDir}/docs/build/html/api/javadoc")
}
[dokka, dokkaJavadoc].collect {
it.configure {
moduleName = 'corda'
processConfigurations = ['compile']
sourceDirs = dokkaSourceDirs
includes = ['packages.md']
jdkVersion = 8
externalDocumentationLink {
url = new URL("http://fasterxml.github.io/jackson-core/javadoc/2.9/")
}
externalDocumentationLink {
url = new URL("https://docs.oracle.com/javafx/2/api/")
}
externalDocumentationLink {
url = new URL("http://www.bouncycastle.org/docs/docs1.5on/")
}
internalPackagePrefixes.collect { packagePrefix ->
packageOptions {
prefix = packagePrefix
suppress = true
}
}
}
}
task apidocs(dependsOn: ['dokka', 'dokkaJavadoc']) {
group "Documentation"
description "Build API documentation"
}
task makeHTMLDocs(type: Exec){
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
commandLine "docker", "run", "--rm", "-v", "${project.projectDir}:/opt/docs_builder", "-v", "${project.projectDir}/..:/opt", "corda/docs-builder:latest", "bash", "-c", "make-docsite-html.sh"
} else {
commandLine "bash", "-c", "docker run --rm --user \$(id -u):\$(id -g) -v ${project.projectDir}:/opt/docs_builder -v ${project.projectDir}/..:/opt corda/docs-builder:latest bash -c make-docsite-html.sh"
}
}
task makePDFDocs(type: Exec){
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
commandLine "docker", "run", "--rm", "-v", "${project.projectDir}:/opt/docs_builder", "-v", "${project.projectDir}/..:/opt", "corda/docs-builder:latest", "bash", "-c", "make-docsite-pdf.sh"
} else {
commandLine "bash", "-c", "docker run --rm --user \$(id -u):\$(id -g) -v ${project.projectDir}:/opt/docs_builder -v ${project.projectDir}/..:/opt corda/docs-builder:latest bash -c make-docsite-pdf.sh"
}
}
task makeDocs(dependsOn: ['makeHTMLDocs', 'makePDFDocs'])
apidocs.shouldRunAfter makeDocs
task archiveApiDocs(type: Tar) {
dependsOn apidocs
from buildDir
include 'html/**'
extension 'tgz'
compression Compression.GZIP
}
publishing {
publications {
if (System.getProperty('publishApiDocs') != null) {
archivedApiDocs(MavenPublication) {
artifact archiveApiDocs {
artifactId archivedApiDocsBaseFilename
}
}
}
}
}
artifactoryPublish {
publications('archivedApiDocs')
version = version.replaceAll('-SNAPSHOT', '')
publishPom = false
}
artifactory {
publish {
contextUrl = artifactory_contextUrl
repository {
repoKey = 'corda-dependencies-dev'
username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
}
}
}

View File

@ -37,7 +37,9 @@ def copyJdk = tasks.register('copyJdk', Copy) {
}
}
assemble.dependsOn copyJdk
tasks.named('assemble') {
dependsOn copyJdk
}
tasks.named('jar', Jar) {
enabled = false
}

View File

@ -6,6 +6,7 @@ import net.corda.core.crypto.internal.Instances
import org.bouncycastle.asn1.x509.AlgorithmIdentifier
import org.bouncycastle.operator.ContentSigner
import java.io.OutputStream
import java.security.InvalidKeyException
import java.security.PrivateKey
import java.security.Provider
import java.security.SecureRandom
@ -24,14 +25,18 @@ object ContentSignerBuilder {
else
Signature.getInstance(signatureScheme.signatureName, provider)
val sig = signatureInstance.apply {
// TODO special handling for Sphincs due to a known BouncyCastle's Sphincs bug we reported.
// It is fixed in BC 161b12, so consider updating the below if-statement after updating BouncyCastle.
if (random != null && signatureScheme != SPHINCS256_SHA256) {
initSign(privateKey, random)
} else {
initSign(privateKey)
val sig = try {
signatureInstance.apply {
// TODO special handling for Sphincs due to a known BouncyCastle Sphincs bug that we reported.
// It is fixed in BC 161b12, so consider updating the below if-statement after updating BouncyCastle.
if (random != null && signatureScheme != SPHINCS256_SHA256) {
initSign(privateKey, random)
} else {
initSign(privateKey)
}
}
} catch(ex: InvalidKeyException) {
throw InvalidKeyException("Incorrect key type ${privateKey.algorithm} for signature scheme ${signatureInstance.algorithm}", ex)
}
return object : ContentSigner {
private val stream = SignatureOutputStream(sig, optimised)

View File

@ -0,0 +1,103 @@
package net.corda.nodeapi.internal.serialization.kryo
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.Serializer
import com.esotericsoftware.kryo.io.Input
import com.esotericsoftware.kryo.io.Output
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.serialization.internal.amqp.CORDAPP_TYPE
import java.lang.reflect.Type
import kotlin.reflect.jvm.javaType
import kotlin.reflect.jvm.jvmErasure
/**
* Adapts CheckpointCustomSerializer for use in Kryo
*/
internal class CustomSerializerCheckpointAdaptor<OBJ, PROXY>(private val userSerializer : CheckpointCustomSerializer<OBJ, PROXY>) : Serializer<OBJ>() {
/**
* The class name of the serializer we are adapting.
*/
val serializerName: String = userSerializer.javaClass.name
/**
* The input type of this custom serializer.
*/
val cordappType: Type
/**
* Check we have access to the types specified on the CheckpointCustomSerializer interface.
*
* Throws UnableToDetermineSerializerTypesException if the types are missing.
*/
init {
val types: List<Type> = userSerializer::class
.supertypes
.filter { it.jvmErasure == CheckpointCustomSerializer::class }
.flatMap { it.arguments }
.mapNotNull { it.type?.javaType }
// We are expecting a cordapp type and a proxy type.
// We will only use the cordapp type in this class
// but we want to check both are present.
val typeParameterCount = 2
if (types.size != typeParameterCount) {
throw UnableToDetermineSerializerTypesException("Unable to determine serializer parent types")
}
cordappType = types[CORDAPP_TYPE]
}
/**
* Serialize obj to the Kryo stream.
*/
override fun write(kryo: Kryo, output: Output, obj: OBJ) {
fun <T> writeToKryo(obj: T) = kryo.writeClassAndObject(output, obj)
// Write serializer type
writeToKryo(serializerName)
// Write proxy object
writeToKryo(userSerializer.toProxy(obj))
}
/**
* Deserialize an object from the Kryo stream.
*/
override fun read(kryo: Kryo, input: Input, type: Class<OBJ>): OBJ {
@Suppress("UNCHECKED_CAST")
fun <T> readFromKryo() = kryo.readClassAndObject(input) as T
// Check the serializer type
checkSerializerType(readFromKryo())
// Read the proxy object
return userSerializer.fromProxy(readFromKryo())
}
/**
* Throws a `CustomCheckpointSerializersHaveChangedException` if the serializer type in the kryo stream does not match the serializer
* type for this custom serializer.
*
* @param checkpointSerializerType Serializer type from the Kryo stream
*/
private fun checkSerializerType(checkpointSerializerType: String) {
if (checkpointSerializerType != serializerName)
throw CustomCheckpointSerializersHaveChangedException("The custom checkpoint serializers have changed while checkpoints exist. " +
"Please restore the CorDapps to when this checkpoint was created.")
}
}
/**
* Thrown when the input/output types are missing from the custom serializer.
*/
class UnableToDetermineSerializerTypesException(message: String) : RuntimeException(message)
/**
* Thrown when the custom serializer is found to be reading data from another type of custom serializer.
*
* This was expected to happen if the user adds or removes CorDapps while checkpoints exist, but in practice registering the
* serializers as defaults made the system reliable.
*/
class CustomCheckpointSerializersHaveChangedException(message: String) : RuntimeException(message)

View File

@ -10,12 +10,14 @@ import com.esotericsoftware.kryo.io.Output
import com.esotericsoftware.kryo.pool.KryoPool
import com.esotericsoftware.kryo.serializers.ClosureSerializer
import net.corda.core.internal.uncheckedCast
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.ClassWhitelist
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.internal.CheckpointSerializationContext
import net.corda.core.serialization.internal.CheckpointSerializer
import net.corda.core.utilities.ByteSequence
import net.corda.core.utilities.loggerFor
import net.corda.serialization.internal.AlwaysAcceptEncodingWhitelist
import net.corda.serialization.internal.ByteBufferInputStream
import net.corda.serialization.internal.CheckpointSerializationContextImpl
@ -40,10 +42,10 @@ private object AutoCloseableSerialisationDetector : Serializer<AutoCloseable>()
}
object KryoCheckpointSerializer : CheckpointSerializer {
private val kryoPoolsForContexts = ConcurrentHashMap<Pair<ClassWhitelist, ClassLoader>, KryoPool>()
private val kryoPoolsForContexts = ConcurrentHashMap<Triple<ClassWhitelist, ClassLoader, Iterable<CheckpointCustomSerializer<*,*>>>, KryoPool>()
private fun getPool(context: CheckpointSerializationContext): KryoPool {
return kryoPoolsForContexts.computeIfAbsent(Pair(context.whitelist, context.deserializationClassLoader)) {
return kryoPoolsForContexts.computeIfAbsent(Triple(context.whitelist, context.deserializationClassLoader, context.checkpointCustomSerializers)) {
KryoPool.Builder {
val serializer = Fiber.getFiberSerializer(false) as KryoSerializer
val classResolver = CordaClassResolver(context).apply { setKryo(serializer.kryo) }
@ -56,12 +58,60 @@ object KryoCheckpointSerializer : CheckpointSerializer {
addDefaultSerializer(AutoCloseable::class.java, AutoCloseableSerialisationDetector)
register(ClosureSerializer.Closure::class.java, CordaClosureSerializer)
classLoader = it.second
// Add custom serializers
val customSerializers = buildCustomSerializerAdaptors(context)
warnAboutDuplicateSerializers(customSerializers)
val classToSerializer = mapInputClassToCustomSerializer(context.deserializationClassLoader, customSerializers)
addDefaultCustomSerializers(this, classToSerializer)
}
}.build()
}
}
/**
* Returns a sorted list of CustomSerializerCheckpointAdaptor based on the custom serializers inside context.
*
* The adaptors are sorted by serializerName, which maps to javaClass.name of the serializer class.
*/
private fun buildCustomSerializerAdaptors(context: CheckpointSerializationContext) =
context.checkpointCustomSerializers.map { CustomSerializerCheckpointAdaptor(it) }.sortedBy { it.serializerName }
/**
* Returns a list of pairs where the first element is the input class of the custom serializer and the second element is the
* custom serializer.
*/
private fun mapInputClassToCustomSerializer(classLoader: ClassLoader, customSerializers: Iterable<CustomSerializerCheckpointAdaptor<*, *>>) =
customSerializers.map { getInputClassForCustomSerializer(classLoader, it) to it }
/**
* Returns the Class object for the serializers input type.
*/
private fun getInputClassForCustomSerializer(classLoader: ClassLoader, customSerializer: CustomSerializerCheckpointAdaptor<*, *>): Class<*> {
val typeNameWithoutGenerics = customSerializer.cordappType.typeName.substringBefore('<')
return classLoader.loadClass(typeNameWithoutGenerics)
}
/**
* Emit a warning if two or more custom serializers are found for the same input type.
*/
private fun warnAboutDuplicateSerializers(customSerializers: Iterable<CustomSerializerCheckpointAdaptor<*,*>>) =
customSerializers
.groupBy({ it.cordappType }, { it.serializerName })
.filter { (_, serializerNames) -> serializerNames.distinct().size > 1 }
.forEach { (inputType, serializerNames) -> loggerFor<KryoCheckpointSerializer>().warn("Duplicate custom checkpoint serializer for type $inputType. Serializers: ${serializerNames.joinToString(", ")}") }
/**
* Register all custom serializers as default, this class + subclass, registrations.
*
* Serializers registered before this will take priority. This needs to run after the registrations we want to keep, otherwise it may
* replace them.
*/
private fun addDefaultCustomSerializers(kryo: Kryo, classToSerializer: Iterable<Pair<Class<*>, CustomSerializerCheckpointAdaptor<*, *>>>) =
classToSerializer
.forEach { (clazz, customSerializer) -> kryo.addDefaultSerializer(clazz, customSerializer) }
private fun <T : Any> CheckpointSerializationContext.kryo(task: Kryo.() -> T): T {
return getPool(this).run { kryo ->
kryo.context.ensureCapacity(properties.size)

View File

@ -0,0 +1,33 @@
package net.corda.nodeapi.internal.crypto
import net.corda.core.crypto.Crypto
import org.assertj.core.api.Assertions.assertThatExceptionOfType
import org.junit.Test
import java.math.BigInteger
import java.security.InvalidKeyException
class ContentSignerBuilderTest {
companion object {
private const val entropy = "20200723"
}
@Test(timeout = 300_000)
fun `should build content signer for valid eddsa key`() {
val signatureScheme = Crypto.EDDSA_ED25519_SHA512
val provider = Crypto.findProvider(signatureScheme.providerName)
val issuerKeyPair = Crypto.deriveKeyPairFromEntropy(signatureScheme, BigInteger(entropy))
ContentSignerBuilder.build(signatureScheme, issuerKeyPair.private, provider)
}
@Test(timeout = 300_000)
fun `should fail to build content signer for incorrect key type`() {
val signatureScheme = Crypto.EDDSA_ED25519_SHA512
val provider = Crypto.findProvider(signatureScheme.providerName)
val issuerKeyPair = Crypto.deriveKeyPairFromEntropy(Crypto.ECDSA_SECP256R1_SHA256, BigInteger(entropy))
assertThatExceptionOfType(InvalidKeyException::class.java)
.isThrownBy {
ContentSignerBuilder.build(signatureScheme, issuerKeyPair.private, provider)
}
.withMessage("Incorrect key type EC for signature scheme NONEwithEdDSA")
}
}

View File

@ -39,9 +39,9 @@ capsule {
def nodeProject = project(':node')
task buildCordaJAR(type: FatCapsule, dependsOn: [
nodeProject.tasks.jar,
project(':core-deterministic').tasks.assemble,
project(':serialization-deterministic').tasks.assemble
nodeProject.tasks.named('jar'),
project(':core-deterministic').tasks.named('assemble'),
project(':serialization-deterministic').tasks.named('assemble')
]) {
applicationClass 'net.corda.node.Corda'
archiveBaseName = 'corda'

View File

@ -23,8 +23,10 @@ class NodesStartStopSingleVmTests(@Suppress("unused") private val iteration: Int
@Test(timeout = 300_000)
fun nodesStartStop() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
startNode(providedName = ALICE_NAME).getOrThrow()
startNode(providedName = BOB_NAME).getOrThrow()
val alice = startNode(providedName = ALICE_NAME)
val bob = startNode(providedName = BOB_NAME)
alice.getOrThrow()
bob.getOrThrow()
}
}
}

View File

@ -1,6 +1,7 @@
package net.corda.node.flows
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.CordaException
import net.corda.core.flows.*
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
@ -16,7 +17,6 @@ import net.corda.testing.driver.DriverDSL
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.internal.ListenProcessDeathException
import net.corda.testing.node.internal.assertUncompletedCheckpoints
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions.assertThat
@ -77,7 +77,7 @@ class FlowCheckpointVersionNodeStartupCheckTest {
private fun DriverDSL.assertBobFailsToStartWithLogMessage(logMessage: String) {
assertUncompletedCheckpoints(BOB_NAME, 1)
assertFailsWith(ListenProcessDeathException::class) {
assertFailsWith(CordaException::class) {
startNode(NodeParameters(
providedName = BOB_NAME,
customOverrides = mapOf("devMode" to false)

View File

@ -1,5 +1,6 @@
package net.corda.node.logging
import net.corda.core.internal.concurrent.transpose
import net.corda.core.internal.div
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.OpaqueBytes
@ -22,8 +23,10 @@ class IssueCashLoggingTests {
fun `issuing and sending cash as payment do not result in duplicate insertion warnings`() {
val user = User("mark", "dadada", setOf(all()))
driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS)) {
val nodeA = startNode(rpcUsers = listOf(user)).getOrThrow()
val nodeB = startNode().getOrThrow()
val (nodeA, nodeB) = listOf(startNode(rpcUsers = listOf(user)),
startNode())
.transpose()
.getOrThrow()
val amount = 1.DOLLARS
val ref = OpaqueBytes.of(0)

View File

@ -1,355 +0,0 @@
package net.corda.node.services.rpc
import net.corda.client.rpc.CordaRPCClient
import net.corda.client.rpc.CordaRPCClientConfiguration
import net.corda.client.rpc.GracefulReconnect
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps
import net.corda.client.rpc.notUsed
import net.corda.core.contracts.Amount
import net.corda.core.flows.StateMachineRunId
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.StateMachineUpdate
import net.corda.core.node.services.Vault
import net.corda.core.node.services.vault.PageSpecification
import net.corda.core.node.services.vault.QueryCriteria
import net.corda.core.node.services.vault.builder
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.finance.contracts.asset.Cash
import net.corda.finance.flows.CashIssueAndPaymentFlow
import net.corda.finance.schemas.CashSchemaV1
import net.corda.node.services.Permissions
import net.corda.node.services.rpc.RpcReconnectTests.Companion.NUMBER_OF_FLOWS_TO_RUN
import net.corda.testing.core.DUMMY_BANK_A_NAME
import net.corda.testing.core.DUMMY_BANK_B_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeHandle
import net.corda.testing.driver.OutOfProcess
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.OutOfProcessImpl
import net.corda.testing.driver.internal.incrementalPortAllocation
import net.corda.testing.node.User
import net.corda.testing.node.internal.FINANCE_CORDAPPS
import org.assertj.core.api.Assertions.assertThat
import org.junit.Test
import java.util.*
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import kotlin.concurrent.thread
import kotlin.math.absoluteValue
import kotlin.math.max
import kotlin.test.assertEquals
import kotlin.test.assertTrue
import kotlin.test.currentStackTrace
/**
* This is a stress test for the rpc reconnection logic, which triggers failures in a probabilistic way.
*
* You can adjust the variable [NUMBER_OF_FLOWS_TO_RUN] to adjust the number of flows to run and the duration of the test.
*/
class RpcReconnectTests {
companion object {
// this many flows take ~5 minutes
const val NUMBER_OF_FLOWS_TO_RUN = 100
private val log = contextLogger()
}
private val portAllocator = incrementalPortAllocation()
private lateinit var proxy: RandomFailingProxy
private lateinit var node: NodeHandle
private lateinit var currentAddressPair: AddressPair
/**
* This test showcases and stress tests the demo [ReconnectingCordaRPCOps].
*
* Note that during node failure events can be lost and starting flows can become unreliable.
* The only available way to retry failed flows is to attempt a "logical retry" which is also showcased.
*
* This test runs flows in a loop and in the background kills the node or restarts it.
* Also the RPC connection is made through a proxy that introduces random latencies and is also periodically killed.
*/
@Suppress("ComplexMethod")
@Test(timeout=420_000)
fun `test that the RPC client is able to reconnect and proceed after node failure, restart, or connection reset`() {
val nodeRunningTime = { Random().nextInt(12000) + 8000 }
val demoUser = User("demo", "demo", setOf(Permissions.all()))
// When this reaches 0 - the test will end.
val flowsCountdownLatch = CountDownLatch(NUMBER_OF_FLOWS_TO_RUN)
// These are the expected progress steps for the CashIssueAndPayFlow.
val expectedProgress = listOf(
"Starting",
"Issuing cash",
"Generating transaction",
"Signing transaction",
"Finalising transaction",
"Broadcasting transaction to participants",
"Paying recipient",
"Generating anonymous identities",
"Generating transaction",
"Signing transaction",
"Finalising transaction",
"Requesting signature by notary service",
"Requesting signature by Notary service",
"Validating response from Notary service",
"Broadcasting transaction to participants",
"Done"
)
driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS, startNodesInProcess = false, inMemoryDB = false)) {
fun startBankA(address: NetworkHostAndPort) = startNode(providedName = DUMMY_BANK_A_NAME, rpcUsers = listOf(demoUser), customOverrides = mapOf("rpcSettings.address" to address.toString()))
fun startProxy(addressPair: AddressPair) = RandomFailingProxy(serverPort = addressPair.proxyAddress.port, remotePort = addressPair.nodeAddress.port).start()
val addresses = (1..2).map { getRandomAddressPair() }
currentAddressPair = addresses[0]
proxy = startProxy(currentAddressPair)
val (bankA, bankB) = listOf(
startBankA(currentAddressPair.nodeAddress),
startNode(providedName = DUMMY_BANK_B_NAME, rpcUsers = listOf(demoUser))
).transpose().getOrThrow()
node = bankA
val notary = defaultNotaryIdentity
val baseAmount = Amount.parseCurrency("0 USD")
val issuerRef = OpaqueBytes.of(0x01)
var numDisconnects = 0
var numReconnects = 0
val maxStackOccurrences = AtomicInteger()
val addressesForRpc = addresses.map { it.proxyAddress }
// DOCSTART rpcReconnectingRPC
val onReconnect = {
numReconnects++
// We only expect to see a single reconnectOnError in the stack trace. Otherwise we're in danger of stack overflow recursion
maxStackOccurrences.set(max(maxStackOccurrences.get(), currentStackTrace().count { it.methodName == "reconnectOnError" }))
Unit
}
val reconnect = GracefulReconnect(onDisconnect = { numDisconnects++ }, onReconnect = onReconnect)
val config = CordaRPCClientConfiguration.DEFAULT.copy(
connectionRetryInterval = 1.seconds,
connectionRetryIntervalMultiplier = 1.0
)
val client = CordaRPCClient(addressesForRpc, configuration = config)
val bankAReconnectingRPCConnection = client.start(demoUser.username, demoUser.password, gracefulReconnect = reconnect)
val bankAReconnectingRpc = bankAReconnectingRPCConnection.proxy as ReconnectingCordaRPCOps
// DOCEND rpcReconnectingRPC
// Observe the vault and collect the observations.
val vaultEvents = Collections.synchronizedList(mutableListOf<Vault.Update<Cash.State>>())
// DOCSTART rpcReconnectingRPCVaultTracking
val vaultFeed = bankAReconnectingRpc.vaultTrackByWithPagingSpec(
Cash.State::class.java,
QueryCriteria.VaultQueryCriteria(),
PageSpecification(1, 1))
val vaultSubscription = vaultFeed.updates.subscribe { update: Vault.Update<Cash.State> ->
log.info("vault update produced ${update.produced.map { it.state.data.amount }} consumed ${update.consumed.map { it.ref }}")
vaultEvents.add(update)
}
// DOCEND rpcReconnectingRPCVaultTracking
// Observe the stateMachine and collect the observations.
val stateMachineEvents = Collections.synchronizedList(mutableListOf<StateMachineUpdate>())
val stateMachineSubscription = bankAReconnectingRpc.stateMachinesFeed().updates.subscribe { update ->
log.info(update.toString())
stateMachineEvents.add(update)
}
// While the flows are running, randomly apply a different failure scenario.
val nrRestarts = AtomicInteger()
thread(name = "Node killer") {
while (true) {
if (flowsCountdownLatch.count == 0L) break
// Let the node run for a random time interval.
nodeRunningTime().also { ms ->
log.info("Running node for ${ms / 1000} s.")
Thread.sleep(ms.toLong())
}
if (flowsCountdownLatch.count == 0L) break
when (Random().nextInt().rem(7).absoluteValue) {
0 -> {
log.info("Forcefully killing node and proxy.")
(node as OutOfProcessImpl).onStopCallback()
(node as OutOfProcess).process.destroyForcibly()
proxy.stop()
node = startBankA(currentAddressPair.nodeAddress).get()
proxy.start()
}
1 -> {
log.info("Forcefully killing node.")
(node as OutOfProcessImpl).onStopCallback()
(node as OutOfProcess).process.destroyForcibly()
node = startBankA(currentAddressPair.nodeAddress).get()
}
2 -> {
log.info("Shutting down node.")
node.stop()
proxy.stop()
node = startBankA(currentAddressPair.nodeAddress).get()
proxy.start()
}
3, 4 -> {
log.info("Killing proxy.")
proxy.stop()
Thread.sleep(Random().nextInt(5000).toLong())
proxy.start()
}
5 -> {
log.info("Dropping connection.")
proxy.failConnection()
}
6 -> {
log.info("Performing failover to a different node")
node.stop()
proxy.stop()
currentAddressPair = (addresses - currentAddressPair).first()
node = startBankA(currentAddressPair.nodeAddress).get()
proxy = startProxy(currentAddressPair)
}
}
nrRestarts.incrementAndGet()
}
}
// Start nrOfFlowsToRun and provide a logical retry function that checks the vault.
val flowProgressEvents = mutableMapOf<StateMachineRunId, MutableList<String>>()
for (amount in (1..NUMBER_OF_FLOWS_TO_RUN)) {
// DOCSTART rpcReconnectingRPCFlowStarting
bankAReconnectingRpc.runFlowWithLogicalRetry(
runFlow = { rpc ->
log.info("Starting CashIssueAndPaymentFlow for $amount")
val flowHandle = rpc.startTrackedFlowDynamic(
CashIssueAndPaymentFlow::class.java,
baseAmount.plus(Amount.parseCurrency("$amount USD")),
issuerRef,
bankB.nodeInfo.legalIdentities.first(),
false,
notary
)
val flowId = flowHandle.id
log.info("Started flow $amount with flowId: $flowId")
flowProgressEvents.addEvent(flowId, null)
flowHandle.stepsTreeFeed?.updates?.notUsed()
flowHandle.stepsTreeIndexFeed?.updates?.notUsed()
// No reconnecting possible.
flowHandle.progress.subscribe(
{ prog ->
flowProgressEvents.addEvent(flowId, prog)
log.info("Progress $flowId : $prog")
},
{ error ->
log.error("Error thrown in the flow progress observer", error)
})
flowHandle.id
},
hasFlowStarted = { rpc ->
// Query for a state that is the result of this flow.
val criteria = QueryCriteria.VaultCustomQueryCriteria(builder { CashSchemaV1.PersistentCashState::pennies.equal(amount.toLong() * 100) }, status = Vault.StateStatus.ALL)
val results = rpc.vaultQueryByCriteria(criteria, Cash.State::class.java)
log.info("$amount - Found states ${results.states}")
// The flow has completed if a state is found
results.states.isNotEmpty()
},
onFlowConfirmed = {
flowsCountdownLatch.countDown()
log.info("Flow started for $amount. Remaining flows: ${flowsCountdownLatch.count}")
}
)
// DOCEND rpcReconnectingRPCFlowStarting
Thread.sleep(Random().nextInt(250).toLong())
}
log.info("Started all flows")
// Wait until all flows have been started.
val flowsConfirmed = flowsCountdownLatch.await(10, TimeUnit.MINUTES)
if (flowsConfirmed) {
log.info("Confirmed all flows have started.")
} else {
log.info("Timed out waiting for confirmation that all flows have started. Remaining flows: ${flowsCountdownLatch.count}")
}
// Wait for all events to come in and flows to finish.
Thread.sleep(4000)
val nrFailures = nrRestarts.get()
log.info("Checking results after $nrFailures restarts.")
// We should get one disconnect and one reconnect for each failure
assertThat(numDisconnects).isEqualTo(numReconnects)
assertThat(numReconnects).isLessThanOrEqualTo(nrFailures)
assertThat(maxStackOccurrences.get()).isLessThan(2)
// Query the vault and check that states were created for all flows.
fun readCashStates() = bankAReconnectingRpc
.vaultQueryByWithPagingSpec(Cash.State::class.java, QueryCriteria.VaultQueryCriteria(status = Vault.StateStatus.CONSUMED), PageSpecification(1, 10000))
.states
var allCashStates = readCashStates()
var nrRetries = 0
// It might be necessary to wait more for all events to arrive when the node is slow.
while (allCashStates.size < NUMBER_OF_FLOWS_TO_RUN && nrRetries++ < 50) {
Thread.sleep(2000)
allCashStates = readCashStates()
}
val allCash = allCashStates.map { it.state.data.amount.quantity }.toSet()
val missingCash = (1..NUMBER_OF_FLOWS_TO_RUN).filterNot { allCash.contains(it.toLong() * 100) }
log.info("Missing cash states: $missingCash")
assertEquals(NUMBER_OF_FLOWS_TO_RUN, allCashStates.size, "Not all flows were executed successfully")
// The progress status for each flow can only miss the last events, because the node might have been killed.
val missingProgressEvents = flowProgressEvents.filterValues { expectedProgress.subList(0, it.size) != it }
assertTrue(missingProgressEvents.isEmpty(), "The flow progress tracker is missing events: $missingProgressEvents")
// DOCSTART missingVaultEvents
// Check that enough vault events were received.
// This check is fuzzy because events can go missing during node restarts.
// Ideally there should be nrOfFlowsToRun events receive but some might get lost for each restart.
assertThat(vaultEvents!!.size + nrFailures * 3).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN)
// DOCEND missingVaultEvents
// Check that no flow was triggered twice.
val duplicates = allCashStates.groupBy { it.state.data.amount }.filterValues { it.size > 1 }
assertTrue(duplicates.isEmpty(), "${duplicates.size} flows were retried illegally.")
log.info("State machine events seen: ${stateMachineEvents!!.size}")
// State machine events are very likely to get lost more often because they seem to be sent with a delay.
assertThat(stateMachineEvents.count { it is StateMachineUpdate.Added }).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN / 3)
assertThat(stateMachineEvents.count { it is StateMachineUpdate.Removed }).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN / 3)
// Stop the observers.
vaultSubscription.unsubscribe()
stateMachineSubscription.unsubscribe()
bankAReconnectingRPCConnection.close()
}
proxy.close()
}
@Synchronized
fun MutableMap<StateMachineRunId, MutableList<String>>.addEvent(id: StateMachineRunId, progress: String?): Boolean {
return getOrPut(id) { mutableListOf() }.let { if (progress != null) it.add(progress) else false }
}
private fun getRandomAddressPair() = AddressPair(getRandomAddress(), getRandomAddress())
private fun getRandomAddress() = NetworkHostAndPort("localhost", portAllocator.nextPort())
data class AddressPair(val proxyAddress: NetworkHostAndPort, val nodeAddress: NetworkHostAndPort)
}

View File

@ -62,30 +62,49 @@ abstract class StateMachineErrorHandlingTest {
}
}
internal fun DriverDSL.createBytemanNode(
providedName: CordaX500Name,
internal fun DriverDSL.createBytemanNode(nodeProvidedName: CordaX500Name): Pair<NodeHandle, Int> {
val port = nextPort()
val bytemanNodeHandle = (this as InternalDriverDSL).startNode(
NodeParameters(
providedName = nodeProvidedName,
rpcUsers = listOf(rpcUser)
),
bytemanPort = port
)
return bytemanNodeHandle.getOrThrow() to port
}
internal fun DriverDSL.createNode(nodeProvidedName: CordaX500Name): NodeHandle {
return (this as InternalDriverDSL).startNode(
NodeParameters(
providedName = nodeProvidedName,
rpcUsers = listOf(rpcUser)
)
).getOrThrow()
}
internal fun DriverDSL.createNodeAndBytemanNode(
nodeProvidedName: CordaX500Name,
bytemanNodeProvidedName: CordaX500Name,
additionalCordapps: Collection<TestCordapp> = emptyList()
): Pair<NodeHandle, Int> {
): Triple<NodeHandle, NodeHandle, Int> {
val port = nextPort()
val nodeHandle = (this as InternalDriverDSL).startNode(
NodeParameters(
providedName = providedName,
providedName = nodeProvidedName,
rpcUsers = listOf(rpcUser),
additionalCordapps = additionalCordapps
)
)
val bytemanNodeHandle = startNode(
NodeParameters(
providedName = bytemanNodeProvidedName,
rpcUsers = listOf(rpcUser),
additionalCordapps = additionalCordapps
),
bytemanPort = port
).getOrThrow()
return nodeHandle to port
}
internal fun DriverDSL.createNode(providedName: CordaX500Name, additionalCordapps: Collection<TestCordapp> = emptyList()): NodeHandle {
return startNode(
NodeParameters(
providedName = providedName,
rpcUsers = listOf(rpcUser),
additionalCordapps = additionalCordapps
)
).getOrThrow()
)
return Triple(nodeHandle.getOrThrow(), bytemanNodeHandle.getOrThrow(), port)
}
internal fun submitBytemanRules(rules: String, port: Int) {
@ -285,4 +304,4 @@ abstract class StateMachineErrorHandlingTest {
internal val stateMachineManagerClassName: String by lazy {
Class.forName("net.corda.node.services.statemachine.SingleThreadedStateMachineManager").name
}
}
}

View File

@ -35,8 +35,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error recording a transaction inside of ReceiveFinalityFlow will keep the flow in for observation`() {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)
// could not get rule for FinalityDoctor + observation counter to work
val rules = """
@ -97,8 +96,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error resolving a transaction's dependencies inside of ReceiveFinalityFlow will keep the flow in for observation`() {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)
// could not get rule for FinalityDoctor + observation counter to work
val rules = """
@ -161,8 +159,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action while receiving a transaction inside of ReceiveFinalityFlow will be retried and complete successfully`() {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)
val rules = """
RULE Create Counter
@ -229,8 +226,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action while receiving a transaction inside of ReceiveFinalityFlow will be retried and be kept for observation if error persists`() {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)
val rules = """
RULE Create Counter

View File

@ -40,8 +40,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -88,8 +87,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `unexpected error during flow initialisation throws exception to client`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
CLASS ${FlowStateMachineImpl::class.java.name}
@ -134,8 +132,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during initialisation when trying to rollback the flow's database transaction the flow is able to retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -187,8 +184,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during initialisation when trying to close the flow's database transaction the flow is able to retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -242,8 +238,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -298,8 +293,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during retrying a flow that failed when committing its original checkpoint will retry the flow again and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Throw exception on executeCommitTransaction action after first suspend + commit
@ -351,8 +345,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and complete successfully`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
val rules = """
RULE Create Counter
@ -400,8 +393,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
val rules = """
RULE Create Counter
@ -464,8 +456,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `responding flow - session init can be retried when there is a transient connection error to the database`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
val rules = """
RULE Create Counter
@ -529,8 +520,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `responding flow - session init can be retried when there is a transient connection error to the database goes to observation if error persists`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
val rules = """
RULE Create Counter

View File

@ -35,8 +35,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with SendInitial action is retried 3 times and kept for observation if error persists`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -87,8 +86,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with SendInitial action that does not persist will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -135,8 +133,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with AcknowledgeMessages action is swallowed and flow completes successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Set flag when inside executeAcknowledgeMessages
@ -230,8 +227,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during flow retry when executing retryFlowFromSafePoint the flow is able to retry and recover`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Set flag when executing first suspend
@ -296,8 +292,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action that occurs after the first suspend will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
// seems to be restarting the flow from the beginning every time
val rules = """
@ -362,8 +357,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
// seems to be restarting the flow from the beginning every time
val rules = """
@ -419,8 +413,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action and ConstraintViolationException that occurs when completing a flow will retry and be kept for observation if error persists`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -488,8 +481,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `flow can be retried when there is a transient connection error to the database`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -552,8 +544,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `flow can be retried when there is a transient connection error to the database goes to observation if error persists`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -610,8 +601,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `responding flow - error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
val rules = """
RULE Create Counter

View File

@ -103,8 +103,7 @@ class StateMachineKillFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `flow killed when it is in the flow hospital for observation is removed correctly`() {
startDriver {
val (alice, port) = createBytemanNode(ALICE_NAME)
val charlie = createNode(CHARLIE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter

View File

@ -40,8 +40,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `initiating subflow - error during transition with CommitTransaction action that occurs during the first send will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -119,8 +118,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `initiating subflow - error during transition with CommitTransaction action that occurs after the first receive will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -190,8 +188,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `inline subflow - error during transition with CommitTransaction action that occurs during the first send will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter
@ -253,8 +250,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `inline subflow - error during transition with CommitTransaction action that occurs during the first receive will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
val rules = """
RULE Create Counter

View File

@ -41,7 +41,7 @@ class AddressBindingFailureTests {
assertThatThrownBy {
driver(DriverParameters(startNodesInProcess = false,
notarySpecs = listOf(NotarySpec(notaryName)),
notarySpecs = listOf(NotarySpec(notaryName, startInProcess = false)),
notaryCustomOverrides = mapOf("p2pAddress" to address.toString()),
portAllocation = portAllocation,
cordappsForAllNodes = emptyList())

View File

@ -0,0 +1,99 @@
package net.corda.node.customcheckpointserializer
import com.nhaarman.mockito_kotlin.doReturn
import com.nhaarman.mockito_kotlin.whenever
import net.corda.core.crypto.generateKeyPair
import net.corda.core.serialization.EncodingWhitelist
import net.corda.core.serialization.internal.CheckpointSerializationContext
import net.corda.core.serialization.internal.checkpointDeserialize
import net.corda.core.serialization.internal.checkpointSerialize
import net.corda.coretesting.internal.rigorousMock
import net.corda.serialization.internal.AllWhitelist
import net.corda.serialization.internal.CheckpointSerializationContextImpl
import net.corda.serialization.internal.CordaSerializationEncoding
import net.corda.testing.core.internal.CheckpointSerializationEnvironmentRule
import org.junit.Assert
import org.junit.Rule
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
@RunWith(Parameterized::class)
class CustomCheckpointSerializerTest(private val compression: CordaSerializationEncoding?) {
companion object {
@Parameterized.Parameters(name = "{0}")
@JvmStatic
fun compression() = arrayOf<CordaSerializationEncoding?>(null) + CordaSerializationEncoding.values()
}
@get:Rule
val serializationRule = CheckpointSerializationEnvironmentRule(inheritable = true)
private val context: CheckpointSerializationContext = CheckpointSerializationContextImpl(
deserializationClassLoader = javaClass.classLoader,
whitelist = AllWhitelist,
properties = emptyMap(),
objectReferencesEnabled = true,
encoding = compression,
encodingWhitelist = rigorousMock<EncodingWhitelist>().also {
if (compression != null) doReturn(true).whenever(it).acceptEncoding(compression)
},
checkpointCustomSerializers = listOf(
TestCorDapp.TestAbstractClassSerializer(),
TestCorDapp.TestClassSerializer(),
TestCorDapp.TestInterfaceSerializer(),
TestCorDapp.TestFinalClassSerializer(),
TestCorDapp.BrokenPublicKeySerializer()
)
)
@Test(timeout=300_000)
fun `test custom checkpoint serialization`() {
testBrokenMapSerialization(DifficultToSerialize.BrokenMapClass())
}
@Test(timeout=300_000)
fun `test custom checkpoint serialization using interface`() {
testBrokenMapSerialization(DifficultToSerialize.BrokenMapInterfaceImpl())
}
@Test(timeout=300_000)
fun `test custom checkpoint serialization using abstract class`() {
testBrokenMapSerialization(DifficultToSerialize.BrokenMapAbstractImpl())
}
@Test(timeout=300_000)
fun `test custom checkpoint serialization using final class`() {
testBrokenMapSerialization(DifficultToSerialize.BrokenMapFinal())
}
@Test(timeout=300_000)
fun `test PublicKey serializer has not been overridden`() {
val publicKey = generateKeyPair().public
// Serialize/deserialize
val checkpoint = publicKey.checkpointSerialize(context)
val deserializedCheckpoint = checkpoint.checkpointDeserialize(context)
// Check the elements are as expected
Assert.assertArrayEquals(publicKey.encoded, deserializedCheckpoint.encoded)
}
private fun testBrokenMapSerialization(brokenMap: MutableMap<String, String>): MutableMap<String, String> {
// Add elements to the map
brokenMap.putAll(mapOf("key" to "value"))
// Serialize/deserialize
val checkpoint = brokenMap.checkpointSerialize(context)
val deserializedCheckpoint = checkpoint.checkpointDeserialize(context)
// Check the elements are as expected
Assert.assertEquals(1, deserializedCheckpoint.size)
Assert.assertEquals("value", deserializedCheckpoint.get("key"))
// Return map for extra checks
return deserializedCheckpoint
}
}

View File

@ -0,0 +1,27 @@
package net.corda.node.customcheckpointserializer
import net.corda.core.flows.FlowException
class DifficultToSerialize {
// Broken Map
// This map breaks the rules for the put method. Making the normal map serializer fail.
open class BrokenMapBaseImpl<K,V>(delegate: MutableMap<K, V> = mutableMapOf()) : MutableMap<K,V> by delegate {
override fun put(key: K, value: V): V? = throw FlowException("Broken on purpose")
}
// A class to test custom serializers applied to implementations
class BrokenMapClass<K,V> : BrokenMapBaseImpl<K, V>()
// An interface and implementation to test custom serializers applied to interface types
interface BrokenMapInterface<K, V> : MutableMap<K, V>
class BrokenMapInterfaceImpl<K,V> : BrokenMapBaseImpl<K, V>(), BrokenMapInterface<K, V>
// An abstract class and implementation to test custom serializers applied to abstract types
abstract class BrokenMapAbstract<K, V> : BrokenMapBaseImpl<K, V>(), MutableMap<K, V>
class BrokenMapAbstractImpl<K,V> : BrokenMapAbstract<K, V>()
// A final class
final class BrokenMapFinal<K, V>: BrokenMapBaseImpl<K, V>()
}

View File

@ -0,0 +1,59 @@
package net.corda.node.customcheckpointserializer
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.messaging.startFlow
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.utilities.getOrThrow
import net.corda.testing.driver.driver
import net.corda.testing.driver.logFile
import org.assertj.core.api.Assertions
import org.junit.Test
import java.time.Duration
class DuplicateSerializerLogTest {
@Test(timeout=300_000)
fun `check duplicate serialisers are logged`() {
driver {
val node = startNode(startInSameProcess = false).getOrThrow()
node.rpc.startFlow(::TestFlow).returnValue.get()
val text = node.logFile().readLines().filter { it.startsWith("[WARN") }
// Initial message is correct
Assertions.assertThat(text).anyMatch {it.contains("Duplicate custom checkpoint serializer for type net.corda.node.customcheckpointserializer.DifficultToSerialize\$BrokenMapInterface<java.lang.Object, java.lang.Object>. Serializers: ")}
// Message mentions TestInterfaceSerializer
Assertions.assertThat(text).anyMatch {it.contains("net.corda.node.customcheckpointserializer.TestCorDapp\$TestInterfaceSerializer")}
// Message mentions DuplicateSerializer
Assertions.assertThat(text).anyMatch {it.contains("net.corda.node.customcheckpointserializer.DuplicateSerializerLogTest\$DuplicateSerializer")}
}
}
@StartableByRPC
@InitiatingFlow
class TestFlow : FlowLogic<DifficultToSerialize.BrokenMapInterface<String, String>>() {
override fun call(): DifficultToSerialize.BrokenMapInterface<String, String> {
val brokenMap: DifficultToSerialize.BrokenMapInterface<String, String> = DifficultToSerialize.BrokenMapInterfaceImpl()
brokenMap.putAll(mapOf("test" to "input"))
sleep(Duration.ofSeconds(0))
return brokenMap
}
}
@Suppress("unused")
class DuplicateSerializer :
CheckpointCustomSerializer<DifficultToSerialize.BrokenMapInterface<Any, Any>, HashMap<Any, Any>> {
override fun toProxy(obj: DifficultToSerialize.BrokenMapInterface<Any, Any>): HashMap<Any, Any> {
val proxy = HashMap<Any, Any>()
return obj.toMap(proxy)
}
override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapInterface<Any, Any> {
return DifficultToSerialize.BrokenMapInterfaceImpl<Any, Any>()
.also { it.putAll(proxy) }
}
}
}

View File

@ -0,0 +1,58 @@
package net.corda.node.customcheckpointserializer
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.messaging.startFlow
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.CordaSerializable
import net.corda.core.utilities.getOrThrow
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.logFile
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions
import org.junit.Test
import java.time.Duration
class DuplicateSerializerLogWithSameSerializerTest {
@Test(timeout=300_000)
fun `check duplicate serialisers are not logged for the same class`() {
// Duplicate the cordapp in this node
driver(DriverParameters(cordappsForAllNodes = listOf(this.enclosedCordapp(), this.enclosedCordapp()))) {
val node = startNode(startInSameProcess = false).getOrThrow()
node.rpc.startFlow(::TestFlow).returnValue.get()
val text = node.logFile().readLines().filter { it.startsWith("[WARN") }
// Initial message is not logged
Assertions.assertThat(text)
.noneMatch { it.contains("Duplicate custom checkpoint serializer for type ") }
// Log does not mention DuplicateSerializerThatShouldNotBeLogged
Assertions.assertThat(text)
.noneMatch { it.contains("DuplicateSerializerThatShouldNotBeLogged") }
}
}
@CordaSerializable
class UnusedClass
@Suppress("unused")
class DuplicateSerializerThatShouldNotBeLogged : CheckpointCustomSerializer<UnusedClass, String> {
override fun toProxy(obj: UnusedClass): String = ""
override fun fromProxy(proxy: String): UnusedClass = UnusedClass()
}
@StartableByRPC
@InitiatingFlow
class TestFlow : FlowLogic<UnusedClass>() {
override fun call(): UnusedClass {
val unusedClass = UnusedClass()
sleep(Duration.ofSeconds(0))
return unusedClass
}
}
}

View File

@ -0,0 +1,75 @@
package net.corda.node.customcheckpointserializer
import co.paralleluniverse.fibers.Suspendable
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNetworkParameters
import org.assertj.core.api.Assertions
import org.junit.After
import org.junit.Before
import org.junit.Test
class MockNetworkCustomCheckpointSerializerTest {
private lateinit var mockNetwork: MockNetwork
@Before
fun setup() {
mockNetwork = MockNetwork(MockNetworkParameters(cordappsForAllNodes = listOf(TestCorDapp.getCorDapp())))
}
@After
fun shutdown() {
mockNetwork.stopNodes()
}
@Test(timeout = 300_000)
fun `flow suspend with custom kryo serializer`() {
val node = mockNetwork.createPartyNode()
val expected = 5
val actual = node.startFlow(TestCorDapp.TestFlowWithDifficultToSerializeLocalVariable(5)).get()
Assertions.assertThat(actual).isEqualTo(expected)
}
@Test(timeout = 300_000)
fun `check references are restored correctly`() {
val node = mockNetwork.createPartyNode()
val expectedReference = DifficultToSerialize.BrokenMapClass<String, Int>()
expectedReference.putAll(mapOf("one" to 1))
val actualReference = node.startFlow(TestCorDapp.TestFlowCheckingReferencesWork(expectedReference)).get()
Assertions.assertThat(actualReference).isSameAs(expectedReference)
Assertions.assertThat(actualReference["one"]).isEqualTo(1)
}
@Test(timeout = 300_000)
@Suspendable
fun `check serialization of interfaces`() {
val node = mockNetwork.createPartyNode()
val result = node.startFlow(TestCorDapp.TestFlowWithDifficultToSerializeLocalVariableAsInterface(5)).get()
Assertions.assertThat(result).isEqualTo(5)
}
@Test(timeout = 300_000)
@Suspendable
fun `check serialization of abstract classes`() {
val node = mockNetwork.createPartyNode()
val result = node.startFlow(TestCorDapp.TestFlowWithDifficultToSerializeLocalVariableAsAbstract(5)).get()
Assertions.assertThat(result).isEqualTo(5)
}
@Test(timeout = 300_000)
@Suspendable
fun `check serialization of final classes`() {
val node = mockNetwork.createPartyNode()
val result = node.startFlow(TestCorDapp.TestFlowWithDifficultToSerializeLocalVariableAsFinal(5)).get()
Assertions.assertThat(result).isEqualTo(5)
}
@Test(timeout = 300_000)
@Suspendable
fun `check PublicKey serializer has not been overridden`() {
val node = mockNetwork.createPartyNode()
val result = node.startFlow(TestCorDapp.TestFlowCheckingPublicKeySerializer()).get()
Assertions.assertThat(result.encoded).isEqualTo(node.info.legalIdentities.first().owningKey.encoded)
}
}

View File

@ -0,0 +1,75 @@
package net.corda.node.customcheckpointserializer
import com.nhaarman.mockito_kotlin.doReturn
import com.nhaarman.mockito_kotlin.whenever
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.EncodingWhitelist
import net.corda.core.serialization.internal.CheckpointSerializationContext
import net.corda.core.serialization.internal.checkpointDeserialize
import net.corda.core.serialization.internal.checkpointSerialize
import net.corda.coretesting.internal.rigorousMock
import net.corda.serialization.internal.AllWhitelist
import net.corda.serialization.internal.CheckpointSerializationContextImpl
import net.corda.serialization.internal.CordaSerializationEncoding
import net.corda.testing.core.internal.CheckpointSerializationEnvironmentRule
import org.junit.Assert
import org.junit.Rule
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
@RunWith(Parameterized::class)
class ReferenceLoopTest(private val compression: CordaSerializationEncoding?) {
companion object {
@Parameterized.Parameters(name = "{0}")
@JvmStatic
fun compression() = arrayOf<CordaSerializationEncoding?>(null) + CordaSerializationEncoding.values()
}
@get:Rule
val serializationRule = CheckpointSerializationEnvironmentRule(inheritable = true)
private val context: CheckpointSerializationContext = CheckpointSerializationContextImpl(
deserializationClassLoader = javaClass.classLoader,
whitelist = AllWhitelist,
properties = emptyMap(),
objectReferencesEnabled = true,
encoding = compression,
encodingWhitelist = rigorousMock<EncodingWhitelist>()
.also {
if (compression != null) doReturn(true).whenever(it)
.acceptEncoding(compression)
},
checkpointCustomSerializers = listOf(PersonSerializer()))
@Test(timeout=300_000)
fun `custom checkpoint serialization with reference loop`() {
val person = Person("Test name")
val result = person.checkpointSerialize(context).checkpointDeserialize(context)
Assert.assertEquals("Test name", result.name)
Assert.assertEquals("Test name", result.bestFriend.name)
Assert.assertSame(result, result.bestFriend)
}
/**
* Test class that will hold a reference to itself
*/
class Person(val name: String, bestFriend: Person? = null) {
val bestFriend: Person = bestFriend ?: this
}
/**
* Custom serializer for the Person class
*/
@Suppress("unused")
class PersonSerializer : CheckpointCustomSerializer<Person, Map<String, Any>> {
override fun toProxy(obj: Person): Map<String, Any> {
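// For a self-referencing Person the proxy map points back at the original object; object references are enabled in the test context, so the loop is restored intact on deserialization.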
return mapOf("name" to obj.name, "bestFriend" to obj.bestFriend)
}
override fun fromProxy(proxy: Map<String, Any>): Person {
return Person(proxy["name"] as String, proxy["bestFriend"] as Person?)
}
}
}

View File

@ -0,0 +1,214 @@
package net.corda.node.customcheckpointserializer
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowException
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByRPC
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.testing.node.internal.CustomCordapp
import net.corda.testing.node.internal.enclosedCordapp
import net.i2p.crypto.eddsa.EdDSAPublicKey
import org.assertj.core.api.Assertions
import java.security.PublicKey
import java.time.Duration
/**
* Contains all the flows and custom serializers for testing custom checkpoint serializers
*/
class TestCorDapp {
companion object {
fun getCorDapp(): CustomCordapp = enclosedCordapp()
}
// Flows
@StartableByRPC
class TestFlowWithDifficultToSerializeLocalVariableAsAbstract(private val purchase: Int) : FlowLogic<Int>() {
@Suspendable
override fun call(): Int {
// This object is difficult to serialize with Kryo
val difficultToSerialize: DifficultToSerialize.BrokenMapAbstract<String, Int> = DifficultToSerialize.BrokenMapAbstractImpl()
difficultToSerialize.putAll(mapOf("foo" to purchase))
// Force a checkpoint
sleep(Duration.ofSeconds(0))
// Return value from deserialized object
return difficultToSerialize["foo"] ?: 0
}
}
@StartableByRPC
class TestFlowWithDifficultToSerializeLocalVariableAsFinal(private val purchase: Int) : FlowLogic<Int>() {
@Suspendable
override fun call(): Int {
// This object is difficult to serialize with Kryo
val difficultToSerialize: DifficultToSerialize.BrokenMapFinal<String, Int> = DifficultToSerialize.BrokenMapFinal()
difficultToSerialize.putAll(mapOf("foo" to purchase))
// Force a checkpoint
sleep(Duration.ofSeconds(0))
// Return value from deserialized object
return difficultToSerialize["foo"] ?: 0
}
}
@StartableByRPC
class TestFlowWithDifficultToSerializeLocalVariableAsInterface(private val purchase: Int) : FlowLogic<Int>() {
@Suspendable
override fun call(): Int {
// This object is difficult to serialize with Kryo
val difficultToSerialize: DifficultToSerialize.BrokenMapInterface<String, Int> = DifficultToSerialize.BrokenMapInterfaceImpl()
difficultToSerialize.putAll(mapOf("foo" to purchase))
// Force a checkpoint
sleep(Duration.ofSeconds(0))
// Return value from deserialized object
return difficultToSerialize["foo"] ?: 0
}
}
@StartableByRPC
class TestFlowWithDifficultToSerializeLocalVariable(private val purchase: Int) : FlowLogic<Int>() {
@Suspendable
override fun call(): Int {
// This object is difficult to serialize with Kryo
val difficultToSerialize: DifficultToSerialize.BrokenMapClass<String, Int> = DifficultToSerialize.BrokenMapClass()
difficultToSerialize.putAll(mapOf("foo" to purchase))
// Force a checkpoint
sleep(Duration.ofSeconds(0))
// Return value from deserialized object
return difficultToSerialize["foo"] ?: 0
}
}
@StartableByRPC
class TestFlowCheckingReferencesWork(private val reference: DifficultToSerialize.BrokenMapClass<String, Int>) :
FlowLogic<DifficultToSerialize.BrokenMapClass<String, Int>>() {
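// Hold a second reference to the constructor argument so the flow can assert that both fields still point at the same object after the checkpoint round trip.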
private val referenceField = reference
@Suspendable
override fun call(): DifficultToSerialize.BrokenMapClass<String, Int> {
val ref = referenceField
// Force a checkpoint
sleep(Duration.ofSeconds(0))
// Check all objects refer to same object
Assertions.assertThat(reference).isSameAs(referenceField)
Assertions.assertThat(referenceField).isSameAs(ref)
// Return deserialized object
return ref
}
}
@StartableByRPC
class TestFlowCheckingPublicKeySerializer :
FlowLogic<PublicKey>() {
@Suspendable
override fun call(): PublicKey {
val ref = ourIdentity.owningKey
// Force a checkpoint
sleep(Duration.ofSeconds(0))
// Return deserialized object
return ref
}
}
// Custom serializers
@Suppress("unused")
class TestInterfaceSerializer :
CheckpointCustomSerializer<DifficultToSerialize.BrokenMapInterface<Any, Any>, HashMap<Any, Any>> {
override fun toProxy(obj: DifficultToSerialize.BrokenMapInterface<Any, Any>): HashMap<Any, Any> {
val proxy = HashMap<Any, Any>()
return obj.toMap(proxy)
}
override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapInterface<Any, Any> {
return DifficultToSerialize.BrokenMapInterfaceImpl<Any, Any>()
.also { it.putAll(proxy) }
}
}
@Suppress("unused")
class TestClassSerializer :
CheckpointCustomSerializer<DifficultToSerialize.BrokenMapClass<Any, Any>, HashMap<Any, Any>> {
override fun toProxy(obj: DifficultToSerialize.BrokenMapClass<Any, Any>): HashMap<Any, Any> {
val proxy = HashMap<Any, Any>()
return obj.toMap(proxy)
}
override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapClass<Any, Any> {
return DifficultToSerialize.BrokenMapClass<Any, Any>()
.also { it.putAll(proxy) }
}
}
@Suppress("unused")
class TestAbstractClassSerializer :
CheckpointCustomSerializer<DifficultToSerialize.BrokenMapAbstract<Any, Any>, HashMap<Any, Any>> {
override fun toProxy(obj: DifficultToSerialize.BrokenMapAbstract<Any, Any>): HashMap<Any, Any> {
val proxy = HashMap<Any, Any>()
return obj.toMap(proxy)
}
override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapAbstract<Any, Any> {
return DifficultToSerialize.BrokenMapAbstractImpl<Any, Any>()
.also { it.putAll(proxy) }
}
}
@Suppress("unused")
class TestFinalClassSerializer :
CheckpointCustomSerializer<DifficultToSerialize.BrokenMapFinal<Any, Any>, HashMap<Any, Any>> {
override fun toProxy(obj: DifficultToSerialize.BrokenMapFinal<Any, Any>): HashMap<Any, Any> {
val proxy = HashMap<Any, Any>()
return obj.toMap(proxy)
}
override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapFinal<Any, Any> {
return DifficultToSerialize.BrokenMapFinal<Any, Any>()
.also { it.putAll(proxy) }
}
}
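// Deliberately broken serializer: the PublicKey tests above only pass if Corda's built-in PublicKey serializer takes precedence, so this proxy must never be invoked.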
@Suppress("unused")
class BrokenPublicKeySerializer :
CheckpointCustomSerializer<PublicKey, String> {
override fun toProxy(obj: PublicKey): String {
throw FlowException("Broken on purpose")
}
override fun fromProxy(proxy: String): PublicKey {
throw FlowException("Broken on purpose")
}
}
@Suppress("unused")
class BrokenEdDSAPublicKeySerializer :
CheckpointCustomSerializer<EdDSAPublicKey, String> {
override fun toProxy(obj: EdDSAPublicKey): String {
throw FlowException("Broken on purpose")
}
override fun fromProxy(proxy: String): EdDSAPublicKey {
throw FlowException("Broken on purpose")
}
}
}

View File

@ -12,6 +12,7 @@ import net.corda.core.flows.ReceiveFinalityFlow
import net.corda.core.flows.SignTransactionFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.node.AppServiceHub
import net.corda.core.node.services.CordaService
@ -318,8 +319,10 @@ class FlowEntityManagerTest : AbstractFlowEntityManagerTest() {
StaffedFlowHospital.onFlowDischarged.add { _, _ -> ++counter }
driver(DriverParameters(startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
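// Start both nodes in parallel: transpose() turns the list of start-up futures into a single future that completes once both nodes are up.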
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
val txId =
alice.rpc.startFlow(::EntityManagerWithFlushCatchAndInteractWithOtherPartyFlow, bob.nodeInfo.singleIdentity())

View File

@ -3,6 +3,7 @@ package net.corda.node.flows
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.*
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
@ -65,36 +66,35 @@ class FlowOverrideTests {
private val nodeAClasses = setOf(Ping::class.java, Pong::class.java, Pongiest::class.java)
private val nodeBClasses = setOf(Ping::class.java, Pong::class.java)
@Test(timeout=300_000)
fun `should use the most specific implementation of a responding flow`() {
@Test(timeout = 300_000)
fun `should use the most specific implementation of a responding flow`() {
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
val nodeA = startNode(NodeParameters(
providedName = ALICE_NAME,
additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray()))
)).getOrThrow()
val nodeB = startNode(NodeParameters(
providedName = BOB_NAME,
additionalCordapps = setOf(cordappForClasses(*nodeBClasses.toTypedArray()))
)).getOrThrow()
val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
.map {
NodeParameters(providedName = it,
additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())))
}
.map { startNode(it) }
.transpose()
.getOrThrow()
assertThat(nodeB.rpc.startFlow(::Ping, nodeA.nodeInfo.singleIdentity()).returnValue.getOrThrow(), `is`(Pongiest.GORGONZOLA))
}
}
@Test(timeout=300_000)
fun `should use the overridden implementation of a responding flow`() {
@Test(timeout = 300_000)
fun `should use the overridden implementation of a responding flow`() {
val flowOverrides = mapOf(Ping::class.java to Pong::class.java)
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
val nodeA = startNode(NodeParameters(
providedName = ALICE_NAME,
additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())),
flowOverrides = flowOverrides
)).getOrThrow()
val nodeB = startNode(NodeParameters(
providedName = BOB_NAME,
additionalCordapps = setOf(cordappForClasses(*nodeBClasses.toTypedArray()))
)).getOrThrow()
val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
.map {
NodeParameters(providedName = it,
flowOverrides = flowOverrides,
additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())))
}
.map { startNode(it) }
.transpose()
.getOrThrow()
assertThat(nodeB.rpc.startFlow(::Ping, nodeA.nodeInfo.singleIdentity()).returnValue.getOrThrow(), `is`(Pong.PONG))
}
}
}

View File

@ -0,0 +1,511 @@
package net.corda.node.flows
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.HospitalizeFlowException
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.flows.StateMachineRunId
import net.corda.core.identity.Party
import net.corda.core.internal.FlowIORequest
import net.corda.core.internal.IdempotentFlow
import net.corda.core.internal.TimedFlow
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.StateMachineTransactionMapping
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.core.utilities.unwrap
import net.corda.finance.DOLLARS
import net.corda.finance.flows.CashIssueAndPaymentFlow
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.services.statemachine.FlowTimeoutException
import net.corda.node.services.statemachine.StaffedFlowHospital
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.internal.FINANCE_CORDAPPS
import net.corda.testing.node.internal.enclosedCordapp
import org.junit.Test
import java.sql.SQLTransientConnectionException
import java.util.concurrent.Semaphore
import kotlin.test.assertEquals
import kotlin.test.assertNull
class FlowReloadAfterCheckpointTest {
private companion object {
val cordapps = listOf(enclosedCordapp())
}
@Test(timeout = 300_000)
fun `flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true`() {
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
}
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map {
startNode(
providedName = it,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
)
}
.transpose()
.getOrThrow()
val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, false)
val flowStartedByAlice = handle.id
handle.returnValue.getOrThrow()
assertEquals(5, reloadCounts[flowStartedByAlice])
assertEquals(6, reloadCounts[ReloadFromCheckpointResponder.flowId])
}
}
@Test(timeout = 300_000)
fun `flow will not reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is false`() {
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
}
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map {
startNode(
providedName = it,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to false)
)
}
.transpose()
.getOrThrow()
val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, false)
val flowStartedByAlice = handle.id
handle.returnValue.getOrThrow()
assertNull(reloadCounts[flowStartedByAlice])
assertNull(reloadCounts[ReloadFromCheckpointResponder.flowId])
}
}
@Test(timeout = 300_000)
fun `flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true and be kept for observation due to failed deserialization`() {
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
}
lateinit var flowKeptForObservation: StateMachineRunId
val lock = Semaphore(0)
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { id, _ ->
flowKeptForObservation = id
lock.release()
}
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map {
startNode(
providedName = it,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
)
}
.transpose()
.getOrThrow()
val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), true, false, false)
val flowStartedByAlice = handle.id
lock.acquire()
assertEquals(flowStartedByAlice, flowKeptForObservation)
assertEquals(4, reloadCounts[flowStartedByAlice])
assertEquals(4, reloadCounts[ReloadFromCheckpointResponder.flowId])
}
}
@Test(timeout = 300_000)
fun `flow will reload from a previous checkpoint after calling a suspending function and skipping persisting the current checkpoint when reloadCheckpointAfterSuspend is true`() {
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
}
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map {
startNode(
providedName = it,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
)
}
.transpose()
.getOrThrow()
val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, true)
val flowStartedByAlice = handle.id
handle.returnValue.getOrThrow()
assertEquals(5, reloadCounts[flowStartedByAlice])
assertEquals(6, reloadCounts[ReloadFromCheckpointResponder.flowId])
}
}
@Test(timeout = 300_000)
fun `idempotent flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true`() {
var reloadCount = 0
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val alice = startNode(
providedName = ALICE_NAME,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
).getOrThrow()
alice.rpc.startFlow(::MyIdempotentFlow, false).returnValue.getOrThrow()
assertEquals(5, reloadCount)
}
}
@Test(timeout = 300_000)
fun `idempotent flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true but cannot throw a deserialization error from objects in the call function`() {
var reloadCount = 0
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val alice = startNode(
providedName = ALICE_NAME,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
).getOrThrow()
alice.rpc.startFlow(::MyIdempotentFlow, true).returnValue.getOrThrow()
assertEquals(5, reloadCount)
}
}
@Test(timeout = 300_000)
fun `timed flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true`() {
var reloadCount = 0
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val alice = startNode(
providedName = ALICE_NAME,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
).getOrThrow()
alice.rpc.startFlow(::MyTimedFlow).returnValue.getOrThrow()
assertEquals(5, reloadCount)
}
}
@Test(timeout = 300_000)
fun `flow will correctly retry after an error when reloadCheckpointAfterSuspend is true`() {
var reloadCount = 0
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
var timesDischarged = 0
StaffedFlowHospital.onFlowDischarged.add { _, _ -> timesDischarged += 1 }
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val alice = startNode(
providedName = ALICE_NAME,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
).getOrThrow()
alice.rpc.startFlow(::TransientConnectionFailureFlow).returnValue.getOrThrow()
assertEquals(5, reloadCount)
assertEquals(3, timesDischarged)
}
}
@Test(timeout = 300_000)
fun `flow continues reloading from checkpoints after node restart when reloadCheckpointAfterSuspend is true`() {
var reloadCount = 0
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
driver(
DriverParameters(
inMemoryDB = false,
startNodesInProcess = true,
notarySpecs = emptyList(),
cordappsForAllNodes = cordapps
)
) {
val alice = startNode(
providedName = ALICE_NAME,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
).getOrThrow()
alice.rpc.startFlow(::MyHospitalizingFlow)
Thread.sleep(10.seconds.toMillis())
alice.stop()
startNode(
providedName = ALICE_NAME,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
).getOrThrow()
Thread.sleep(20.seconds.toMillis())
assertEquals(5, reloadCount)
}
}
@Test(timeout = 300_000)
fun `idempotent flow continues reloading from checkpoints after node restart when reloadCheckpointAfterSuspend is true`() {
var reloadCount = 0
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
driver(
DriverParameters(
inMemoryDB = false,
startNodesInProcess = true,
notarySpecs = emptyList(),
cordappsForAllNodes = cordapps
)
) {
val alice = startNode(
providedName = ALICE_NAME,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
).getOrThrow()
alice.rpc.startFlow(::IdempotentHospitalizingFlow)
Thread.sleep(10.seconds.toMillis())
alice.stop()
startNode(
providedName = ALICE_NAME,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
).getOrThrow()
Thread.sleep(20.seconds.toMillis())
// restarts completely from the beginning and forgets the in-memory reload count, therefore
// it reloads an extra 2 times for checkpoints it had already reloaded before the node shutdown
assertEquals(7, reloadCount)
}
}
@Test(timeout = 300_000)
fun `more complicated flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true`() {
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
}
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = FINANCE_CORDAPPS)) {
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map {
startNode(
providedName = it,
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
)
}
.transpose()
.getOrThrow()
val handle = alice.rpc.startFlow(
::CashIssueAndPaymentFlow,
500.DOLLARS,
OpaqueBytes.of(0x01),
bob.nodeInfo.singleIdentity(),
false,
defaultNotaryIdentity
)
val flowStartedByAlice = handle.id
handle.returnValue.getOrThrow(30.seconds)
val flowStartedByBob = bob.rpc.stateMachineRecordedTransactionMappingSnapshot()
.map(StateMachineTransactionMapping::stateMachineRunId)
.toSet()
.single()
Thread.sleep(10.seconds.toMillis())
assertEquals(7, reloadCounts[flowStartedByAlice])
assertEquals(6, reloadCounts[flowStartedByBob])
}
}
/**
* Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
* Therefore this flow should reload 5 times when completed without errors or restarts.
*/
@StartableByRPC
@InitiatingFlow
class ReloadFromCheckpointFlow(
private val party: Party,
private val shouldHaveDeserializationError: Boolean,
private val counterPartyHasDeserializationError: Boolean,
private val skipCheckpoints: Boolean
) : FlowLogic<Unit>() {
@Suspendable
override fun call() {
val session = initiateFlow(party)
session.send(counterPartyHasDeserializationError, skipCheckpoints)
session.receive(String::class.java, skipCheckpoints).unwrap { it }
stateMachine.suspend(FlowIORequest.ForceCheckpoint, skipCheckpoints)
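// BrokenMap cannot survive a checkpoint round trip, so holding it in a local variable makes the next reload fail with a deserialization error when requested.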
val map = if (shouldHaveDeserializationError) {
BrokenMap(mutableMapOf("i dont want" to "this to work"))
} else {
mapOf("i dont want" to "this to work")
}
logger.info("I need to use my variable to pass the build!: $map")
session.sendAndReceive<String>("hey I made it this far")
}
}
/**
* Has 5 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 6.
* Therefore this flow should reload 6 times when completed without errors or restarts.
*/
@InitiatedBy(ReloadFromCheckpointFlow::class)
class ReloadFromCheckpointResponder(private val session: FlowSession) : FlowLogic<Unit>() {
companion object {
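// Captured when the responder runs so the tests can look up its reload count by flow id.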
var flowId: StateMachineRunId? = null
}
@Suspendable
override fun call() {
flowId = runId
val counterPartyHasDeserializationError = session.receive<Boolean>().unwrap { it }
session.send("hello there 12312311")
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
val map = if (counterPartyHasDeserializationError) {
BrokenMap(mutableMapOf("i dont want" to "this to work"))
} else {
mapOf("i dont want" to "this to work")
}
logger.info("I need to use my variable to pass the build!: $map")
session.receive<String>().unwrap { it }
session.send("sending back a message")
}
}
/**
* Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
* Therefore this flow should reload 5 times when completed without errors or restarts.
*/
@StartableByRPC
@InitiatingFlow
class MyIdempotentFlow(private val shouldHaveDeserializationError: Boolean) : FlowLogic<Unit>(), IdempotentFlow {
@Suspendable
override fun call() {
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
val map = if (shouldHaveDeserializationError) {
BrokenMap(mutableMapOf("i dont want" to "this to work"))
} else {
mapOf("i dont want" to "this to work")
}
logger.info("I need to use my variable to pass the build!: $map")
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
}
}
/**
* Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
* Therefore this flow should reload 5 times when completed without errors or restarts.
*/
@StartableByRPC
@InitiatingFlow
class MyTimedFlow : FlowLogic<Unit>(), TimedFlow {
companion object {
var thrown = false
}
override val isTimeoutEnabled: Boolean = true
@Suspendable
override fun call() {
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
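// Throw once so the flow times out and the state machine retries it.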
if (!thrown) {
thrown = true
throw FlowTimeoutException()
}
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
}
}
@StartableByRPC
@InitiatingFlow
class TransientConnectionFailureFlow : FlowLogic<Unit>() {
companion object {
var retryCount = 0
}
@Suspendable
override fun call() {
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
if (retryCount < 3) {
retryCount += 1
throw SQLTransientConnectionException("Connection is not available")
}
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
}
}
/**
* Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
* Therefore this flow should reload 5 times when completed without errors or restarts.
*/
@StartableByRPC
@InitiatingFlow
class MyHospitalizingFlow : FlowLogic<Unit>() {
companion object {
var thrown = false
}
@Suspendable
override fun call() {
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
if (!thrown) {
thrown = true
throw HospitalizeFlowException("i want to try again")
}
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
}
}
/**
* Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
* Therefore this flow should reload 5 times when completed without errors or restarts.
*/
@StartableByRPC
@InitiatingFlow
class IdempotentHospitalizingFlow : FlowLogic<Unit>(), IdempotentFlow {
companion object {
var thrown = false
}
@Suspendable
override fun call() {
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
if (!thrown) {
thrown = true
throw HospitalizeFlowException("i want to try again")
}
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
}
}
}

View File

@ -1,12 +1,16 @@
package net.corda.node.flows
import co.paralleluniverse.fibers.Suspendable
import net.corda.client.rpc.CordaRPCClient
import net.corda.client.rpc.CordaRPCClientConfiguration
import net.corda.core.CordaRuntimeException
import net.corda.core.flows.*
import net.corda.core.flows.FlowExternalAsyncOperation
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.IdempotentFlow
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.serialization.CordaSerializable
import net.corda.core.utilities.ProgressTracker
@ -22,6 +26,7 @@ import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.User
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions.assertThatExceptionOfType
import org.hibernate.exception.ConstraintViolationException
import org.junit.After
@ -32,7 +37,8 @@ import java.sql.SQLException
import java.sql.SQLTransientConnectionException
import java.time.Duration
import java.time.temporal.ChronoUnit
import java.util.*
import java.util.Collections
import java.util.HashSet
import java.util.concurrent.CompletableFuture
import java.util.concurrent.TimeoutException
import kotlin.test.assertEquals
@ -40,7 +46,11 @@ import kotlin.test.assertFailsWith
import kotlin.test.assertNotNull
class FlowRetryTest {
val config = CordaRPCClientConfiguration.DEFAULT.copy(connectionRetryIntervalMultiplier = 1.1)
private companion object {
val user = User("mark", "dadada", setOf(Permissions.all()))
val cordapps = listOf(enclosedCordapp())
}
@Before
fun resetCounters() {
@ -57,146 +67,134 @@ class FlowRetryTest {
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.clear()
}
@Test(timeout=300_000)
fun `flows continue despite errors`() {
@Test(timeout = 300_000)
fun `flows continue despite errors`() {
val numSessions = 2
val numIterations = 10
val user = User("mark", "dadada", setOf(Permissions.startFlow<InitiatorFlow>()))
val result: Any? = driver(DriverParameters(
startNodesInProcess = isQuasarAgentSpecified(),
notarySpecs = emptyList()
)) {
val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
val result: Any? = driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList())) {
val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
it.proxy.startFlow(::InitiatorFlow, numSessions, numIterations, nodeBHandle.nodeInfo.singleIdentity()).returnValue.getOrThrow()
}
val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it, rpcUsers = listOf(user)) }
.transpose()
.getOrThrow()
val result = nodeAHandle.rpc.startFlow(
::InitiatorFlow,
numSessions,
numIterations,
nodeBHandle.nodeInfo.singleIdentity()
).returnValue.getOrThrow()
result
}
assertNotNull(result)
assertEquals("$numSessions:$numIterations", result)
}
@Test(timeout=300_000)
fun `async operation deduplication id is stable across retries`() {
val user = User("mark", "dadada", setOf(Permissions.startFlow<AsyncRetryFlow>()))
driver(DriverParameters(
startNodesInProcess = isQuasarAgentSpecified(),
notarySpecs = emptyList()
)) {
@Test(timeout = 300_000)
fun `async operation deduplication id is stable across retries`() {
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
it.proxy.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
}
nodeAHandle.rpc.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
}
}
@Test(timeout=300_000)
fun `flow gives up after number of exceptions, even if this is the first line of the flow`() {
val user = User("mark", "dadada", setOf(Permissions.startFlow<RetryFlow>()))
assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
driver(DriverParameters(
startNodesInProcess = isQuasarAgentSpecified(),
notarySpecs = emptyList()
)) {
val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
it.proxy.startFlow(::RetryFlow).returnValue.getOrThrow()
}
result
}
}
}
@Test(timeout=300_000)
fun `flow that throws in constructor throws for the RPC client that attempted to start it`() {
val user = User("mark", "dadada", setOf(Permissions.startFlow<ThrowingFlow>()))
assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
driver(DriverParameters(
startNodesInProcess = isQuasarAgentSpecified(),
notarySpecs = emptyList()
)) {
val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
it.proxy.startFlow(::ThrowingFlow).returnValue.getOrThrow()
}
result
}
}
}
@Test(timeout=300_000)
fun `SQLTransientConnectionExceptions thrown by hikari are retried 3 times and then kept in the checkpoints table`() {
val user = User("mark", "dadada", setOf(Permissions.all()))
driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
@Test(timeout = 300_000)
fun `flow gives up after number of exceptions, even if this is the first line of the flow`() {
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
assertFailsWith<TimeoutException> {
it.proxy.startFlow(::TransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
.returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
}
assertEquals(3, TransientConnectionFailureFlow.retryCount)
assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get())
assertFailsWith<CordaRuntimeException> {
nodeAHandle.rpc.startFlow(::RetryFlow).returnValue.getOrThrow()
}
}
}
@Test(timeout=300_000)
fun `Specific exception still detected even if it is nested inside another exception`() {
val user = User("mark", "dadada", setOf(Permissions.all()))
driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
@Test(timeout = 300_000)
fun `flow that throws in constructor throws for the RPC client that attempted to start it`() {
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
assertFailsWith<TimeoutException> {
it.proxy.startFlow(::WrappedTransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
.returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
}
assertEquals(3, WrappedTransientConnectionFailureFlow.retryCount)
assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get())
assertFailsWith<CordaRuntimeException> {
nodeAHandle.rpc.startFlow(::ThrowingFlow).returnValue.getOrThrow()
}
}
}
@Test(timeout=300_000)
fun `General external exceptions are not retried and propagate`() {
val user = User("mark", "dadada", setOf(Permissions.all()))
driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
@Test(timeout = 300_000)
fun `SQLTransientConnectionExceptions thrown by hikari are retried 3 times and then kept in the checkpoints table`() {
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it, rpcUsers = listOf(user)) }
.transpose()
.getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
assertFailsWith<CordaRuntimeException> {
it.proxy.startFlow(::GeneralExternalFailureFlow, nodeBHandle.nodeInfo.singleIdentity()).returnValue.getOrThrow()
}
assertEquals(0, GeneralExternalFailureFlow.retryCount)
assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.FAILED).returnValue.get())
assertFailsWith<TimeoutException> {
nodeAHandle.rpc.startFlow(::TransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
.returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
}
assertEquals(3, TransientConnectionFailureFlow.retryCount)
assertEquals(
1,
nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get()
)
}
}
@Test(timeout=300_000)
fun `Permission exceptions are not retried and propagate`() {
@Test(timeout = 300_000)
fun `Specific exception still detected even if it is nested inside another exception`() {
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it, rpcUsers = listOf(user)) }
.transpose()
.getOrThrow()
assertFailsWith<TimeoutException> {
nodeAHandle.rpc.startFlow(::WrappedTransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
.returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
}
assertEquals(3, WrappedTransientConnectionFailureFlow.retryCount)
assertEquals(
1,
nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get()
)
}
}
@Test(timeout = 300_000)
fun `General external exceptions are not retried and propagate`() {
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it, rpcUsers = listOf(user)) }
.transpose()
.getOrThrow()
assertFailsWith<CordaRuntimeException> {
nodeAHandle.rpc.startFlow(
::GeneralExternalFailureFlow,
nodeBHandle.nodeInfo.singleIdentity()
).returnValue.getOrThrow()
}
assertEquals(0, GeneralExternalFailureFlow.retryCount)
assertEquals(
1,
nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.FAILED).returnValue.get()
)
}
}
@Test(timeout = 300_000)
fun `Permission exceptions are not retried and propagate`() {
val user = User("mark", "dadada", setOf())
driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
it.proxy.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
}.withMessageStartingWith("User not authorized to perform RPC call")
// This stays at -1 since the flow never even got called
assertEquals(-1, GeneralExternalFailureFlow.retryCount)
}
assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
nodeAHandle.rpc.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
}.withMessageStartingWith("User not authorized to perform RPC call")
// This stays at -1 since the flow never even got called
assertEquals(-1, GeneralExternalFailureFlow.retryCount)
}
}
}
@ -306,6 +304,10 @@ enum class Step { First, BeforeInitiate, AfterInitiate, AfterInitiateSendReceive
data class Visited(val sessionNum: Int, val iterationNum: Int, val step: Step)
class BrokenMap<K, V>(delegate: MutableMap<K, V> = mutableMapOf()) : MutableMap<K, V> by delegate {
override fun put(key: K, value: V): V? = throw IllegalStateException("Broken on purpose")
}
@StartableByRPC
class RetryFlow() : FlowLogic<String>(), IdempotentFlow {
companion object {
@ -333,7 +335,7 @@ class AsyncRetryFlow() : FlowLogic<String>(), IdempotentFlow {
val deduplicationIds = mutableSetOf<String>()
}
class RecordDeduplicationId: FlowExternalAsyncOperation<String> {
class RecordDeduplicationId : FlowExternalAsyncOperation<String> {
override fun execute(deduplicationId: String): CompletableFuture<String> {
val dedupeIdIsNew = deduplicationIds.add(deduplicationId)
if (dedupeIdIsNew) {
@ -414,8 +416,9 @@ class WrappedTransientConnectionFailureFlow(private val party: Party) : FlowLogi
// checkpoint will restart the flow after the send
retryCount += 1
throw IllegalStateException(
"wrapped error message",
IllegalStateException("another layer deep", SQLTransientConnectionException("Connection is not available")))
"wrapped error message",
IllegalStateException("another layer deep", SQLTransientConnectionException("Connection is not available"))
)
}
}
@ -456,12 +459,14 @@ class GeneralExternalFailureResponder(private val session: FlowSession) : FlowLo
@StartableByRPC
class GetCheckpointNumberOfStatusFlow(private val flowStatus: Checkpoint.FlowStatus) : FlowLogic<Long>() {
@Suspendable
override fun call(): Long {
val sqlStatement =
"select count(*) " +
"from node_checkpoints " +
"where status = ${flowStatus.ordinal} " +
"and flow_id != '${runId.uuid}' " // don't count in the checkpoint of the current flow
"select count(*) " +
"from node_checkpoints " +
"where status = ${flowStatus.ordinal} " +
"and flow_id != '${runId.uuid}' " // don't count in the checkpoint of the current flow
return serviceHub.jdbcSession().prepareStatement(sqlStatement).use { ps ->
ps.executeQuery().use { rs ->

View File

@ -0,0 +1,294 @@
package net.corda.node.flows
import co.paralleluniverse.fibers.Suspendable
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.CordaRuntimeException
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.flows.UnexpectedFlowEndException
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.serialization.CordaSerializable
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.core.utilities.toNonEmptySet
import net.corda.core.utilities.unwrap
import net.corda.node.services.Permissions
import net.corda.node.services.statemachine.transitions.PrematureSessionCloseException
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.User
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions.assertThatThrownBy
import org.junit.Test
import java.sql.SQLTransientConnectionException
import kotlin.test.assertEquals
class FlowSessionCloseTest {
private val user = User("user", "pwd", setOf(Permissions.all()))
@Test(timeout=300_000)
fun `flow cannot close uninitialised session`() {
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
val (nodeAHandle, nodeBHandle) = listOf(
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
).transpose().getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
assertThatThrownBy { it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), true, null, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow() }
.isInstanceOf(CordaRuntimeException::class.java)
.hasMessageContaining(PrematureSessionCloseException::class.java.name)
.hasMessageContaining("The following session was closed before it was initialised")
}
}
}
@Test(timeout=300_000)
fun `flow cannot access closed session, unless it's a duplicate close which is handled gracefully`() {
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
val (nodeAHandle, nodeBHandle) = listOf(
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
).transpose().getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
InitiatorFlow.SessionAPI.values().forEach { sessionAPI ->
when (sessionAPI) {
InitiatorFlow.SessionAPI.CLOSE -> {
it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, sessionAPI, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow()
}
else -> {
assertThatThrownBy { it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, sessionAPI, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow() }
.isInstanceOf(UnexpectedFlowEndException::class.java)
.hasMessageContaining("Tried to access ended session")
}
}
}
}
}
}
@Test(timeout=300_000)
fun `flow can close initialised session successfully`() {
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
val (nodeAHandle, nodeBHandle) = listOf(
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
).transpose().getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, null, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow()
}
}
}
@Test(timeout=300_000)
fun `flow can close initialised session successfully even in case of failures and replays`() {
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
val (nodeAHandle, nodeBHandle) = listOf(
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
).transpose().getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, null, InitiatorFlow.ResponderReaction.RETRY_CLOSE_FROM_CHECKPOINT).returnValue.getOrThrow()
}
}
}
@Test(timeout=300_000)
fun `flow can close multiple sessions successfully`() {
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
val (nodeAHandle, nodeBHandle) = listOf(
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
).transpose().getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
it.proxy.startFlow(::InitiatorMultipleSessionsFlow, nodeBHandle.nodeInfo.legalIdentities.first()).returnValue.getOrThrow()
}
}
}
/**
* This test ensures that when sessions are closed, the associated resources are eagerly cleaned up.
* If sessions are not closed, then the node will crash with an out-of-memory error.
* This can be confirmed by commenting out the [FlowSession.close] operation in the invoked flow and re-running the test.
*/
@Test(timeout=300_000)
fun `flow looping over sessions can close them to release resources and avoid out-of-memory failures, when the other side does not finish early`() {
driver(DriverParameters(startNodesInProcess = false, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
val (nodeAHandle, nodeBHandle) = listOf(
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user), maximumHeapSize = "256m"),
startNode(providedName = BOB_NAME, rpcUsers = listOf(user), maximumHeapSize = "256m")
).transpose().getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
it.proxy.startFlow(::InitiatorLoopingFlow, nodeBHandle.nodeInfo.legalIdentities.first(), true).returnValue.getOrThrow()
}
}
}
@Test(timeout=300_000)
fun `flow looping over sessions will close sessions automatically, when the other side finishes early`() {
driver(DriverParameters(startNodesInProcess = false, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
val (nodeAHandle, nodeBHandle) = listOf(
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user), maximumHeapSize = "256m"),
startNode(providedName = BOB_NAME, rpcUsers = listOf(user), maximumHeapSize = "256m")
).transpose().getOrThrow()
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
it.proxy.startFlow(::InitiatorLoopingFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false).returnValue.getOrThrow()
}
}
}
@InitiatingFlow
@StartableByRPC
class InitiatorFlow(val party: Party, private val prematureClose: Boolean = false,
private val accessClosedSessionWithApi: SessionAPI? = null,
private val responderReaction: ResponderReaction): FlowLogic<Unit>() {
@CordaSerializable
enum class SessionAPI {
SEND,
SEND_AND_RECEIVE,
RECEIVE,
GET_FLOW_INFO,
CLOSE
}
@CordaSerializable
enum class ResponderReaction {
NORMAL_CLOSE,
RETRY_CLOSE_FROM_CHECKPOINT
}
@Suspendable
override fun call() {
val session = initiateFlow(party)
if (prematureClose) {
session.close()
}
session.send(responderReaction)
sleep(1.seconds)
if (accessClosedSessionWithApi != null) {
when(accessClosedSessionWithApi) {
SessionAPI.SEND -> session.send("dummy payload ")
SessionAPI.RECEIVE -> session.receive<String>()
SessionAPI.SEND_AND_RECEIVE -> session.sendAndReceive<String>("dummy payload")
SessionAPI.GET_FLOW_INFO -> session.getCounterpartyFlowInfo()
SessionAPI.CLOSE -> session.close()
}
}
}
}
@InitiatedBy(InitiatorFlow::class)
class InitiatedFlow(private val otherSideSession: FlowSession): FlowLogic<Unit>() {
companion object {
var thrown = false
}
@Suspendable
override fun call() {
val responderReaction = otherSideSession.receive<InitiatorFlow.ResponderReaction>()
.unwrap{ it }
when(responderReaction) {
InitiatorFlow.ResponderReaction.NORMAL_CLOSE -> {
otherSideSession.close()
}
InitiatorFlow.ResponderReaction.RETRY_CLOSE_FROM_CHECKPOINT -> {
otherSideSession.close()
// failing with a transient exception to force a replay of the close.
if (!thrown) {
thrown = true
throw SQLTransientConnectionException("Connection is not available")
}
}
}
}
}
@InitiatingFlow
@StartableByRPC
class InitiatorLoopingFlow(val party: Party, val blockingCounterparty: Boolean = false): FlowLogic<Unit>() {
@Suspendable
override fun call() {
for (i in 1..1_000) {
val session = initiateFlow(party)
session.sendAndReceive<String>(blockingCounterparty ).unwrap{ assertEquals("Got it", it) }
/**
* If the counterparty blocks, we need to eagerly close the session and release resources to avoid running out of memory.
* Otherwise, the session end messages from the other side will do that automatically.
*/
if (blockingCounterparty) {
session.close()
}
logger.info("Completed iteration $i")
}
}
}
@InitiatedBy(InitiatorLoopingFlow::class)
class InitiatedLoopingFlow(private val otherSideSession: FlowSession): FlowLogic<Unit>() {
@Suspendable
override fun call() {
val shouldBlock = otherSideSession.receive<Boolean>()
.unwrap{ it }
otherSideSession.send("Got it")
if (shouldBlock) {
otherSideSession.receive<String>()
}
}
}
@InitiatingFlow
@StartableByRPC
class InitiatorMultipleSessionsFlow(val party: Party): FlowLogic<Unit>() {
@Suspendable
override fun call() {
for (round in 1 .. 2) {
val sessions = mutableListOf<FlowSession>()
for (session_number in 1 .. 5) {
val session = initiateFlow(party)
sessions.add(session)
session.sendAndReceive<String>("What's up?").unwrap{ assertEquals("All good!", it) }
}
close(sessions.toNonEmptySet())
}
}
}
@InitiatedBy(InitiatorMultipleSessionsFlow::class)
class InitiatedMultipleSessionsFlow(private val otherSideSession: FlowSession): FlowLogic<Unit>() {
@Suspendable
override fun call() {
otherSideSession.receive<String>()
.unwrap{ assertEquals("What's up?", it) }
otherSideSession.send("All good!")
}
}
}

View File

@ -14,6 +14,7 @@ import net.corda.core.flows.StateMachineRunId
import net.corda.core.flows.UnexpectedFlowEndException
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.messaging.startFlow
import net.corda.core.node.services.StatesNotAvailableException
@ -68,9 +69,10 @@ class KillFlowTest {
@Test(timeout = 300_000)
fun `a killed flow will propagate the killed error to counter parties when it reaches the next suspension point`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.let { rpc ->
val handle = rpc.startFlow(
::AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriends,
@ -118,8 +120,10 @@ class KillFlowTest {
@Test(timeout = 300_000)
fun `killing a flow suspended in send + receive + sendAndReceive ends the flow immediately`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = false)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
val bobParty = bob.nodeInfo.singleIdentity()
bob.stop()
val terminated = (bob as OutOfProcess).process.waitFor(30, TimeUnit.SECONDS)
@ -192,9 +196,10 @@ class KillFlowTest {
@Test(timeout = 300_000)
fun `a killed flow will propagate the killed error to counter parties if it was suspended`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
alice.rpc.let { rpc ->
val handle = rpc.startFlow(
::AFlowThatGetsMurderedAndSomehowKillsItsFriends,
@ -224,9 +229,10 @@ class KillFlowTest {
@Test(timeout = 300_000)
fun `a killed initiated flow will propagate the killed error to the initiator and its counter parties`() {
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
val alice = startNode(providedName = ALICE_NAME).getOrThrow()
val bob = startNode(providedName = BOB_NAME).getOrThrow()
val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
.map { startNode(providedName = it) }
.transpose()
.getOrThrow()
val handle = alice.rpc.startFlow(
::AFlowThatGetsMurderedByItsFriend,
listOf(bob.nodeInfo.singleIdentity(), charlie.nodeInfo.singleIdentity())

View File

@ -1,68 +0,0 @@
package net.corda.node.logging
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.internal.div
import net.corda.core.messaging.FlowHandle
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeHandle
import net.corda.testing.driver.driver
import org.assertj.core.api.Assertions.assertThat
import org.junit.Test
import java.io.File
class ErrorCodeLoggingTests {
@Test(timeout=300_000)
fun `log entries with a throwable and ERROR or WARN get an error code appended`() {
driver(DriverParameters(notarySpecs = emptyList())) {
val node = startNode(startInSameProcess = false).getOrThrow()
node.rpc.startFlow(::MyFlow).waitForCompletion()
val logFile = node.logFile()
val linesWithErrorCode = logFile.useLines { lines ->
lines.filter { line ->
line.contains("[errorCode=")
}.filter { line ->
line.contains("moreInformationAt=https://errors.corda.net/")
}.toList()
}
assertThat(linesWithErrorCode).isNotEmpty
}
}
// This is used to detect broken logging which can be caused by loggers being initialized
// before the initLogging() call is made
@Test(timeout=300_000)
fun `When logging is set to error level, there are no other levels logged after node startup`() {
driver(DriverParameters(notarySpecs = emptyList())) {
val node = startNode(startInSameProcess = false, logLevelOverride = "ERROR").getOrThrow()
val logFile = node.logFile()
val lengthAfterStart = logFile.length()
node.rpc.startFlow(::MyFlow).waitForCompletion()
// An exception thrown in a flow will log at the "INFO" level.
assertThat(logFile.length()).isEqualTo(lengthAfterStart)
}
}
@StartableByRPC
@InitiatingFlow
class MyFlow : FlowLogic<String>() {
override fun call(): String {
throw IllegalArgumentException("Mwahahahah")
}
}
}
private fun FlowHandle<*>.waitForCompletion() {
try {
returnValue.getOrThrow()
} catch (e: Exception) {
// This is expected to throw an exception, using getOrThrow() just to wait until done.
}
}
fun NodeHandle.logFile(): File = (baseDirectory / "logs").toFile().walk().filter { it.name.startsWith("node-") && it.extension == "log" }.single()

View File

@ -7,6 +7,7 @@ import net.corda.core.contracts.Command
import net.corda.core.contracts.StateAndContract
import net.corda.core.flows.*
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.internal.packageName
import net.corda.core.messaging.startFlow
import net.corda.core.transactions.SignedTransaction
@ -57,8 +58,10 @@ class FlowsDrainingModeContentionTest {
portAllocation = portAllocation,
extraCordappPackagesToScan = listOf(MessageState::class.packageName)
)) {
val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it, rpcUsers = users) }
.transpose()
.getOrThrow()
val nodeARpcInfo = RpcInfo(nodeA.rpcAddress, user.username, user.password)
val flow = nodeA.rpc.startFlow(::ProposeTransactionAndWaitForCommit, message, nodeARpcInfo, nodeB.nodeInfo.singleIdentity(), defaultNotaryIdentity)

View File

@ -4,6 +4,7 @@ import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.*
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.map
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.getOrThrow
@ -53,8 +54,11 @@ class P2PFlowsDrainingModeTest {
@Test(timeout=300_000)
fun `flows draining mode suspends consumption of initial session messages`() {
driver(DriverParameters(startNodesInProcess = false, portAllocation = portAllocation, notarySpecs = emptyList())) {
val initiatedNode = startNode(providedName = ALICE_NAME).getOrThrow()
val initiating = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow().rpc
val (initiatedNode, bob) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it, rpcUsers = users) }
.transpose()
.getOrThrow()
val initiating = bob.rpc
val counterParty = initiatedNode.nodeInfo.singleIdentity()
val initiated = initiatedNode.rpc
@ -85,8 +89,10 @@ class P2PFlowsDrainingModeTest {
driver(DriverParameters(portAllocation = portAllocation, notarySpecs = emptyList())) {
val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it, rpcUsers = users) }
.transpose()
.getOrThrow()
var successful = false
val latch = CountDownLatch(1)
@ -133,8 +139,10 @@ class P2PFlowsDrainingModeTest {
driver(DriverParameters(portAllocation = portAllocation, notarySpecs = emptyList())) {
val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it, rpcUsers = users) }
.transpose()
.getOrThrow()
var successful = false
val latch = CountDownLatch(1)

View File

@ -1,10 +1,10 @@
package net.corda.node.services.config
import net.corda.core.utilities.getOrThrow
import net.corda.node.logging.logFile
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.incrementalPortAllocation
import net.corda.testing.driver.logFile
import org.junit.Assert.assertTrue
import org.junit.Test

View File

@ -0,0 +1,72 @@
package net.corda.node.services.messaging
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.Destination
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import org.junit.Test
import kotlin.test.assertEquals
class MessagingSendAllTest {
@Test(timeout=300_000)
fun `flow can exchange messages with multiple sessions to the same party in parallel`() {
driver(DriverParameters(startNodesInProcess = true)) {
val (alice, bob) = listOf(
startNode(providedName = ALICE_NAME),
startNode(providedName = BOB_NAME)
).transpose().getOrThrow()
val bobIdentity = bob.nodeInfo.singleIdentity()
val messages = listOf(
bobIdentity to "hey bob 1",
bobIdentity to "hey bob 2"
)
alice.rpc.startFlow(::SenderFlow, messages).returnValue.getOrThrow()
}
}
@StartableByRPC
@InitiatingFlow
class SenderFlow(private val parties: List<Pair<Destination, String>>): FlowLogic<String>() {
@Suspendable
override fun call(): String {
val messagesPerSession = parties.toList().map { (party, messageType) ->
val session = initiateFlow(party)
Pair(session, messageType)
}.toMap()
sendAllMap(messagesPerSession)
val messages = receiveAll(String::class.java, messagesPerSession.keys.toList())
messages.map { it.unwrap { payload -> assertEquals("pong", payload) } }
return "ok"
}
}
@InitiatedBy(SenderFlow::class)
class RecipientFlow(private val otherPartySession: FlowSession): FlowLogic<String>() {
@Suspendable
override fun call(): String {
otherPartySession.receive<String>().unwrap { it }
otherPartySession.send("pong")
return "ok"
}
}
}

View File

@ -5,6 +5,7 @@ import net.corda.core.CordaRuntimeException
import net.corda.core.flows.*
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
@ -58,8 +59,10 @@ class RpcExceptionHandlingTest {
}
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()))) {
val devModeNode = startNode(params, BOB_NAME).getOrThrow()
val node = startNode(ALICE_NAME, devMode = false, parameters = params).getOrThrow()
val (devModeNode, node) = listOf(startNode(params, BOB_NAME),
startNode(ALICE_NAME, devMode = false, parameters = params))
.transpose()
.getOrThrow()
assertThatThrownExceptionIsReceivedUnwrapped(devModeNode)
assertThatThrownExceptionIsReceivedUnwrapped(node)
@ -77,8 +80,10 @@ class RpcExceptionHandlingTest {
}
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()))) {
val devModeNode = startNode(params, BOB_NAME).getOrThrow()
val node = startNode(ALICE_NAME, devMode = false, parameters = params).getOrThrow()
val (devModeNode, node) = listOf(startNode(params, BOB_NAME),
startNode(ALICE_NAME, devMode = false, parameters = params))
.transpose()
.getOrThrow()
assertThatThrownBy { devModeNode.throwExceptionFromFlow() }.isInstanceOfSatisfying(FlowException::class.java) { exception ->
assertThat(exception).hasNoCause()
@ -102,8 +107,10 @@ class RpcExceptionHandlingTest {
fun DriverDSL.scenario(nameA: CordaX500Name, nameB: CordaX500Name, devMode: Boolean) {
val nodeA = startNode(nameA, devMode, params).getOrThrow()
val nodeB = startNode(nameB, devMode, params).getOrThrow()
val (nodeA, nodeB) = listOf(nameA, nameB)
.map { startNode(it, devMode, params) }
.transpose()
.getOrThrow()
nodeA.rpc.startFlow(::InitFlow, nodeB.nodeInfo.singleIdentity()).returnValue.getOrThrow()
}

View File

@ -15,6 +15,7 @@ import net.corda.core.flows.NotaryException
import net.corda.core.flows.ReceiveFinalityFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.StateMachineUpdate
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.OpaqueBytes
@ -46,14 +47,20 @@ class FlowHospitalTest {
private val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))
@Test(timeout=300_000)
fun `when double spend occurs, the flow is successfully deleted on the counterparty`() {
@Test(timeout = 300_000)
fun `when double spend occurs, the flow is successfully deleted on the counterparty`() {
driver(DriverParameters(cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts")))) {
val charlie = startNode(providedName = CHARLIE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
val charlieClient = CordaRPCClient(charlie.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val (charlieClient, aliceClient) = listOf(CHARLIE_NAME, ALICE_NAME)
.map {
startNode(providedName = it,
rpcUsers = listOf(rpcUser))
}
.transpose()
.getOrThrow()
.map {
CordaRPCClient(it.rpcAddress)
.start(rpcUser.username, rpcUser.password).proxy
}
val aliceParty = aliceClient.nodeInfo().legalIdentities.first()
@ -80,7 +87,7 @@ class FlowHospitalTest {
val secondStateAndRef = charlieClient.startFlow(::IssueFlow, defaultNotaryIdentity).returnValue.get()
charlieClient.startFlow(::SpendFlowWithCustomException, secondStateAndRef, aliceParty).returnValue.get()
val secondSubscription = aliceClient.stateMachinesFeed().updates.subscribe{
val secondSubscription = aliceClient.stateMachinesFeed().updates.subscribe {
if (it is StateMachineUpdate.Removed && it.result.isFailure)
secondLatch.countDown()
}
@ -95,75 +102,75 @@ class FlowHospitalTest {
}
}
@Test(timeout=300_000)
fun `HospitalizeFlowException thrown`() {
@Test(timeout = 300_000)
fun `HospitalizeFlowException thrown`() {
var observationCounter: Int = 0
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
++observationCounter
}
driver(
DriverParameters(
startNodesInProcess = true,
cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
)
DriverParameters(
startNodesInProcess = true,
cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
)
) {
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
assertFailsWith<TimeoutException> {
aliceClient.startFlow(::ThrowingHospitalisedExceptionFlow, HospitalizeFlowException::class.java)
.returnValue.getOrThrow(5.seconds)
.returnValue.getOrThrow(5.seconds)
}
assertEquals(1, observationCounter)
}
}
@Test(timeout=300_000)
fun `Custom exception wrapping HospitalizeFlowException thrown`() {
@Test(timeout = 300_000)
fun `Custom exception wrapping HospitalizeFlowException thrown`() {
var observationCounter: Int = 0
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
++observationCounter
}
driver(
DriverParameters(
startNodesInProcess = true,
cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
)
DriverParameters(
startNodesInProcess = true,
cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
)
) {
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
assertFailsWith<TimeoutException> {
aliceClient.startFlow(::ThrowingHospitalisedExceptionFlow, WrappingHospitalizeFlowException::class.java)
.returnValue.getOrThrow(5.seconds)
.returnValue.getOrThrow(5.seconds)
}
assertEquals(1, observationCounter)
}
}
@Test(timeout=300_000)
fun `Custom exception extending HospitalizeFlowException thrown`() {
@Test(timeout = 300_000)
fun `Custom exception extending HospitalizeFlowException thrown`() {
var observationCounter: Int = 0
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
++observationCounter
}
driver(
DriverParameters(
startNodesInProcess = true,
cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
)
DriverParameters(
startNodesInProcess = true,
cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
)
) {
// one node is enough for this test
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
assertFailsWith<TimeoutException> {
aliceClient.startFlow(::ThrowingHospitalisedExceptionFlow, ExtendingHospitalizeFlowException::class.java)
.returnValue.getOrThrow(5.seconds)
.returnValue.getOrThrow(5.seconds)
}
assertEquals(1, observationCounter)
}
}
@Test(timeout=300_000)
fun `HospitalizeFlowException cloaking an important exception thrown`() {
@Test(timeout = 300_000)
fun `HospitalizeFlowException cloaking an important exception thrown`() {
var dischargedCounter = 0
var observationCounter: Int = 0
StaffedFlowHospital.onFlowDischarged.add { _, _ ->
@ -173,16 +180,16 @@ class FlowHospitalTest {
++observationCounter
}
driver(
DriverParameters(
startNodesInProcess = true,
cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
)
DriverParameters(
startNodesInProcess = true,
cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
)
) {
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
assertFailsWith<TimeoutException> {
aliceClient.startFlow(::ThrowingHospitalisedExceptionFlow, CloakingHospitalizeFlowException::class.java)
.returnValue.getOrThrow(5.seconds)
.returnValue.getOrThrow(5.seconds)
}
assertEquals(0, observationCounter)
// Since the flow will keep getting discharged from hospital dischargedCounter will be > 1.
@ -191,7 +198,7 @@ class FlowHospitalTest {
}
@StartableByRPC
class IssueFlow(val notary: Party): FlowLogic<StateAndRef<SingleOwnerState>>() {
class IssueFlow(val notary: Party) : FlowLogic<StateAndRef<SingleOwnerState>>() {
@Suspendable
override fun call(): StateAndRef<SingleOwnerState> {
@ -201,12 +208,11 @@ class FlowHospitalTest {
val notarised = subFlow(FinalityFlow(signedTransaction, emptySet<FlowSession>()))
return notarised.coreTransaction.outRef(0)
}
}
@StartableByRPC
@InitiatingFlow
class SpendFlow(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party): FlowLogic<Unit>() {
class SpendFlow(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party) : FlowLogic<Unit>() {
@Suspendable
override fun call() {
@ -216,11 +222,10 @@ class FlowHospitalTest {
sessionWithCounterParty.sendAndReceive<String>("initial-message")
subFlow(FinalityFlow(signedTransaction, setOf(sessionWithCounterParty)))
}
}
@InitiatedBy(SpendFlow::class)
class AcceptSpendFlow(private val otherSide: FlowSession): FlowLogic<Unit>() {
class AcceptSpendFlow(private val otherSide: FlowSession) : FlowLogic<Unit>() {
@Suspendable
override fun call() {
@ -229,12 +234,11 @@ class FlowHospitalTest {
subFlow(ReceiveFinalityFlow(otherSide))
}
}
@StartableByRPC
@InitiatingFlow
class SpendFlowWithCustomException(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party):
class SpendFlowWithCustomException(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party) :
FlowLogic<Unit>() {
@Suspendable
@ -249,11 +253,10 @@ class FlowHospitalTest {
throw DoubleSpendException("double spend!", e)
}
}
}
@InitiatedBy(SpendFlowWithCustomException::class)
class AcceptSpendFlowWithCustomException(private val otherSide: FlowSession): FlowLogic<Unit>() {
class AcceptSpendFlowWithCustomException(private val otherSide: FlowSession) : FlowLogic<Unit>() {
@Suspendable
override fun call() {
@ -262,16 +265,15 @@ class FlowHospitalTest {
subFlow(ReceiveFinalityFlow(otherSide))
}
}
class DoubleSpendException(message: String, cause: Throwable): FlowException(message, cause)
class DoubleSpendException(message: String, cause: Throwable) : FlowException(message, cause)
@StartableByRPC
class ThrowingHospitalisedExceptionFlow(
// Starting this Flow from an RPC client: if we pass in an encapsulated exception within another exception then the wrapping
// exception, when deserialized, will get grounded into a CordaRuntimeException (this happens in ThrowableSerializer#fromProxy).
private val hospitalizeFlowExceptionClass: Class<*>): FlowLogic<Unit>() {
// Starting this Flow from an RPC client: if we pass in an encapsulated exception within another exception then the wrapping
// exception, when deserialized, will get grounded into a CordaRuntimeException (this happens in ThrowableSerializer#fromProxy).
private val hospitalizeFlowExceptionClass: Class<*>) : FlowLogic<Unit>() {
@Suspendable
override fun call() {
@ -282,7 +284,7 @@ class FlowHospitalTest {
}
}
class WrappingHospitalizeFlowException(cause: HospitalizeFlowException = HospitalizeFlowException()) : Exception(cause)
class WrappingHospitalizeFlowException(cause: HospitalizeFlowException = HospitalizeFlowException()) : Exception(cause)
class ExtendingHospitalizeFlowException : HospitalizeFlowException()
@ -294,5 +296,4 @@ class FlowHospitalTest {
setCause(SQLException("deadlock"))
}
}
}

View File

@ -16,6 +16,7 @@ import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.node.services.Vault
import net.corda.core.node.services.vault.QueryCriteria
@ -24,7 +25,7 @@ import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.node.services.Permissions
import net.corda.node.services.statemachine.StaffedFlowHospital
import net.corda.node.services.transactions.PersistentUniquenessProvider
import net.corda.notary.jpa.JPAUniquenessProvider
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.singleIdentity
@ -450,8 +451,11 @@ class VaultObserverExceptionTest {
findCordapp("com.r3.dbfailure.schemas")
),inMemoryDB = false)
) {
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it,
rpcUsers = listOf(user)) }
.transpose()
.getOrThrow()
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
val startErrorInObservableWhenConsumingState = {
@ -540,8 +544,11 @@ class VaultObserverExceptionTest {
),
inMemoryDB = false)
) {
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it,
rpcUsers = listOf(user)) }
.transpose()
.getOrThrow()
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
val startErrorInObservableWhenConsumingState = {
@ -622,8 +629,11 @@ class VaultObserverExceptionTest {
),
inMemoryDB = false)
) {
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it,
rpcUsers = listOf(user)) }
.transpose()
.getOrThrow()
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
val startErrorInObservableWhenCreatingSecondState = {
@ -699,8 +709,11 @@ class VaultObserverExceptionTest {
),
inMemoryDB = false)
) {
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it,
rpcUsers = listOf(user)) }
.transpose()
.getOrThrow()
val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()
val startErrorInObservableWhenConsumingState = {
@ -843,8 +856,8 @@ class VaultObserverExceptionTest {
override fun call(): List<String> {
return serviceHub.withEntityManager {
val criteriaQuery = this.criteriaBuilder.createQuery(String::class.java)
val root = criteriaQuery.from(PersistentUniquenessProvider.CommittedTransaction::class.java)
criteriaQuery.select(root.get<String>(PersistentUniquenessProvider.CommittedTransaction::transactionId.name))
val root = criteriaQuery.from(JPAUniquenessProvider.CommittedTransaction::class.java)
criteriaQuery.select(root.get(JPAUniquenessProvider.CommittedTransaction::transactionId.name))
val query = this.createQuery(criteriaQuery)
query.resultList
}

View File

@ -59,6 +59,8 @@ import net.corda.core.schemas.MappedSchema
import net.corda.core.serialization.SerializationWhitelist
import net.corda.core.serialization.SerializeAsToken
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
import net.corda.core.toFuture
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.utilities.NetworkHostAndPort
@ -131,7 +133,6 @@ import net.corda.node.services.statemachine.StateMachineManager
import net.corda.node.services.transactions.BasicVerifierFactoryService
import net.corda.node.services.transactions.DeterministicVerifierFactoryService
import net.corda.node.services.transactions.InMemoryTransactionVerifierService
import net.corda.node.services.transactions.SimpleNotaryService
import net.corda.node.services.transactions.VerifierFactoryService
import net.corda.node.services.upgrade.ContractUpgradeServiceImpl
import net.corda.node.services.vault.NodeVaultService
@ -317,6 +318,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
} else {
BasicVerifierFactoryService()
}
private val attachmentsClassLoaderCache: AttachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(cacheFactory).tokenize()
val contractUpgradeService = ContractUpgradeServiceImpl(cacheFactory).tokenize()
val auditService = DummyAuditService().tokenize()
@Suppress("LeakingThis")
@ -611,11 +613,22 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
val myNotaryIdentity = configuration.notary?.let {
if (it.serviceLegalName != null) {
val (notaryIdentity, notaryIdentityKeyPair) = loadNotaryClusterIdentity(it.serviceLegalName)
val (notaryIdentity, notaryIdentityKeyPair) = loadNotaryServiceIdentity(it.serviceLegalName)
keyPairs += notaryIdentityKeyPair
notaryIdentity
} else {
// In case of a single notary service myNotaryIdentity will be the node's single identity.
// The only case where myNotaryIdentity will be the node's legal identity is for existing single notary services running
// an older version. Current single notary services (V4.6+) sign requests using a separate notary service identity so the
// notary identity will be different from the node's legal identity.
// This check is here to ensure that a user does not accidentally/intentionally remove the serviceLegalName configuration
// parameter after a notary has been registered. If that were possible then the notary would start and sign incoming requests
// with the node's legal identity key, corrupting the data.
check (!cryptoService.containsKey(DISTRIBUTED_NOTARY_KEY_ALIAS)) {
"The notary service key exists in the key store but no notary service legal name has been configured. " +
"Either include the relevant 'notary.serviceLegalName' configuration or validate this key is not necessary " +
"and remove from the key store."
}
identity
}
}
@ -778,10 +791,6 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
)
}
private fun isRunningSimpleNotaryService(configuration: NodeConfiguration): Boolean {
return configuration.notary != null && configuration.notary?.className == SimpleNotaryService::class.java.name
}
private class ServiceInstantiationException(cause: Throwable?) : CordaException("Service Instantiation Error", cause)
private fun installCordaServices() {
@ -1054,8 +1063,12 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
}
}
/** Loads pre-generated notary service cluster identity. */
private fun loadNotaryClusterIdentity(serviceLegalName: CordaX500Name): Pair<PartyAndCertificate, KeyPair> {
/**
* Loads notary service identity. In the case of the experimental RAFT and BFT notary clusters, this loads the pre-generated
* cluster identity that all worker nodes share. In the case of a simple single notary, this loads the notary service identity
* that is generated during initial registration and is used to sign notarisation requests.
*/
private fun loadNotaryServiceIdentity(serviceLegalName: CordaX500Name): Pair<PartyAndCertificate, KeyPair> {
val privateKeyAlias = "$DISTRIBUTED_NOTARY_KEY_ALIAS"
val compositeKeyAlias = "$DISTRIBUTED_NOTARY_COMPOSITE_KEY_ALIAS"
@ -1171,6 +1184,8 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
private lateinit var _myInfo: NodeInfo
override val myInfo: NodeInfo get() = _myInfo
override val attachmentsClassLoaderCache: AttachmentsClassLoaderCache get() = this@AbstractNode.attachmentsClassLoaderCache
private lateinit var _networkParameters: NetworkParameters
override val networkParameters: NetworkParameters get() = _networkParameters

View File

@ -86,6 +86,7 @@ class NetworkParametersReader(private val trustRoot: X509Certificate,
logger.info("No network-parameters file found. Expecting network parameters to be available from the network map.")
networkMapClient ?: throw Error.NetworkMapNotConfigured()
val signedParams = networkMapClient.getNetworkParameters(parametersHash)
signedParams.verifiedNetworkParametersCert(trustRoot)
signedParams.serialize().open().copyTo(baseDirectory / NETWORK_PARAMS_FILE_NAME)
return signedParams
}

View File

@ -644,8 +644,8 @@ open class Node(configuration: NodeConfiguration,
storageContext = AMQP_STORAGE_CONTEXT.withClassLoader(classloader),
checkpointSerializer = KryoCheckpointSerializer,
checkpointContext = KRYO_CHECKPOINT_CONTEXT.withClassLoader(classloader)
)
checkpointContext = KRYO_CHECKPOINT_CONTEXT.withClassLoader(classloader).withCheckpointCustomSerializers(cordappLoader.cordapps.flatMap { it.checkpointCustomSerializers })
)
}
/** Starts a blocking event loop for message dispatch. */
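The checkpoint context above is now built with any CheckpointCustomSerializer implementations packaged in CorDapps (they are discovered by findCheckpointSerializers later in this diff). A minimal sketch of such a serializer, assuming only that the interface maps an object to a checkpoint-friendly proxy via toProxy and back via fromProxy; the package, class and types below are illustrative and not part of this PR:
package com.example.serialization
import net.corda.core.serialization.CheckpointCustomSerializer
import java.time.DayOfWeek
import java.util.EnumSet
// Converts a type that Kryo cannot checkpoint reliably into a simple proxy (a list of enum names) and back.
class DayOfWeekSetSerializer : CheckpointCustomSerializer<EnumSet<DayOfWeek>, List<String>> {
    override fun toProxy(obj: EnumSet<DayOfWeek>): List<String> = obj.map { it.name }
    override fun fromProxy(proxy: List<String>): EnumSet<DayOfWeek> =
            if (proxy.isEmpty()) EnumSet.noneOf(DayOfWeek::class.java)
            else EnumSet.copyOf(proxy.map { DayOfWeek.valueOf(it) })
}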

View File

@ -18,6 +18,7 @@ import net.corda.core.internal.notary.NotaryService
import net.corda.core.internal.notary.SinglePartyNotaryService
import net.corda.core.node.services.CordaService
import net.corda.core.schemas.MappedSchema
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.SerializationCustomSerializer
import net.corda.core.serialization.SerializationWhitelist
import net.corda.core.serialization.SerializeAsToken
@ -185,6 +186,7 @@ class JarScanningCordappLoader private constructor(private val cordappJarPaths:
findServices(this),
findWhitelists(url),
findSerializers(this),
findCheckpointSerializers(this),
findCustomSchemas(this),
findAllFlows(this),
url.url,
@ -334,6 +336,10 @@ class JarScanningCordappLoader private constructor(private val cordappJarPaths:
return scanResult.getClassesImplementingWithClassVersionCheck(SerializationCustomSerializer::class)
}
private fun findCheckpointSerializers(scanResult: RestrictedScanResult): List<CheckpointCustomSerializer<*, *>> {
return scanResult.getClassesImplementingWithClassVersionCheck(CheckpointCustomSerializer::class)
}
private fun findCustomSchemas(scanResult: RestrictedScanResult): Set<MappedSchema> {
return scanResult.getClassesWithSuperclass(MappedSchema::class).instances().toSet()
}

View File

@ -6,12 +6,12 @@ import net.corda.core.flows.ContractUpgradeFlow
import net.corda.core.internal.cordapp.CordappImpl
import net.corda.core.internal.location
import net.corda.node.VersionInfo
import net.corda.node.services.transactions.NodeNotarySchemaV1
import net.corda.node.services.transactions.SimpleNotaryService
import net.corda.notary.experimental.bftsmart.BFTSmartNotarySchemaV1
import net.corda.notary.experimental.bftsmart.BFTSmartNotaryService
import net.corda.notary.experimental.raft.RaftNotarySchemaV1
import net.corda.notary.experimental.raft.RaftNotaryService
import net.corda.notary.jpa.JPANotarySchemaV1
import net.corda.notary.jpa.JPANotaryService
internal object VirtualCordapp {
/** A list of the core RPC flows present in Corda */
@ -32,6 +32,7 @@ internal object VirtualCordapp {
services = listOf(),
serializationWhitelists = listOf(),
serializationCustomSerializers = listOf(),
checkpointCustomSerializers = listOf(),
customSchemas = setOf(),
info = Cordapp.Info.Default("corda-core", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
allFlows = listOf(),
@ -45,7 +46,7 @@ internal object VirtualCordapp {
}
/** A Cordapp for the built-in notary service implementation. */
fun generateSimpleNotary(versionInfo: VersionInfo): CordappImpl {
fun generateJPANotary(versionInfo: VersionInfo): CordappImpl {
return CordappImpl(
contractClassNames = listOf(),
initiatedFlows = listOf(),
@ -55,15 +56,17 @@ internal object VirtualCordapp {
services = listOf(),
serializationWhitelists = listOf(),
serializationCustomSerializers = listOf(),
customSchemas = setOf(NodeNotarySchemaV1),
checkpointCustomSerializers = listOf(),
customSchemas = setOf(JPANotarySchemaV1),
info = Cordapp.Info.Default("corda-notary", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
allFlows = listOf(),
jarPath = SimpleNotaryService::class.java.location,
jarPath = JPANotaryService::class.java.location,
jarHash = SecureHash.allOnesHash,
minimumPlatformVersion = versionInfo.platformVersion,
targetPlatformVersion = versionInfo.platformVersion,
notaryService = SimpleNotaryService::class.java,
isLoaded = false
notaryService = JPANotaryService::class.java,
isLoaded = false,
isVirtual = true
)
}
@ -78,6 +81,7 @@ internal object VirtualCordapp {
services = listOf(),
serializationWhitelists = listOf(),
serializationCustomSerializers = listOf(),
checkpointCustomSerializers = listOf(),
customSchemas = setOf(RaftNotarySchemaV1),
info = Cordapp.Info.Default("corda-notary-raft", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
allFlows = listOf(),
@ -101,6 +105,7 @@ internal object VirtualCordapp {
services = listOf(),
serializationWhitelists = listOf(),
serializationCustomSerializers = listOf(),
checkpointCustomSerializers = listOf(),
customSchemas = setOf(BFTSmartNotarySchemaV1),
info = Cordapp.Info.Default("corda-notary-bft-smart", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
allFlows = listOf(),

View File

@ -37,6 +37,7 @@ class MigrationNamedCacheFactory(private val metricRegistry: MetricRegistry?,
"NodeAttachmentService_contractAttachmentVersions" -> caffeine.maximumSize(defaultCacheSize)
"NodeParametersStorage_networkParametersByHash" -> caffeine.maximumSize(defaultCacheSize)
"NodeAttachmentTrustCalculator_trustedKeysCache" -> caffeine.maximumSize(defaultCacheSize)
"AttachmentsClassLoader_cache" -> caffeine.maximumSize(defaultCacheSize)
else -> throw IllegalArgumentException("Unexpected cache name $name.")
}
}

View File

@ -15,6 +15,8 @@ import net.corda.core.node.services.NetworkParametersService
import net.corda.core.node.services.TransactionStorage
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.internal.AttachmentsClassLoaderBuilder
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
import net.corda.core.transactions.ContractUpgradeLedgerTransaction
import net.corda.core.transactions.NotaryChangeLedgerTransaction
import net.corda.core.transactions.WireTransaction
@ -62,6 +64,8 @@ class MigrationServicesForResolution(
cacheFactory
)
private val attachmentsClassLoaderCache: AttachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(cacheFactory)
private fun defaultNetworkParameters(): NetworkParameters {
logger.warn("Using a dummy set of network parameters for migration.")
val clock = Clock.systemUTC()
@ -124,7 +128,8 @@ class MigrationServicesForResolution(
networkParameters,
tx.id,
attachmentTrustCalculator::calculate,
cordappLoader.appClassLoader) {
cordappLoader.appClassLoader,
attachmentsClassLoaderCache) {
deserialiseComponentGroup(tx.componentGroups, TransactionState::class, ComponentGroupEnum.OUTPUTS_GROUP, forceDeserialize = true)
}
states.filterIndexed {index, _ -> stateIndices.contains(index)}.toList()

View File

@ -93,6 +93,8 @@ interface NodeConfiguration : ConfigurationWithOptionsContainer {
val quasarExcludePackages: List<String>
val reloadCheckpointAfterSuspend: Boolean
companion object {
// default to at least 8MB and a bit extra for larger heap sizes
val defaultTransactionCacheSize: Long = 8.MB + getAdditionalCacheMemory()
@ -125,9 +127,13 @@ enum class JmxReporterType {
}
data class DevModeOptions(
val disableCheckpointChecker: Boolean = Defaults.disableCheckpointChecker,
val allowCompatibilityZone: Boolean = Defaults.allowCompatibilityZone,
val djvm: DJVMOptions? = null
@Deprecated(
"The checkpoint checker has been replaced by the ability to reload a checkpoint from the database after every suspend" +
"Use [NodeConfiguration.disableReloadCheckpointAfterSuspend] instead."
)
val disableCheckpointChecker: Boolean = Defaults.disableCheckpointChecker,
val allowCompatibilityZone: Boolean = Defaults.allowCompatibilityZone,
val djvm: DJVMOptions? = null
) {
internal object Defaults {
val disableCheckpointChecker = false
@ -140,10 +146,6 @@ data class DJVMOptions(
val cordaSource: List<String>
)
fun NodeConfiguration.shouldCheckCheckpoints(): Boolean {
return this.devMode && this.devModeOptions?.disableCheckpointChecker != true
}
fun NodeConfiguration.shouldStartSSHDaemon() = this.sshd != null
fun NodeConfiguration.shouldStartLocalShell() = !this.noLocalShell && System.console() != null && this.devMode
fun NodeConfiguration.shouldInitCrashShell() = shouldStartLocalShell() || shouldStartSSHDaemon()
@ -151,7 +153,7 @@ fun NodeConfiguration.shouldInitCrashShell() = shouldStartLocalShell() || should
data class NotaryConfig(
/** Specifies whether the notary validates transactions or not. */
val validating: Boolean,
/** The legal name of cluster in case of a distributed notary service. */
/** The legal name of the notary service identity. */
val serviceLegalName: CordaX500Name? = null,
/** The name of the notary service class to load. */
val className: String? = null,

View File

@ -84,7 +84,9 @@ data class NodeConfigurationImpl(
override val blacklistedAttachmentSigningKeys: List<String> = Defaults.blacklistedAttachmentSigningKeys,
override val configurationWithOptions: ConfigurationWithOptions,
override val flowExternalOperationThreadPoolSize: Int = Defaults.flowExternalOperationThreadPoolSize,
override val quasarExcludePackages: List<String> = Defaults.quasarExcludePackages
override val quasarExcludePackages: List<String> = Defaults.quasarExcludePackages,
override val reloadCheckpointAfterSuspend: Boolean = Defaults.reloadCheckpointAfterSuspend
) : NodeConfiguration {
internal object Defaults {
val jmxMonitoringHttpPort: Int? = null
@ -123,6 +125,7 @@ data class NodeConfigurationImpl(
val blacklistedAttachmentSigningKeys: List<String> = emptyList()
const val flowExternalOperationThreadPoolSize: Int = 1
val quasarExcludePackages: List<String> = emptyList()
val reloadCheckpointAfterSuspend: Boolean = System.getProperty("reloadCheckpointAfterSuspend", "false")!!.toBoolean()
fun cordappsDirectories(baseDirectory: Path) = listOf(baseDirectory / CORDAPPS_DIR_NAME_DEFAULT)
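The new reloadCheckpointAfterSuspend default above is read from a JVM system property. A small sketch of how a test might flip that default before the configuration is parsed (the property name comes from the line above; the helper itself is illustrative):

// Set the JVM-wide default and confirm it resolves to true; node.conf can still
// override it via the reloadCheckpointAfterSuspend key added to the config spec below.
fun enableCheckpointReloadForTests() {
    System.setProperty("reloadCheckpointAfterSuspend", "true")
    check(System.getProperty("reloadCheckpointAfterSuspend", "false").toBoolean())
}
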

View File

@ -8,6 +8,7 @@ import net.corda.common.validation.internal.Validated.Companion.invalid
import net.corda.common.validation.internal.Validated.Companion.valid
import net.corda.node.services.config.*
import net.corda.node.services.config.NodeConfigurationImpl.Defaults
import net.corda.node.services.config.NodeConfigurationImpl.Defaults.reloadCheckpointAfterSuspend
import net.corda.node.services.config.schema.parsers.*
internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfiguration>("NodeConfiguration") {
@ -66,6 +67,7 @@ internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfig
.withDefaultValue(Defaults.networkParameterAcceptanceSettings)
private val flowExternalOperationThreadPoolSize by int().optional().withDefaultValue(Defaults.flowExternalOperationThreadPoolSize)
private val quasarExcludePackages by string().list().optional().withDefaultValue(Defaults.quasarExcludePackages)
private val reloadCheckpointAfterSuspend by boolean().optional().withDefaultValue(Defaults.reloadCheckpointAfterSuspend)
@Suppress("unused")
private val custom by nestedObject().optional()
@Suppress("unused")
@ -133,7 +135,8 @@ internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfig
networkParameterAcceptanceSettings = config[networkParameterAcceptanceSettings],
configurationWithOptions = ConfigurationWithOptions(configuration, Configuration.Options.defaults),
flowExternalOperationThreadPoolSize = config[flowExternalOperationThreadPoolSize],
quasarExcludePackages = config[quasarExcludePackages]
quasarExcludePackages = config[quasarExcludePackages],
reloadCheckpointAfterSuspend = config[reloadCheckpointAfterSuspend]
))
} catch (e: Exception) {
return when (e) {

View File

@ -54,8 +54,8 @@ class MessagingExecutor(
}
@Synchronized
fun sendAll(messages: Map<MessageRecipients, Message>) {
messages.forEach { recipients, message -> send(message, recipients) }
fun sendAll(messages: List<Pair<MessageRecipients, Message>>) {
messages.forEach { (recipients, message) -> send(message, recipients) }
}
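The signature change above (a Map replaced by a List of pairs) means the same recipient can now appear more than once in a single batch. A generic sketch with stand-in type parameters:

// Iterating a list of (recipient, message) pairs preserves order and duplicates,
// which a Map keyed by recipient could not.
fun <R, M> sendAllSketch(messages: List<Pair<R, M>>, send: (message: M, recipients: R) -> Unit) {
    messages.forEach { (recipients, message) -> send(message, recipients) }
}
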
@Synchronized

View File

@ -121,6 +121,7 @@ class P2PMessagingClient(val config: NodeConfiguration,
var bridgeSession: ClientSession? = null
var bridgeNotifyConsumer: ClientConsumer? = null
var networkChangeSubscription: Subscription? = null
var sessionFactory: ClientSessionFactory? = null
fun sendMessage(address: String, message: ClientMessage) = producer!!.send(address, message)
}
@ -172,7 +173,7 @@ class P2PMessagingClient(val config: NodeConfiguration,
minLargeMessageSize = maxMessageSize + JOURNAL_HEADER_SIZE
isUseGlobalPools = nodeSerializationEnv != null
}
val sessionFactory = locator!!.createSessionFactory().addFailoverListener(::failoverCallback)
sessionFactory = locator!!.createSessionFactory().addFailoverListener(::failoverCallback)
// Login using the node username. The broker will authenticate us as its node (as opposed to another peer)
// using our TLS certificate.
// Note that the acknowledgement of messages is not flushed to the Artermis journal until the default buffer
@ -490,8 +491,10 @@ class P2PMessagingClient(val config: NodeConfiguration,
// Wait for the main loop to notice the consumer has gone and finish up.
shutdownLatch.await()
}
// Only the first caller sees running == true, protecting against a double stop, which seems to happen in some integration tests.
state.locked {
sessionFactory?.close()
locator?.close()
}
}
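The new sessionFactory field and the locked block above let stop() close the Artemis resources exactly once. A sketch of that double-stop guard in isolation (class, field and lock names are illustrative, not the client's real state machine):

// Only the first caller performs the close; later callers observe running == false
// and return without touching the already-closed resources.
class StopOnceGuard {
    @Volatile private var running = true
    fun stopOnce(close: () -> Unit) {
        val shouldClose = synchronized(this) {
            val wasRunning = running
            running = false
            wasRunning
        }
        if (shouldClose) close()
    }
}
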

View File

@ -2,6 +2,7 @@ package net.corda.node.services.network
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.SignedData
import net.corda.core.crypto.sha256
import net.corda.core.internal.openHttpConnection
import net.corda.core.internal.post
import net.corda.core.internal.responseAs
@ -13,6 +14,7 @@ import net.corda.core.utilities.seconds
import net.corda.core.utilities.trace
import net.corda.node.VersionInfo
import net.corda.node.utilities.registration.cacheControl
import net.corda.node.utilities.registration.cordaServerVersion
import net.corda.nodeapi.internal.SignedNodeInfo
import net.corda.nodeapi.internal.network.NetworkMap
import net.corda.nodeapi.internal.network.SignedNetworkMap
@ -61,8 +63,9 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
val signedNetworkMap = connection.responseAs<SignedNetworkMap>()
val networkMap = signedNetworkMap.verifiedNetworkMapCert(trustRoot)
val timeout = connection.cacheControl.maxAgeSeconds().seconds
val version = connection.cordaServerVersion
logger.trace { "Fetched network map update from $url successfully: $networkMap" }
return NetworkMapResponse(networkMap, timeout)
return NetworkMapResponse(networkMap, timeout, version)
}
fun getNodeInfo(nodeInfoHash: SecureHash): NodeInfo {
@ -81,6 +84,23 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
return networkParameter
}
fun getNodeInfos(): List<NodeInfo> {
val url = URL("$networkMapUrl/node-infos")
logger.trace { "Fetching node infos from $url." }
val verifiedNodeInfo = url.openHttpConnection().responseAs<Pair<SignedNetworkMap, List<SignedNodeInfo>>>()
.also {
val verifiedNodeInfoHashes = it.first.verifiedNetworkMapCert(trustRoot).nodeInfoHashes
val nodeInfoHashes = it.second.map { signedNodeInfo -> signedNodeInfo.verified().serialize().sha256() }
require(
verifiedNodeInfoHashes.containsAll(nodeInfoHashes) &&
verifiedNodeInfoHashes.size == nodeInfoHashes.size
)
}
.second.map { it.verified() }
logger.trace { "Fetched node infos successfully. Node Infos size: ${verifiedNodeInfo.size}" }
return verifiedNodeInfo
}
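getNodeInfos above cross-checks the bulk payload against the signed network map before trusting it. The same check isolated as a small sketch (generic hash type, illustrative names):

// The set of hashes published in the verified network map must match the hashes of
// the node infos actually returned; any mismatch rejects the whole bulk response.
fun <H> requireConsistentBulkResponse(mapHashes: Set<H>, payloadHashes: List<H>) {
    require(mapHashes.containsAll(payloadHashes) && mapHashes.size == payloadHashes.size) {
        "Bulk node-info response does not match the signed network map"
    }
}
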
fun myPublicHostname(): String {
val url = URL("$networkMapUrl/my-hostname")
logger.trace { "Resolving public hostname from '$url'." }
@ -90,4 +110,4 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
}
}
data class NetworkMapResponse(val payload: NetworkMap, val cacheMaxAge: Duration)
data class NetworkMapResponse(val payload: NetworkMap, val cacheMaxAge: Duration, val serverVersion: String)

View File

@ -4,6 +4,7 @@ import com.google.common.util.concurrent.MoreExecutors
import net.corda.core.CordaRuntimeException
import net.corda.core.crypto.SecureHash
import net.corda.core.crypto.SignedData
import net.corda.core.crypto.sha256
import net.corda.core.internal.NetworkParametersStorage
import net.corda.core.internal.VisibleForTesting
import net.corda.core.internal.copyTo
@ -65,6 +66,7 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
companion object {
private val logger = contextLogger()
private val defaultRetryInterval = 1.minutes
private const val bulkNodeInfoFetchThreshold = 50
}
private val parametersUpdatesTrack = PublishSubject.create<ParametersUpdateInfo>()
@ -173,17 +175,9 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
if (networkMapClient == null) {
throw CordaRuntimeException("Network map cache can be updated only if network map/compatibility zone URL is specified")
}
val (globalNetworkMap, cacheTimeout) = networkMapClient.getNetworkMap()
val (globalNetworkMap, cacheTimeout, version) = networkMapClient.getNetworkMap()
globalNetworkMap.parametersUpdate?.let { handleUpdateNetworkParameters(networkMapClient, it) }
val additionalHashes = extraNetworkMapKeys.flatMap {
try {
networkMapClient.getNetworkMap(it).payload.nodeInfoHashes
} catch (e: Exception) {
// Failure to retrieve one network map using UUID shouldn't stop the whole update.
logger.warn("Error encountered when downloading network map with uuid '$it', skipping...", e)
emptyList<SecureHash>()
}
}
val additionalHashes = getPrivateNetworkNodeHashes(version)
val allHashesFromNetworkMap = (globalNetworkMap.nodeInfoHashes + additionalHashes).toSet()
if (currentParametersHash != globalNetworkMap.networkParameterHash) {
exitOnParametersMismatch(globalNetworkMap)
@ -194,6 +188,37 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
val allNodeHashes = networkMapCache.allNodeHashes
val nodeHashesToBeDeleted = (allNodeHashes - allHashesFromNetworkMap - nodeInfoWatcher.processedNodeInfoHashes)
.filter { it != ourNodeInfoHash }
// Use a single bulk fetch when no other nodes are known yet or the number of unknown nodes reaches the threshold; otherwise fetch node infos individually (V1).
if (version == "1" || (allNodeHashes.size > 1 && (allHashesFromNetworkMap - allNodeHashes).size < bulkNodeInfoFetchThreshold))
updateNodeInfosV1(allHashesFromNetworkMap, allNodeHashes, networkMapClient)
else
updateNodeInfos(allHashesFromNetworkMap)
// NOTE: We remove nodes after any new/updates because updated nodes will have a new hash and, therefore, any
// nodes that we can actually pull out of the cache (with the old hashes) should be a truly removed node.
nodeHashesToBeDeleted.mapNotNull { networkMapCache.getNodeByHash(it) }.forEach(networkMapCache::removeNode)
// Mark the network map cache as ready on a successful poll of the HTTP network map, even on the odd chance that
// it's empty
networkMapCache.nodeReady.set(null)
return cacheTimeout
}
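The branch above chooses between the old per-hash (V1) fetch and the new single bulk request. A sketch of the same decision under the same assumptions (the threshold of 50 comes from bulkNodeInfoFetchThreshold; the function and parameter names are illustrative):

// Old network map servers (version "1") only support per-hash fetching. Against newer
// servers, bulk fetch is used when we are cold-starting (no other nodes known) or when
// the number of unknown node infos reaches the threshold; otherwise per-hash is cheaper.
fun useBulkFetch(serverVersion: String, knownHashes: Set<String>, mapHashes: Set<String>, threshold: Int = 50): Boolean {
    if (serverVersion == "1") return false
    val unknownCount = (mapHashes - knownHashes).size
    return knownHashes.size <= 1 || unknownCount >= threshold
}
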
private fun updateNodeInfos(allHashesFromNetworkMap: Set<SecureHash>) {
val networkMapDownloadStartTime = System.currentTimeMillis()
val nodeInfos = try {
networkMapClient!!.getNodeInfos()
} catch (e: Exception) {
logger.warn("Error encountered when downloading node infos", e)
emptyList<NodeInfo>()
}
(allHashesFromNetworkMap - nodeInfos.map { it.serialize().sha256() }).forEach {
logger.warn("Error encountered when downloading node info '$it', skipping...")
}
networkMapCache.addOrUpdateNodes(nodeInfos)
logger.info("Fetched: ${nodeInfos.size} using 1 bulk request in ${System.currentTimeMillis() - networkMapDownloadStartTime}ms")
}
private fun updateNodeInfosV1(allHashesFromNetworkMap: Set<SecureHash>, allNodeHashes: List<SecureHash>, networkMapClient: NetworkMapClient) {
// At the moment we use a blocking HTTP library - but under the covers, the OS will interleave threads waiting for IO.
// As HTTP GET is mostly IO bound, use more threads than CPUs.
// Maximum threads to use = 24; without this cap, large machines could issue hundreds of concurrent requests.
@ -230,14 +255,25 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
executorToUseForInsertionIntoDB.shutdown()
}.getOrThrow()
}
// NOTE: We remove nodes after any new/updates because updated nodes will have a new hash and, therefore, any
// nodes that we can actually pull out of the cache (with the old hashes) should be a truly removed node.
nodeHashesToBeDeleted.mapNotNull { networkMapCache.getNodeByHash(it) }.forEach(networkMapCache::removeNode)
}
// Mark the network map cache as ready on a successful poll of the HTTP network map, even on the odd chance that
// it's empty
networkMapCache.nodeReady.set(null)
return cacheTimeout
private fun getPrivateNetworkNodeHashes(version: String): List<SecureHash> {
// Private networks are not supported by the latest versions of the Network Map service.
// For compatibility, this call is still made by new nodes that communicate with old Network Map service versions,
// but it can be omitted when the Network Map version is known to be recent enough.
return if (version == "1") {
extraNetworkMapKeys.flatMap {
try {
networkMapClient!!.getNetworkMap(it).payload.nodeInfoHashes
} catch (e: Exception) {
// Failure to retrieve one network map using UUID shouldn't stop the whole update.
logger.warn("Error encountered when downloading network map with uuid '$it', skipping...", e)
emptyList<SecureHash>()
}
}
} else {
emptyList()
}
}
private fun exitOnParametersMismatch(networkMap: NetworkMap) {

View File

@ -60,13 +60,11 @@ import net.corda.nodeapi.internal.lifecycle.NodeLifecycleObserver.Companion.repo
import net.corda.node.internal.NodeStartup
import net.corda.node.services.api.CheckpointStorage
import net.corda.node.services.statemachine.Checkpoint
import net.corda.node.services.statemachine.DataSessionMessage
import net.corda.node.services.statemachine.ErrorState
import net.corda.node.services.statemachine.FlowError
import net.corda.node.services.statemachine.ExistingSessionMessagePayload
import net.corda.node.services.statemachine.FlowSessionImpl
import net.corda.node.services.statemachine.FlowState
import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.services.statemachine.InitiatedSessionState
import net.corda.node.services.statemachine.SessionId
import net.corda.node.services.statemachine.SessionState
import net.corda.node.services.statemachine.SubFlow
@ -325,6 +323,7 @@ class CheckpointDumperImpl(private val checkpointStorage: CheckpointStorage, pri
val send: List<SendJson>? = null,
val receive: NonEmptySet<FlowSession>? = null,
val sendAndReceive: List<SendJson>? = null,
val closeSessions: NonEmptySet<FlowSession>? = null,
val waitForLedgerCommit: SecureHash? = null,
val waitForStateConsumption: Set<StateRef>? = null,
val getFlowInfo: NonEmptySet<FlowSession>? = null,
@ -352,6 +351,7 @@ class CheckpointDumperImpl(private val checkpointStorage: CheckpointStorage, pri
is FlowIORequest.Send -> SuspendedOn(send = sessionToMessage.toJson())
is FlowIORequest.Receive -> SuspendedOn(receive = sessions)
is FlowIORequest.SendAndReceive -> SuspendedOn(sendAndReceive = sessionToMessage.toJson())
is FlowIORequest.CloseSessions -> SuspendedOn(closeSessions = sessions)
is FlowIORequest.WaitForLedgerCommit -> SuspendedOn(waitForLedgerCommit = hash)
is FlowIORequest.GetFlowInfo -> SuspendedOn(getFlowInfo = sessions)
is FlowIORequest.Sleep -> SuspendedOn(sleepTill = wakeUpAfter)
@ -379,16 +379,14 @@ class CheckpointDumperImpl(private val checkpointStorage: CheckpointStorage, pri
private class ActiveSession(
val peer: Party,
val ourSessionId: SessionId,
val receivedMessages: List<DataSessionMessage>,
val errors: List<FlowError>,
val receivedMessages: List<ExistingSessionMessagePayload>,
val peerFlowInfo: FlowInfo,
val peerSessionId: SessionId?
)
private fun SessionState.toActiveSession(sessionId: SessionId): ActiveSession? {
return if (this is SessionState.Initiated) {
val peerSessionId = (initiatedState as? InitiatedSessionState.Live)?.peerSinkSessionId
ActiveSession(peerParty, sessionId, receivedMessages, errors, peerFlowInfo, peerSessionId)
ActiveSession(peerParty, sessionId, receivedMessages, peerFlowInfo, peerSinkSessionId)
} else {
null
}

View File

@ -66,7 +66,6 @@ class NodeSchemaService(private val extraSchemas: Set<MappedSchema> = emptySet()
// when mapped schemas from the finance module are present, they are considered as internal ones
schema::class.qualifiedName == "net.corda.finance.schemas.CashSchemaV1" ||
schema::class.qualifiedName == "net.corda.finance.schemas.CommercialPaperSchemaV1" ||
schema::class.qualifiedName == "net.corda.node.services.transactions.NodeNotarySchemaV1" ||
schema::class.qualifiedName?.startsWith("net.corda.notary.") ?: false
}

Some files were not shown because too many files have changed in this diff.