Merge pull request #6529 from corda/os_4.6-feature_pass_in_client_id_when_starting_a_flow-merge
NOTICK - OS 4.6 to feature/pass_in_client_id_when_starting_a_flow merge
Commit: 35bfa6945f
(first file; path not captured)
@@ -5398,6 +5398,10 @@ public interface net.corda.core.schemas.QueryableState extends net.corda.core.co
 ##
 public interface net.corda.core.schemas.StatePersistable
 ##
+public interface net.corda.core.serialization.CheckpointCustomSerializer
+public abstract OBJ fromProxy(PROXY)
+public abstract PROXY toProxy(OBJ)
+##
 public interface net.corda.core.serialization.ClassWhitelist
 public abstract boolean hasListed(Class<?>)
 ##
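The added API entries above are the new checkpoint custom serializer hook. A minimal Kotlin sketch of an implementation matching those OBJ/PROXY signatures, assuming a hypothetical ThirdPartyCounter class that cannot be checkpointed directly:

import net.corda.core.serialization.CheckpointCustomSerializer

// Hypothetical type that the checkpoint serializer cannot handle directly (illustrative only).
class ThirdPartyCounter(var count: Int)

// Simple serializable stand-in used on the checkpoint side.
data class CounterProxy(val count: Int)

// Converts between the awkward type and its proxy, mirroring the toProxy/fromProxy methods above.
class ThirdPartyCounterSerializer : CheckpointCustomSerializer<ThirdPartyCounter, CounterProxy> {
    override fun toProxy(obj: ThirdPartyCounter): CounterProxy = CounterProxy(obj.count)
    override fun fromProxy(proxy: CounterProxy): ThirdPartyCounter = ThirdPartyCounter(proxy.count)
}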
(next file; path not captured)
@@ -1,3 +1,14 @@
+#!groovy
+/**
+* Jenkins pipeline to build Corda OS release with JDK11
+*/
+
+/**
+* Kill already started job.
+* Assume new commit takes precendence and results from previous
+* unfinished builds are not required.
+* This feature doesn't play well with disableConcurrentBuilds() option
+*/
 @Library('corda-shared-build-pipeline-steps')
 import static com.r3.build.BuildControl.killAllExistingBuildsForJob
 
@@ -19,16 +30,16 @@ if (isReleaseTag) {
 switch (env.TAG_NAME) {
 case ~/.*-RC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
 case ~/.*-HC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
-default: nexusIqStage = "operate"
+default: nexusIqStage = "release"
 }
 }
 
 pipeline {
-agent {
-label 'k8s'
-}
+agent { label 'k8s' }
 options {
 timestamps()
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 }
 
 environment {
@@ -37,6 +48,8 @@ pipeline {
 BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
 ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
 ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish JDK 11 Release to Artifactory".replaceAll("/", "::")
+CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
+CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
 }
 
 stages {
@@ -45,14 +58,15 @@ pipeline {
 sh "./gradlew --no-daemon clean jar"
 script {
 sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
-def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: //'").trim()
+/* every build related to Corda X.Y (GA, RC, HC, patch or snapshot) uses the same NexusIQ application */
+def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
 def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
 def artifactId = 'corda'
 nexusAppId = "jenkins-${groupId}-${artifactId}-jdk11-${version}"
 }
 nexusPolicyEvaluation (
 failBuildOnNetworkError: false,
-iqApplication: manualApplication(nexusAppId),
+iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
 iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
 iqStage: nexusIqStage
 )
@@ -68,6 +82,8 @@ pipeline {
 "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
 "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\" " +
 "-Ddocker.buildbase.tag=11latest " +
+"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
+"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
 "-Ddocker.dockerfile=DockerfileJDK11Azul" +
 " clean pushBuildImage preAllocateForParallelRegressionTest preAllocateForAllParallelSlowIntegrationTest --stacktrace"
 }
@@ -126,7 +142,7 @@ pipeline {
 rtGradleDeployer(
 id: 'deployer',
 serverId: 'R3-Artifactory',
-repo: 'r3-corda-releases'
+repo: 'corda-releases'
 )
 rtGradleRun(
 usesPlugin: true,
@@ -147,7 +163,7 @@ pipeline {
 post {
 always {
 archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
-junit '**/build/test-results-xml/**/*.xml'
+junit testResults: '**/build/test-results-xml/**/*.xml', keepLongStdio: true
 }
 cleanup {
 deleteDir() /* clean up our workspace */
(next file; path not captured)
@@ -14,6 +14,7 @@ pipeline {
 options {
 timestamps()
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 }
 
 stages {
@@ -35,4 +36,4 @@ pipeline {
 deleteDir() /* clean up our workspace */
 }
 }
 }
.ci/dev/mswin/Jenkinsfile (5 changed lines, vendored)
@@ -28,6 +28,7 @@ pipeline {
 ansiColor('xterm')
 timestamps()
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 
 /*
 * a bit awkward to read
@@ -64,7 +65,7 @@ pipeline {
 post {
 always {
 archiveArtifacts allowEmptyArchive: true, artifacts: '**/logs/**/*.log'
-junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
+junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true
 bat '.ci/kill_corda_procs.cmd'
 }
 cleanup {
@@ -86,7 +87,7 @@ pipeline {
 post {
 always {
 archiveArtifacts allowEmptyArchive: true, artifacts: '**/logs/**/*.log'
-junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
+junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true
 bat '.ci/kill_corda_procs.cmd'
 }
 cleanup {
.ci/dev/nightly-regression/Jenkinsfile (9 changed lines, vendored)
@@ -9,6 +9,7 @@ pipeline {
 timestamps()
 overrideIndexTriggers(false)
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 }
 triggers {
 pollSCM ignorePostCommitHooks: true, scmpoll_spec: '@midnight'
@@ -19,6 +20,8 @@ pipeline {
 EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
 BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
 ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
+CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
+CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
 }
 
 stages {
@@ -35,6 +38,8 @@ pipeline {
 "-Dkubenetize=true " +
 "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
 "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
+"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
+"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
 "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
 " clean pushBuildImage --stacktrace"
 }
@@ -74,15 +79,13 @@ pipeline {
 }
 }
-
-
 post {
 always {
 archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
-junit testResults: '**/build/test-results-xml/**/*.xml', allowEmptyResults: true, keepLongStdio: true
+junit testResults: '**/build/test-results-xml/**/*.xml', keepLongStdio: true
 }
 cleanup {
 deleteDir() /* clean up our workspace */
 }
 }
 }
 
.ci/dev/pr-code-checks/Jenkinsfile (3 changed lines, vendored)
@@ -4,10 +4,11 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob
 killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
 
 pipeline {
-agent { label 'k8s' }
+agent { label 'standard' }
 options {
 timestamps()
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 }
 
 environment {
.ci/dev/publish-api-docs/Jenkinsfile (new file, 46 lines, vendored)
@@ -0,0 +1,46 @@
+#!groovy
+/**
+* Jenkins pipeline to build Corda OS KDoc & Javadoc archive
+*/
+
+/**
+* Kill already started job.
+* Assume new commit takes precendence and results from previous
+* unfinished builds are not required.
+* This feature doesn't play well with disableConcurrentBuilds() option
+*/
+@Library('corda-shared-build-pipeline-steps')
+import static com.r3.build.BuildControl.killAllExistingBuildsForJob
+
+killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
+
+pipeline {
+agent { label 'standard' }
+options {
+ansiColor('xterm')
+timestamps()
+timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
+}
+
+environment {
+ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
+CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
+CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
+}
+
+stages {
+stage('Publish Archived API Docs to Artifactory') {
+when { tag pattern: /^docs-release-os-V(\d+\.\d+)(\.\d+){0,1}(-GA){0,1}(-\d{4}-\d\d-\d\d-\d{4}){0,1}$/, comparator: 'REGEXP' }
+steps {
+sh "./gradlew :clean :docs:artifactoryPublish -DpublishApiDocs"
+}
+}
+}
+
+post {
+cleanup {
+deleteDir() /* clean up our workspace */
+}
+}
+}
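The new pipeline publishes only for docs release tags. As a rough illustration of which tag names the pattern above accepts, here is a small Kotlin check (the tag names are made up, and this assumes plain Kotlin Regex behaviour matches the Jenkins REGEXP comparator for these simple cases):

fun main() {
    // Same pattern as the `when { tag pattern: ... }` condition above; Regex.matches()
    // requires the whole string to match, which stands in for the ^...$ anchors.
    val docsTag = Regex("""docs-release-os-V(\d+\.\d+)(\.\d+){0,1}(-GA){0,1}(-\d{4}-\d\d-\d\d-\d{4}){0,1}""")
    listOf(
        "docs-release-os-V4.6",                 // accepted: major.minor only
        "docs-release-os-V4.6.1-GA",            // accepted: patch release marked GA
        "docs-release-os-V4.6-2020-07-01-1200", // accepted: date-stamped build
        "release-os-V4.6"                       // rejected: missing the docs- prefix
    ).forEach { tag -> println("$tag -> ${docsTag.matches(tag)}") }
}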
(next file; path not captured)
@@ -1,17 +1,34 @@
 #!groovy
+/**
+* Jenkins pipeline to build Corda OS nightly snapshots
+*/
+
+/**
+* Kill already started job.
+* Assume new commit takes precendence and results from previous
+* unfinished builds are not required.
+* This feature doesn't play well with disableConcurrentBuilds() option
+*/
 @Library('corda-shared-build-pipeline-steps')
 import static com.r3.build.BuildControl.killAllExistingBuildsForJob
 
 killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
 
+/*
+** calculate the stage for NexusIQ evaluation
+** * build for snapshots
+*/
+def nexusIqStage = "build"
+
 pipeline {
-agent { label 'k8s' }
+agent { label 'standard' }
 
 options {
 timestamps()
 ansiColor('xterm')
 overrideIndexTriggers(false)
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 }
 
 triggers {
@@ -27,6 +44,26 @@ pipeline {
 }
 
 stages {
+stage('Sonatype Check') {
+steps {
+sh "./gradlew --no-daemon clean jar"
+script {
+sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
+/* every build related to Corda X.Y (GA, RC, HC, patch or snapshot) uses the same NexusIQ application */
+def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
+def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
+def artifactId = 'corda'
+nexusAppId = "jenkins-${groupId}-${artifactId}-${version}"
+}
+nexusPolicyEvaluation (
+failBuildOnNetworkError: false,
+iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
+iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
+iqStage: nexusIqStage
+)
+}
+}
+
 stage('Publish to Artifactory') {
 steps {
 rtServer (
(next file; path not captured)
@@ -12,6 +12,7 @@ pipeline {
 ansiColor('xterm')
 overrideIndexTriggers(false)
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 }
 
 environment {
.ci/dev/regression/Jenkinsfile (18 changed lines, vendored)
@@ -18,6 +18,7 @@ killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
 * Sense environment
 */
 boolean isReleaseTag = (env.TAG_NAME =~ /^release-.*(?<!_JDK11)$/)
+boolean isInternalRelease = (env.TAG_NAME =~ /^internal-release-.*$/)
 /*
 ** calculate the stage for NexusIQ evaluation
 ** * build for snapshots
@@ -29,7 +30,7 @@ if (isReleaseTag) {
 switch (env.TAG_NAME) {
 case ~/.*-RC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
 case ~/.*-HC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
-default: nexusIqStage = "operate"
+default: nexusIqStage = "release"
 }
 }
 
@@ -39,6 +40,7 @@ pipeline {
 timestamps()
 disableConcurrentBuilds()
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 }
 
 environment {
@@ -48,6 +50,8 @@ pipeline {
 BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
 ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
 ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish Release to Artifactory".replaceAll("/", "::")
+CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
+CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
 }
 
 stages {
@@ -56,14 +60,15 @@ pipeline {
 sh "./gradlew --no-daemon clean jar"
 script {
 sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
-def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: //'").trim()
+/* every build related to Corda X.Y (GA, RC, HC, patch or snapshot) uses the same NexusIQ application */
+def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
 def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
 def artifactId = 'corda'
 nexusAppId = "jenkins-${groupId}-${artifactId}-${version}"
 }
 nexusPolicyEvaluation (
 failBuildOnNetworkError: false,
-iqApplication: manualApplication(nexusAppId),
+iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
 iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
 iqStage: nexusIqStage
 )
@@ -83,6 +88,8 @@ pipeline {
 "-Dkubenetize=true " +
 "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
 "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
+"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
+"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
 "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
 " clean preAllocateForParallelRegressionTest preAllocateForAllParallelSlowIntegrationTest pushBuildImage --stacktrace"
 }
@@ -134,7 +141,7 @@ pipeline {
 rtGradleDeployer(
 id: 'deployer',
 serverId: 'R3-Artifactory',
-repo: 'r3-corda-releases'
+repo: 'corda-releases'
 )
 rtGradleRun(
 usesPlugin: true,
@@ -153,7 +160,7 @@ pipeline {
 
 stage('Publish Release to Docker Hub') {
 when {
-expression { isReleaseTag }
+expression { !isInternalRelease && isReleaseTag }
 }
 steps {
 withCredentials([
@@ -166,7 +173,6 @@ pipeline {
 }
 }
 
-
 post {
 always {
 archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
Jenkinsfile (5 changed lines, vendored)
@@ -9,6 +9,7 @@ pipeline {
 options {
 timestamps()
 timeout(time: 3, unit: 'HOURS')
+buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
 }
 
 environment {
@@ -16,6 +17,8 @@ pipeline {
 EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
 BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
 ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
+CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
+CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
 }
 
 stages {
@@ -26,6 +29,8 @@ pipeline {
 "-Dkubenetize=true " +
 "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
 "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
+"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
+"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
 "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
 " clean preAllocateForAllParallelUnitTest preAllocateForAllParallelIntegrationTest pushBuildImage --stacktrace"
 }
build.gradle (78 changed lines)
@@ -62,14 +62,14 @@ buildscript {
 
 ext.asm_version = '7.1'
 ext.artemis_version = '2.6.2'
-// TODO Upgrade Jackson only when corda is using kotlin 1.3.10
-ext.jackson_version = '2.9.7'
+// TODO Upgrade to Jackson 2.10+ only when corda is using kotlin 1.3.10
+ext.jackson_version = '2.9.8'
 ext.jetty_version = '9.4.19.v20190610'
 ext.jersey_version = '2.25'
 ext.servlet_version = '4.0.1'
 ext.assertj_version = '3.12.2'
-ext.slf4j_version = '1.7.26'
-ext.log4j_version = '2.11.2'
+ext.slf4j_version = '1.7.30'
+ext.log4j_version = '2.13.3'
 ext.bouncycastle_version = constants.getProperty("bouncycastleVersion")
 ext.guava_version = constants.getProperty("guavaVersion")
 ext.caffeine_version = constants.getProperty("caffeineVersion")
@@ -155,22 +155,39 @@ buildscript {
 ext.corda_docs_link = "https://docs.corda.net/docs/corda-os/$baseVersion"
 repositories {
 mavenLocal()
-mavenCentral()
-jcenter()
-maven {
-url 'https://kotlin.bintray.com/kotlinx'
-}
+// Use system environment to activate caching with Artifactory,
+// because it is actually easier to pass that during parallel build.
+// NOTE: it has to be a name of a virtual repository with all
+// required remote or local repositories!
+if (System.getenv("CORDA_USE_CACHE")) {
 maven {
-url "$artifactory_contextUrl/corda-dependencies-dev"
-}
-maven {
-url "$artifactory_contextUrl/corda-releases"
+name "R3 Maven remote repositories"
+url "${artifactory_contextUrl}/${System.getenv("CORDA_USE_CACHE")}"
+authentication {
+basic(BasicAuthentication)
+}
+credentials {
+username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
+password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
+}
+}
+} else {
+mavenCentral()
+jcenter()
+maven {
+url 'https://kotlin.bintray.com/kotlinx'
+}
+maven {
+url "${artifactory_contextUrl}/corda-dependencies-dev"
+}
+maven {
+url "${artifactory_contextUrl}/corda-releases"
+}
 }
 }
 dependencies {
 classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
 classpath "org.jetbrains.kotlin:kotlin-allopen:$kotlin_version"
-classpath 'com.jfrog.bintray.gradle:gradle-bintray-plugin:1.4'
 classpath "net.corda.plugins:publish-utils:$gradle_plugins_version"
 classpath "net.corda.plugins:quasar-utils:$gradle_plugins_version"
 classpath "net.corda.plugins:cordformation:$gradle_plugins_version"
@@ -204,7 +221,6 @@ plugins {
 apply plugin: 'project-report'
 apply plugin: 'com.github.ben-manes.versions'
 apply plugin: 'net.corda.plugins.publish-utils'
-apply plugin: 'maven-publish'
 apply plugin: 'com.jfrog.artifactory'
 apply plugin: "com.bmuschko.docker-remote-api"
 apply plugin: "com.r3.dependx.dependxies"
@@ -357,11 +373,29 @@ allprojects {
 
 repositories {
 mavenLocal()
-mavenCentral()
-jcenter()
-maven { url "$artifactory_contextUrl/corda-dependencies" }
-maven { url 'https://repo.gradle.org/gradle/libs-releases' }
-maven { url "$artifactory_contextUrl/corda-dev" }
+// Use system environment to activate caching with Artifactory,
+// because it is actually easier to pass that during parallel build.
+// NOTE: it has to be a name of a virtual repository with all
+// required remote or local repositories!
+if (System.getenv("CORDA_USE_CACHE")) {
+maven {
+name "R3 Maven remote repositories"
+url "${artifactory_contextUrl}/${System.getenv("CORDA_USE_CACHE")}"
+authentication {
+basic(BasicAuthentication)
+}
+credentials {
+username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
+password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
+}
+}
+} else {
+mavenCentral()
+jcenter()
+maven { url "${artifactory_contextUrl}/corda-dependencies" }
+maven { url 'https://repo.gradle.org/gradle/libs-releases' }
+maven { url "${artifactory_contextUrl}/corda-dev" }
+}
 }
 
 configurations {
@@ -626,7 +660,7 @@ dependxiesModule {
 skipTasks = "test,integrationTest,smokeTest,slowIntegrationTest"
 }
 
-tasks.register('generateApi', net.corda.plugins.GenerateApi) {
+tasks.register('generateApi', net.corda.plugins.apiscanner.GenerateApi) {
 baseName = "api-corda"
 }
 
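The repository changes above switch every repository block to a single authenticated Artifactory virtual repository whenever CORDA_USE_CACHE is set, and fall back to the public repositories otherwise. A rough Gradle Kotlin DSL equivalent of that control flow, for illustration only (the project's build files are Groovy; artifactory_contextUrl is assumed to be a Gradle property, as in the build above):

import org.gradle.authentication.http.BasicAuthentication

repositories {
    mavenLocal()
    val cacheRepo: String? = System.getenv("CORDA_USE_CACHE")
    if (cacheRepo != null) {
        // Single virtual repository expected to proxy all required remote/local repositories.
        maven {
            name = "R3 Maven remote repositories"
            url = uri("${property("artifactory_contextUrl")}/$cacheRepo")
            authentication { create<BasicAuthentication>("basic") }
            credentials {
                username = System.getenv("CORDA_ARTIFACTORY_USERNAME")
                password = System.getenv("CORDA_ARTIFACTORY_PASSWORD")
            }
        }
    } else {
        mavenCentral()
        jcenter()
        maven { url = uri("${property("artifactory_contextUrl")}/corda-dependencies") }
    }
}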
(next file; path not captured)
@@ -292,6 +292,7 @@ class ReconnectingCordaRPCOps private constructor(
 }
 private class ErrorInterceptingHandler(val reconnectingRPCConnection: ReconnectingRPCConnection) : InvocationHandler {
 private fun Method.isStartFlow() = name.startsWith("startFlow") || name.startsWith("startTrackedFlow")
+private fun Method.isShutdown() = name == "shutdown" || name == "gracefulShutdown" || name == "terminate"
 
 private fun checkIfIsStartFlow(method: Method, e: InvocationTargetException) {
 if (method.isStartFlow()) {
@@ -306,7 +307,7 @@ class ReconnectingCordaRPCOps private constructor(
 *
 * A negative number for [maxNumberOfAttempts] means an unlimited number of retries will be performed.
 */
-@Suppress("ThrowsCount", "ComplexMethod")
+@Suppress("ThrowsCount", "ComplexMethod", "NestedBlockDepth")
 private fun doInvoke(method: Method, args: Array<out Any>?, maxNumberOfAttempts: Int): Any? {
 checkIfClosed()
 var remainingAttempts = maxNumberOfAttempts
@@ -318,20 +319,20 @@ class ReconnectingCordaRPCOps private constructor(
 log.debug { "RPC $method invoked successfully." }
 }
 } catch (e: InvocationTargetException) {
-if (method.name.equals("shutdown", true)) {
-log.debug("Shutdown invoked, stop reconnecting.", e)
-reconnectingRPCConnection.notifyServerAndClose()
-break
-}
 when (e.targetException) {
 is RejectedCommandException -> {
 log.warn("Node is being shutdown. Operation ${method.name} rejected. Shutting down...", e)
 throw e.targetException
 }
 is ConnectionFailureException -> {
-log.warn("Failed to perform operation ${method.name}. Connection dropped. Retrying....", e)
-reconnectingRPCConnection.reconnectOnError(e)
-checkIfIsStartFlow(method, e)
+if (method.isShutdown()) {
+log.debug("Shutdown invoked, stop reconnecting.", e)
+reconnectingRPCConnection.notifyServerAndClose()
+} else {
+log.warn("Failed to perform operation ${method.name}. Connection dropped. Retrying....", e)
+reconnectingRPCConnection.reconnectOnError(e)
+checkIfIsStartFlow(method, e)
+}
 }
 is RPCException -> {
 rethrowIfUnrecoverable(e.targetException as RPCException)
(next file, deleted; path not captured)
@@ -1,28 +0,0 @@
-package net.corda.common.logging
-
-import org.apache.logging.log4j.core.Core
-import org.apache.logging.log4j.core.LogEvent
-import org.apache.logging.log4j.core.appender.rewrite.RewritePolicy
-import org.apache.logging.log4j.core.config.plugins.Plugin
-import org.apache.logging.log4j.core.config.plugins.PluginFactory
-import org.apache.logging.log4j.core.impl.Log4jLogEvent
-
-@Plugin(name = "ErrorCodeRewritePolicy", category = Core.CATEGORY_NAME, elementType = "rewritePolicy", printObject = false)
-class ErrorCodeRewritePolicy : RewritePolicy {
-override fun rewrite(source: LogEvent): LogEvent? {
-val newMessage = source.message?.withErrorCodeFor(source.thrown, source.level)
-return if (newMessage == source.message) {
-source
-} else {
-Log4jLogEvent.Builder(source).setMessage(newMessage).build()
-}
-}
-
-companion object {
-@JvmStatic
-@PluginFactory
-fun createPolicy(): ErrorCodeRewritePolicy {
-return ErrorCodeRewritePolicy()
-}
-}
-}
(next file; path not captured)
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<Configuration status="info" packages="net.corda.common.logging" shutdownHook="disable">
+<Configuration status="info" shutdownHook="disable">
 
 <Properties>
 <Property name="log-path">${sys:log-path:-logs}</Property>
@@ -172,21 +172,17 @@
 
 <Rewrite name="Console-ErrorCode-Selector">
 <AppenderRef ref="Console-Selector"/>
-<ErrorCodeRewritePolicy/>
 </Rewrite>
 
 <Rewrite name="Console-ErrorCode-Appender-Println">
 <AppenderRef ref="Console-Appender-Println"/>
-<ErrorCodeRewritePolicy/>
 </Rewrite>
 
 <Rewrite name="RollingFile-ErrorCode-Appender">
 <AppenderRef ref="RollingFile-Appender"/>
-<ErrorCodeRewritePolicy/>
 </Rewrite>
 <Rewrite name="Diagnostic-RollingFile-ErrorCode-Appender">
 <AppenderRef ref="Diagnostic-RollingFile-Appender"/>
-<ErrorCodeRewritePolicy/>
 </Rewrite>
 </Appenders>
 
(next file; path not captured)
@@ -4,14 +4,14 @@
 
 cordaVersion=4.6
 versionSuffix=SNAPSHOT
-gradlePluginsVersion=5.0.9
+gradlePluginsVersion=5.0.11
 kotlinVersion=1.2.71
 java8MinUpdateVersion=171
 # ***************************************************************#
 # When incrementing platformVersion make sure to update #
 # net.corda.core.internal.CordaUtilsKt.PLATFORM_VERSION as well. #
 # ***************************************************************#
-platformVersion=7
+platformVersion=8
 guavaVersion=28.0-jre
 # Quasar version to use with Java 8:
 quasarVersion=0.7.12_r3
@@ -20,12 +20,12 @@ quasarClassifier=jdk8
 quasarVersion11=0.8.0_r3
 jdkClassifier11=jdk11
 proguardVersion=6.1.1
-bouncycastleVersion=1.60
+bouncycastleVersion=1.66
 classgraphVersion=4.8.78
 disruptorVersion=3.4.2
 typesafeConfigVersion=1.3.4
 jsr305Version=3.0.2
-artifactoryPluginVersion=4.7.3
+artifactoryPluginVersion=4.16.1
 snakeYamlVersion=1.19
 caffeineVersion=2.7.0
 metricsVersion=4.1.0
(next file; path not captured)
@@ -54,8 +54,8 @@ tasks.named('jar', Jar) {
 enabled = false
 }
 
-def coreJarTask = tasks.getByPath(':core:jar')
-def originalJar = coreJarTask.outputs.files.singleFile
+def coreJarTask = project(':core').tasks.named('jar', Jar)
+def originalJar = coreJarTask.map { it.outputs.files.singleFile }
 
 def patchCore = tasks.register('patchCore', Zip) {
 dependsOn coreJarTask
@@ -132,7 +132,7 @@ def jarFilter = tasks.register('jarFilter', JarFilterTask) {
 }
 }
 
-task determinise(type: ProGuardTask) {
+def determinise = tasks.register('determinise', ProGuardTask) {
 injars jarFilter
 outjars file("$buildDir/proguard/$jarBaseName-${project.version}.jar")
 
@@ -166,17 +166,20 @@ task determinise(type: ProGuardTask) {
 keepclassmembers 'class net.corda.core.** { public synthetic <methods>; }'
 }
 
-task metafix(type: MetaFixerTask) {
+def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask)
+
+def metafix = tasks.register('metafix', MetaFixerTask) {
 outputDir file("$buildDir/libs")
 jars determinise
 suffix ""
 
 // Strip timestamps from the JAR to make it reproducible.
 preserveTimestamps = false
+finalizedBy checkDeterminism
 }
 
 // DOCSTART 01
-def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask) {
+checkDeterminism.configure {
 dependsOn jdkTask
 injars metafix
 
@@ -197,14 +200,17 @@ def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask) {
 // DOCEND 01
 
 defaultTasks "determinise"
-determinise.finalizedBy metafix
-metafix.finalizedBy checkDeterminism
-assemble.dependsOn checkDeterminism
+determinise.configure {
+finalizedBy metafix
+}
+tasks.named('assemble') {
+dependsOn checkDeterminism
+}
 
-def deterministicJar = metafix.outputs.files.singleFile
+def deterministicJar = metafix.map { it.outputs.files.singleFile }
 artifacts {
-deterministicArtifacts file: deterministicJar, name: jarBaseName, type: 'jar', extension: 'jar', builtBy: metafix
-publish file: deterministicJar, name: jarBaseName, type: 'jar', extension: 'jar', builtBy: metafix
+deterministicArtifacts deterministicJar
+publish deterministicJar
 }
 
 tasks.named('sourceJar', Jar) {
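This build file also moves from eagerly created tasks (task foo(type: ...), foo.finalizedBy bar, assemble.dependsOn baz) to the lazy tasks.register/tasks.named API with provider-based outputs (coreJarTask.map { ... }). A small self-contained sketch of that pattern in Gradle Kotlin DSL, with made-up task names:

// build.gradle.kts sketch
plugins {
    base // provides the `assemble` lifecycle task
}

// Registered lazily: the configuration block only runs if the task is actually required.
val packageReports = tasks.register<Zip>("packageReports") {
    from(layout.buildDirectory.dir("reports"))
    archiveBaseName.set("reports")
}

// Wire lifecycle dependencies without resolving the task eagerly.
tasks.named("assemble") {
    dependsOn(packageReports)
}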
(next file; path not captured)
@@ -8,6 +8,7 @@ import net.corda.core.identity.Party;
 import net.corda.core.utilities.KotlinUtilsKt;
 import net.corda.testing.core.TestConstants;
 import net.corda.testing.core.TestUtils;
+import net.corda.testing.driver.DriverDSL;
 import net.corda.testing.driver.DriverParameters;
 import net.corda.testing.driver.NodeHandle;
 import net.corda.testing.driver.NodeParameters;
@@ -19,8 +20,11 @@ import org.slf4j.LoggerFactory;
 import java.io.Serializable;
 import java.time.Duration;
 import java.time.temporal.ChronoUnit;
+import java.util.Arrays;
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.function.BiFunction;
+import java.util.stream.Collectors;
 
 import static net.corda.testing.driver.Driver.driver;
 
@@ -29,14 +33,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
 @Test
 public void awaitFlowExternalOperationInJava() {
 driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
-NodeHandle alice = KotlinUtilsKt.getOrThrow(
-driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
-Duration.of(1, ChronoUnit.MINUTES)
-);
-NodeHandle bob = KotlinUtilsKt.getOrThrow(
-driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
-Duration.of(1, ChronoUnit.MINUTES)
-);
+List<NodeHandle> aliceAndBob = aliceAndBob(driver);
+NodeHandle alice = aliceAndBob.get(0);
+NodeHandle bob = aliceAndBob.get(1);
 return KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
 FlowWithExternalOperationInJava.class,
 TestUtils.singleIdentity(bob.getNodeInfo())
@@ -47,14 +46,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
 @Test
 public void awaitFlowExternalAsyncOperationInJava() {
 driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
-NodeHandle alice = KotlinUtilsKt.getOrThrow(
-driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
-Duration.of(1, ChronoUnit.MINUTES)
-);
-NodeHandle bob = KotlinUtilsKt.getOrThrow(
-driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
-Duration.of(1, ChronoUnit.MINUTES)
-);
+List<NodeHandle> aliceAndBob = aliceAndBob(driver);
+NodeHandle alice = aliceAndBob.get(0);
+NodeHandle bob = aliceAndBob.get(1);
 return KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
 FlowWithExternalAsyncOperationInJava.class,
 TestUtils.singleIdentity(bob.getNodeInfo())
@@ -65,14 +59,9 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
 @Test
 public void awaitFlowExternalOperationInJavaCanBeRetried() {
 driver(new DriverParameters().withStartNodesInProcess(true), driver -> {
-NodeHandle alice = KotlinUtilsKt.getOrThrow(
-driver.startNode(new NodeParameters().withProvidedName(TestConstants.ALICE_NAME)),
-Duration.of(1, ChronoUnit.MINUTES)
-);
-NodeHandle bob = KotlinUtilsKt.getOrThrow(
-driver.startNode(new NodeParameters().withProvidedName(TestConstants.BOB_NAME)),
-Duration.of(1, ChronoUnit.MINUTES)
-);
+List<NodeHandle> aliceAndBob = aliceAndBob(driver);
+NodeHandle alice = aliceAndBob.get(0);
+NodeHandle bob = aliceAndBob.get(1);
 KotlinUtilsKt.getOrThrow(alice.getRpc().startFlowDynamic(
 FlowWithExternalOperationThatGetsRetriedInJava.class,
 TestUtils.singleIdentity(bob.getNodeInfo())
@@ -190,4 +179,15 @@ public class FlowExternalOperationInJavaTest extends AbstractFlowExternalOperati
 return operation.apply(futureService, deduplicationId);
 }
 }
+
+private List<NodeHandle> aliceAndBob(DriverDSL driver) {
+return Arrays.asList(TestConstants.ALICE_NAME, TestConstants.BOB_NAME)
+.stream()
+.map(nm -> driver.startNode(new NodeParameters().withProvidedName(nm)))
+.collect(Collectors.toList())
+.stream()
+.map(future -> KotlinUtilsKt.getOrThrow(future,
+Duration.of(1, ChronoUnit.MINUTES)))
+.collect(Collectors.toList());
+}
 }
(next file; path not captured)
@@ -4,6 +4,7 @@ import co.paralleluniverse.fibers.Suspendable
 import net.corda.core.flows.HospitalizeFlowException
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.minutes
@@ -24,8 +25,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 alice.rpc.startFlow(::FlowWithExternalAsyncOperation, bob.nodeInfo.singleIdentity())
 .returnValue.getOrThrow(1.minutes)
 assertHospitalCounters(0, 0)
@@ -35,8 +38,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation that checks deduplicationId is not rerun when flow is retried`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 assertFailsWith<DuplicatedProcessException> {
 alice.rpc.startFlow(
 ::FlowWithExternalAsyncOperationWithDeduplication,
@@ -50,8 +55,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation propagates exception to calling flow`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 assertFailsWith<MyCordaException> {
 alice.rpc.startFlow(
 ::FlowWithExternalAsyncOperationPropagatesException,
@@ -66,8 +73,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation exception can be caught in flow`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 val result = alice.rpc.startFlow(
 ::FlowWithExternalAsyncOperationThatThrowsExceptionAndCaughtInFlow,
 bob.nodeInfo.singleIdentity()
@@ -80,8 +89,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation with exception that hospital keeps for observation does not fail`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 blockUntilFlowKeptInForObservation {
 alice.rpc.startFlow(
 ::FlowWithExternalAsyncOperationPropagatesException,
@@ -96,8 +107,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation with exception that hospital discharges is retried and runs the future again`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 blockUntilFlowKeptInForObservation {
 alice.rpc.startFlow(
 ::FlowWithExternalAsyncOperationPropagatesException,
@@ -112,8 +125,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation that throws exception rather than completing future exceptionally fails with internal exception`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 assertFailsWith<StateTransitionException> {
 alice.rpc.startFlow(::FlowWithExternalAsyncOperationUnhandledException, bob.nodeInfo.singleIdentity())
 .returnValue.getOrThrow(1.minutes)
@@ -125,8 +140,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation that passes serviceHub into process can be retried`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 blockUntilFlowKeptInForObservation {
 alice.rpc.startFlow(
 ::FlowWithExternalAsyncOperationThatPassesInServiceHubCanRetry,
@@ -140,8 +157,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `external async operation that accesses serviceHub from flow directly will fail when retried`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 assertFailsWith<DirectlyAccessedServiceHubException> {
 alice.rpc.startFlow(
 ::FlowWithExternalAsyncOperationThatDirectlyAccessesServiceHubFailsRetry,
@@ -155,8 +174,10 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
 @Test(timeout = 300_000)
 fun `starting multiple futures and joining on their results`() {
 driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-val bob = startNode(providedName = BOB_NAME).getOrThrow()
+val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+.map { startNode(providedName = it) }
+.transpose()
+.getOrThrow()
 alice.rpc.startFlow(::FlowThatStartsMultipleFuturesAndJoins, bob.nodeInfo.singleIdentity()).returnValue.getOrThrow(1.minutes)
|
alice.rpc.startFlow(::FlowThatStartsMultipleFuturesAndJoins, bob.nodeInfo.singleIdentity()).returnValue.getOrThrow(1.minutes)
|
||||||
assertHospitalCounters(0, 0)
|
assertHospitalCounters(0, 0)
|
||||||
}
|
}
|
||||||
@ -167,7 +188,7 @@ class FlowExternalAsyncOperationTest : AbstractFlowExternalOperationTest() {
|
|||||||
|
|
||||||
@Suspendable
|
@Suspendable
|
||||||
override fun testCode(): Any =
|
override fun testCode(): Any =
|
||||||
await(ExternalAsyncOperation(serviceHub) { _, _ ->
|
await(ExternalAsyncOperation(serviceHub) { serviceHub, _ ->
|
||||||
serviceHub.cordaService(FutureService::class.java).createFuture()
|
serviceHub.cordaService(FutureService::class.java).createFuture()
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
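A minimal sketch of the node start-up pattern used throughout the test changes above, assuming the same driver DSL, node names and the transpose/getOrThrow imports that the diff adds: the two sequential startNode(...).getOrThrow() calls become a single chain that starts every node first and only blocks once, so the in-process nodes boot concurrently. The surrounding test body is illustrative only.

    driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
        val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
            .map { startNode(providedName = it) }   // List<CordaFuture<NodeHandle>>; nodes start concurrently
            .transpose()                            // CordaFuture<List<NodeHandle>>
            .getOrThrow()                           // block once, after all nodes are up
        // ... run the flow under test against alice and bob ...
    }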
@@ -3,6 +3,7 @@ package net.corda.coretests.flows
 import co.paralleluniverse.fibers.Suspendable
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.minutes
@@ -18,8 +19,10 @@ class FlowExternalOperationStartFlowTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `starting a flow inside of a flow that starts a future will succeed`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::FlowThatStartsAnotherFlowInAnExternalOperation, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
@@ -29,8 +32,10 @@ class FlowExternalOperationStartFlowTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `multiple flows can be started and their futures joined from inside a flow`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::ForkJoinFlows, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
@@ -5,6 +5,7 @@ import net.corda.core.flows.FlowLogic
 import net.corda.core.flows.HospitalizeFlowException
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.internal.packageName
 import net.corda.core.messaging.startFlow
 import net.corda.core.node.services.queryBy
@@ -29,8 +30,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::FlowWithExternalOperation, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
@@ -40,8 +43,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation that checks deduplicationId is not rerun when flow is retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<DuplicatedProcessException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationWithDeduplication,
@@ -55,8 +60,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation propagates exception to calling flow`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<MyCordaException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationPropagatesException,
@@ -71,8 +78,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation exception can be caught in flow`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.startFlow(::FlowWithExternalOperationThatThrowsExceptionAndCaughtInFlow, bob.nodeInfo.singleIdentity())
                 .returnValue.getOrThrow(1.minutes)
             assertHospitalCounters(0, 0)
@@ -82,8 +91,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation with exception that hospital keeps for observation does not fail`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationPropagatesException,
@@ -98,8 +109,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation with exception that hospital discharges is retried and runs the external operation again`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationPropagatesException,
@@ -114,8 +127,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation that passes serviceHub into process can be retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             blockUntilFlowKeptInForObservation {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationThatPassesInServiceHubCanRetry,
@@ -129,8 +144,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external async operation that accesses serviceHub from flow directly will fail when retried`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             assertFailsWith<DirectlyAccessedServiceHubException> {
                 alice.rpc.startFlow(
                     ::FlowWithExternalOperationThatDirectlyAccessesServiceHubFailsRetry,
@@ -199,8 +216,10 @@ class FlowExternalOperationTest : AbstractFlowExternalOperationTest() {
     @Test(timeout = 300_000)
     fun `external operation can be retried when an error occurs inside of database transaction`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             val success = alice.rpc.startFlow(
                 ::FlowWithExternalOperationThatErrorsInsideOfDatabaseTransaction,
                 bob.nodeInfo.singleIdentity()
@@ -10,6 +10,7 @@ import net.corda.core.flows.StartableByRPC
 import net.corda.core.flows.StateMachineRunId
 import net.corda.core.flows.UnexpectedFlowEndException
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.minutes
@@ -56,9 +57,10 @@ class FlowIsKilledTest {
     @Test(timeout = 300_000)
     fun `manually handled killed flows propagate error to counter parties`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
-            val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
+            val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             alice.rpc.let { rpc ->
                 val handle = rpc.startFlow(
                     ::AFlowThatWantsToDieAndKillsItsFriends,
@@ -85,8 +87,11 @@ class FlowIsKilledTest {
     @Test(timeout = 300_000)
     fun `a manually killed initiated flow will propagate the killed error to the initiator and its counter parties`() {
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
+
             val handle = alice.rpc.startFlow(
                 ::AFlowThatGetsMurderedByItsFriend,
                 bob.nodeInfo.singleIdentity()
@@ -7,6 +7,7 @@ import net.corda.core.flows.InitiatedBy
 import net.corda.core.flows.InitiatingFlow
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.minutes
@@ -53,8 +54,10 @@ class FlowSleepTest {
     fun `flow can sleep and perform other suspending functions`() {
         // ensures that events received while the flow is sleeping are not processed
         driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                .map { startNode(providedName = it) }
+                .transpose()
+                .getOrThrow()
             val (start, finish) = alice.rpc.startFlow(
                 ::SleepAndInteractWithPartyFlow,
                 bob.nodeInfo.singleIdentity()
@@ -53,7 +53,7 @@ class ReceiveFinalityFlowTest {

         val paymentReceiverId = paymentReceiverFuture.getOrThrow()
         assertThat(bob.services.vaultService.queryBy<FungibleAsset<*>>().states).isEmpty()
-        bob.assertFlowSentForObservationDueToConstraintError(paymentReceiverId)
+        bob.assertFlowSentForObservationDueToUntrustedAttachmentsException(paymentReceiverId)

         // Restart Bob with the contracts CorDapp so that it can recover from the error
         bob = mockNet.restartNode(bob, parameters = InternalMockNodeParameters(additionalCordapps = listOf(FINANCE_CONTRACTS_CORDAPP)))
@@ -69,7 +69,7 @@ class ReceiveFinalityFlowTest {
                 .ofType(R::class.java)
     }

-    private fun TestStartedNode.assertFlowSentForObservationDueToConstraintError(runId: StateMachineRunId) {
+    private fun TestStartedNode.assertFlowSentForObservationDueToUntrustedAttachmentsException(runId: StateMachineRunId) {
         val observation = medicalRecordsOfType<Flow>()
                 .filter { it.flowId == runId }
                 .toBlocking()
@@ -77,6 +77,6 @@ class ReceiveFinalityFlowTest {
         assertThat(observation.outcome).isEqualTo(Outcome.OVERNIGHT_OBSERVATION)
         assertThat(observation.by).contains(FinalityDoctor)
         val error = observation.errors.single()
-        assertThat(error).isInstanceOf(TransactionVerificationException.ContractConstraintRejection::class.java)
+        assertThat(error).isInstanceOf(TransactionVerificationException.UntrustedAttachmentsException::class.java)
     }
 }
@@ -55,7 +55,7 @@ class AttachmentsClassLoaderSerializationTests {
                 arrayOf(isolatedId, att1, att2).map { storage.openAttachment(it)!! },
                 testNetworkParameters(),
                 SecureHash.zeroHash,
-                { attachmentTrustCalculator.calculate(it) }) { classLoader ->
+                { attachmentTrustCalculator.calculate(it) }, attachmentsClassLoaderCache = null) { classLoader ->
             val contractClass = Class.forName(ISOLATED_CONTRACT_CLASS_NAME, true, classLoader)
             val contract = contractClass.getDeclaredConstructor().newInstance() as Contract
             assertEquals("helloworld", contract.declaredField<Any?>("magicString").value)
@@ -23,6 +23,7 @@ import net.corda.core.internal.inputStream
 import net.corda.core.node.NetworkParameters
 import net.corda.core.node.services.AttachmentId
 import net.corda.core.serialization.internal.AttachmentsClassLoader
+import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
 import net.corda.testing.common.internal.testNetworkParameters
 import net.corda.node.services.attachments.NodeAttachmentTrustCalculator
 import net.corda.testing.contracts.DummyContract
@@ -521,6 +522,7 @@ class AttachmentsClassLoaderTests {
         val id = SecureHash.randomSHA256()
         val timeWindow: TimeWindow? = null
         val privacySalt = PrivacySalt()
+        val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(cacheFactory)
         val transaction = createLedgerTransaction(
                 inputs,
                 outputs,
@@ -532,7 +534,8 @@ class AttachmentsClassLoaderTests {
                 privacySalt,
                 testNetworkParameters(),
                 emptyList(),
-                isAttachmentTrusted = { true }
+                isAttachmentTrusted = { true },
+                attachmentsClassLoaderCache = attachmentsClassLoaderCache
         )
         transaction.verify()
     }
@@ -10,6 +10,7 @@ import net.corda.core.internal.AbstractAttachment
 import net.corda.core.internal.TESTDSL_UPLOADER
 import net.corda.core.internal.createLedgerTransaction
 import net.corda.core.node.NotaryInfo
+import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
 import net.corda.core.transactions.SignedTransaction
 import net.corda.core.transactions.WireTransaction
 import net.corda.testing.common.internal.testNetworkParameters
@@ -18,6 +19,7 @@ import net.corda.testing.core.*
 import net.corda.testing.internal.createWireTransaction
 import net.corda.testing.internal.fakeAttachment
 import net.corda.coretesting.internal.rigorousMock
+import net.corda.testing.internal.TestingNamedCacheFactory
 import org.junit.Rule
 import org.junit.Test
 import java.math.BigInteger
@@ -131,6 +133,7 @@ class TransactionTests {
         val id = SecureHash.randomSHA256()
         val timeWindow: TimeWindow? = null
         val privacySalt = PrivacySalt()
+        val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(TestingNamedCacheFactory())
         val transaction = createLedgerTransaction(
                 inputs,
                 outputs,
@@ -142,7 +145,8 @@ class TransactionTests {
                 privacySalt,
                 testNetworkParameters(),
                 emptyList(),
-                isAttachmentTrusted = { true }
+                isAttachmentTrusted = { true },
+                attachmentsClassLoaderCache = attachmentsClassLoaderCache
        )

         transaction.verify()
@@ -183,6 +187,7 @@ class TransactionTests {
         val id = SecureHash.randomSHA256()
         val timeWindow: TimeWindow? = null
         val privacySalt = PrivacySalt()
+        val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(TestingNamedCacheFactory())

         fun buildTransaction() = createLedgerTransaction(
                 inputs,
@@ -195,7 +200,8 @@ class TransactionTests {
                 privacySalt,
                 testNetworkParameters(notaries = listOf(NotaryInfo(DUMMY_NOTARY, true))),
                 emptyList(),
-                isAttachmentTrusted = { true }
+                isAttachmentTrusted = { true },
+                attachmentsClassLoaderCache = attachmentsClassLoaderCache
        )

         assertFailsWith<TransactionVerificationException.NotaryChangeInWrongTransactionType> { buildTransaction().verify() }
@@ -89,6 +89,7 @@ interface OwnableState : ContractState {
 // DOCEND 3

 /** Something which is scheduled to happen at a point in time. */
+@KeepForDJVM
 interface Scheduled {
     val scheduledAt: Instant
 }
@@ -101,6 +102,7 @@ interface Scheduled {
 * lifecycle processing needs to take place. e.g. a fixing or a late payment etc.
 */
 @CordaSerializable
+@KeepForDJVM
 data class ScheduledStateRef(val ref: StateRef, override val scheduledAt: Instant) : Scheduled

 /**
@@ -115,7 +117,7 @@ data class ScheduledStateRef(val ref: StateRef, override val scheduledAt: Instan
 * for a particular [ContractState] have been processed/fired etc. If the activity is not "on ledger" then the
 * scheduled activity shouldn't be either.
 */
-@DeleteForDJVM
+@KeepForDJVM
 data class ScheduledActivity(val logicRef: FlowLogicRef, override val scheduledAt: Instant) : Scheduled

 // DOCSTART 2
@@ -134,7 +136,7 @@ interface LinearState : ContractState {
     val linearId: UniqueIdentifier
 }
 // DOCEND 2
-@DeleteForDJVM
+@KeepForDJVM
 interface SchedulableState : ContractState {
     /**
     * Indicate whether there is some activity to be performed at some future point in time with respect to this
@@ -7,6 +7,7 @@ import net.corda.core.crypto.SecureHash
 import net.corda.core.flows.FlowLogic
 import net.corda.core.internal.cordapp.CordappImpl.Companion.UNKNOWN_VALUE
 import net.corda.core.schemas.MappedSchema
+import net.corda.core.serialization.CheckpointCustomSerializer
 import net.corda.core.serialization.SerializationCustomSerializer
 import net.corda.core.serialization.SerializationWhitelist
 import net.corda.core.serialization.SerializeAsToken
@@ -29,6 +30,7 @@ import java.net.URL
 * @property services List of RPC services
 * @property serializationWhitelists List of Corda plugin registries
 * @property serializationCustomSerializers List of serializers
+ * @property checkpointCustomSerializers List of serializers for checkpoints
 * @property customSchemas List of custom schemas
 * @property allFlows List of all flow classes
 * @property jarPath The path to the JAR for this CorDapp
@@ -49,6 +51,7 @@ interface Cordapp {
     val services: List<Class<out SerializeAsToken>>
     val serializationWhitelists: List<SerializationWhitelist>
     val serializationCustomSerializers: List<SerializationCustomSerializer<*, *>>
+    val checkpointCustomSerializers: List<CheckpointCustomSerializer<*, *>>
     val customSchemas: Set<MappedSchema>
     val allFlows: List<Class<out FlowLogic<*>>>
     val jarPath: URL
@@ -25,6 +25,7 @@ import net.corda.core.node.NodeInfo
 import net.corda.core.node.ServiceHub
 import net.corda.core.serialization.CordaSerializable
 import net.corda.core.transactions.SignedTransaction
+import net.corda.core.utilities.NonEmptySet
 import net.corda.core.utilities.ProgressTracker
 import net.corda.core.utilities.UntrustworthyData
 import net.corda.core.utilities.debug
@@ -378,6 +379,22 @@ abstract class FlowLogic<out T> {
         stateMachine.suspend(request, maySkipCheckpoint)
     }

+    /**
+     * Closes the provided sessions and performs cleanup of any resources tied to these sessions.
+     *
+     * Note that sessions are closed automatically when the corresponding top-level flow terminates.
+     * So, it's beneficial to eagerly close them in long-lived flows that might have many open sessions that are not needed anymore and consume resources (e.g. memory, disk etc.).
+     * A closed session cannot be used anymore, e.g. to send or receive messages. So, you have to ensure you are calling this method only when the provided sessions are not going to be used anymore.
+     * As a result, any operations on a closed session will fail with an [UnexpectedFlowEndException].
+     * When a session is closed, the other side is informed and the session is closed there too eventually.
+     * To prevent misuse of the API, if there is an attempt to close an uninitialised session the invocation will fail with an [IllegalStateException].
+     */
+    @Suspendable
+    fun close(sessions: NonEmptySet<FlowSession>) {
+        val request = FlowIORequest.CloseSessions(sessions)
+        stateMachine.suspend(request, false)
+    }
+
     /**
     * Invokes the given subflow. This function returns once the subflow completes successfully with the result
    * returned by that subflow's [call] method. If the subflow has a progress tracker, it is attached to the
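A minimal usage sketch of the session-close API added above, assuming a hypothetical initiating flow (the responder flow and the counterparty list are placeholders); only FlowLogic.close, FlowSession.send and NonEmptySet come from the API shown in the diff.

    import co.paralleluniverse.fibers.Suspendable
    import net.corda.core.flows.FlowLogic
    import net.corda.core.flows.InitiatingFlow
    import net.corda.core.flows.StartableByRPC
    import net.corda.core.identity.Party
    import net.corda.core.utilities.NonEmptySet

    // Hypothetical long-lived flow that eagerly releases its sessions once they are no longer needed.
    @InitiatingFlow
    @StartableByRPC
    class BroadcastAndMoveOnFlow(private val counterparties: List<Party>) : FlowLogic<Unit>() {
        @Suspendable
        override fun call() {
            val sessions = counterparties.map { initiateFlow(it) }
            sessions.forEach { it.send("ping") }              // illustrative payload
            close(NonEmptySet.copyOf(sessions))               // further use of these sessions throws UnexpectedFlowEndException
            // ... long-running work that no longer touches the counterparties ...
        }
    }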
@@ -1,7 +1,9 @@
 package net.corda.core.flows

 import net.corda.core.CordaInternal
+import net.corda.core.DeleteForDJVM
 import net.corda.core.DoNotImplement
+import net.corda.core.KeepForDJVM
 import net.corda.core.serialization.CordaSerializable

 /**
@@ -11,11 +13,13 @@ import net.corda.core.serialization.CordaSerializable
 * the flow to run at the scheduled time.
 */
 @DoNotImplement
+@KeepForDJVM
 interface FlowLogicRefFactory {
     /**
     * Construct a FlowLogicRef. This is intended for cases where the calling code has the relevant class already
     * and can provide it directly.
     */
+    @DeleteForDJVM
     fun create(flowClass: Class<out FlowLogic<*>>, vararg args: Any?): FlowLogicRef

     /**
@@ -30,12 +34,14 @@ interface FlowLogicRefFactory {
     * [SchedulableFlow] annotation.
     */
     @CordaInternal
+    @DeleteForDJVM
     fun createForRPC(flowClass: Class<out FlowLogic<*>>, vararg args: Any?): FlowLogicRef

     /**
     * Converts a [FlowLogicRef] object that was obtained from the calls above into a [FlowLogic], after doing some
     * validation to ensure it points to a legitimate flow class.
     */
+    @DeleteForDJVM
     fun toFlowLogic(ref: FlowLogicRef): FlowLogic<*>
 }

@@ -59,4 +65,5 @@ class IllegalFlowLogicException(val type: String, msg: String) :
 // TODO: align this with the existing [FlowRef] in the bank-side API (probably replace some of the API classes)
 @CordaSerializable
 @DoNotImplement
+@KeepForDJVM
 interface FlowLogicRef
@@ -191,6 +191,19 @@ abstract class FlowSession {
     */
    @Suspendable
    abstract fun send(payload: Any)
+
+    /**
+     * Closes this session and performs cleanup of any resources tied to this session.
+     *
+     * Note that sessions are closed automatically when the corresponding top-level flow terminates.
+     * So, it's beneficial to eagerly close them in long-lived flows that might have many open sessions that are not needed anymore and consume resources (e.g. memory, disk etc.).
+     * A closed session cannot be used anymore, e.g. to send or receive messages. So, you have to ensure you are calling this method only when the session is not going to be used anymore.
+     * As a result, any operations on a closed session will fail with an [UnexpectedFlowEndException].
+     * When a session is closed, the other side is informed and the session is closed there too eventually.
+     * To prevent misuse of the API, if there is an attempt to close an uninitialised session the invocation will fail with an [IllegalStateException].
+     */
+    @Suspendable
+    abstract fun close()
 }

 /**
@@ -28,7 +28,7 @@ import java.util.jar.JarInputStream

 // *Internal* Corda-specific utilities.

-const val PLATFORM_VERSION = 7
+const val PLATFORM_VERSION = 8

 fun ServicesForResolution.ensureMinimumPlatformVersion(requiredMinPlatformVersion: Int, feature: String) {
     checkMinimumPlatformVersion(networkParameters.minimumPlatformVersion, requiredMinPlatformVersion, feature)
@@ -55,6 +55,13 @@ sealed class FlowIORequest<out R : Any> {
         }}, shouldRetrySend=$shouldRetrySend)"
     }

+    /**
+     * Closes the specified sessions.
+     *
+     * @property sessions the sessions to be closed.
+     */
+    data class CloseSessions(val sessions: NonEmptySet<FlowSession>): FlowIORequest<Unit>()
+
     /**
     * Wait for a transaction to be committed to the database.
     *
@@ -5,6 +5,7 @@ import net.corda.core.DeleteForDJVM
 import net.corda.core.internal.notary.NotaryService
 import net.corda.core.node.ServiceHub
 import net.corda.core.node.StatesToRecord
+import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
 import java.util.concurrent.ExecutorService

 // TODO: This should really be called ServiceHubInternal but that name is already taken by net.corda.node.services.api.ServiceHubInternal.
@@ -21,6 +22,8 @@ interface ServiceHubCoreInternal : ServiceHub {
     val notaryService: NotaryService?

     fun createTransactionsResolver(flow: ResolveTransactionsFlow): TransactionsResolver
+
+    val attachmentsClassLoaderCache: AttachmentsClassLoaderCache
 }

 interface TransactionsResolver {
@@ -9,6 +9,7 @@ import net.corda.core.internal.VisibleForTesting
 import net.corda.core.internal.notary.NotaryService
 import net.corda.core.internal.toPath
 import net.corda.core.schemas.MappedSchema
+import net.corda.core.serialization.CheckpointCustomSerializer
 import net.corda.core.serialization.SerializationCustomSerializer
 import net.corda.core.serialization.SerializationWhitelist
 import net.corda.core.serialization.SerializeAsToken
@@ -25,6 +26,7 @@ data class CordappImpl(
         override val services: List<Class<out SerializeAsToken>>,
         override val serializationWhitelists: List<SerializationWhitelist>,
         override val serializationCustomSerializers: List<SerializationCustomSerializer<*, *>>,
+        override val checkpointCustomSerializers: List<CheckpointCustomSerializer<*, *>>,
         override val customSchemas: Set<MappedSchema>,
         override val allFlows: List<Class<out FlowLogic<*>>>,
         override val jarPath: URL,
@@ -79,6 +81,7 @@ data class CordappImpl(
                 services = emptyList(),
                 serializationWhitelists = emptyList(),
                 serializationCustomSerializers = emptyList(),
+                checkpointCustomSerializers = emptyList(),
                 customSchemas = emptySet(),
                 jarPath = Paths.get("").toUri().toURL(),
                 info = UNKNOWN_INFO,
@@ -25,3 +25,26 @@ interface SerializationCustomSerializer<OBJ, PROXY> {
     */
    fun fromProxy(proxy: PROXY): OBJ
 }
+
+/**
+ * Allows CorDapps to provide custom serializers for classes that do not serialize successfully during a checkpoint.
+ * In this case, a proxy serializer can be written that implements this interface whose purpose is to move between
+ * unserializable types and an intermediate representation.
+ *
+ * NOTE: Only implement this interface if you have a class that triggers an error during normal checkpoint
+ * serialization/deserialization.
+ */
+@KeepForDJVM
+interface CheckpointCustomSerializer<OBJ, PROXY> {
+    /**
+     * Should facilitate the conversion of the third party object into the serializable
+     * local class specified by [PROXY]
+     */
+    fun toProxy(obj: OBJ): PROXY
+
+    /**
+     * Should facilitate the conversion of the proxy object into a new instance of the
+     * unserializable type
+     */
+    fun fromProxy(proxy: PROXY): OBJ
+}
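A minimal sketch of a checkpoint proxy serializer implementing the interface above; ThirdPartyConnection and ConnectionProxy are invented types, and discovery from the CorDapp jar is assumed to work like the other custom serializers listed on the Cordapp interface earlier in this diff.

    import net.corda.core.serialization.CheckpointCustomSerializer

    // Hypothetical third-party type that fails normal checkpoint serialization.
    class ThirdPartyConnection(val url: String)

    // Serializable intermediate representation used as the proxy.
    data class ConnectionProxy(val url: String)

    class ThirdPartyConnectionSerializer : CheckpointCustomSerializer<ThirdPartyConnection, ConnectionProxy> {
        // Convert the unserializable object into its proxy form before the checkpoint is written.
        override fun toProxy(obj: ThirdPartyConnection): ConnectionProxy = ConnectionProxy(obj.url)

        // Rebuild the unserializable object from the proxy when the checkpoint is restored.
        override fun fromProxy(proxy: ConnectionProxy): ThirdPartyConnection = ThirdPartyConnection(proxy.url)
    }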
@@ -1,5 +1,8 @@
 package net.corda.core.serialization.internal

+import com.github.benmanes.caffeine.cache.Cache
+import com.github.benmanes.caffeine.cache.Caffeine
+import net.corda.core.DeleteForDJVM
 import net.corda.core.contracts.Attachment
 import net.corda.core.contracts.ContractAttachment
 import net.corda.core.contracts.TransactionVerificationException
@@ -21,6 +24,7 @@ import java.lang.ref.WeakReference
 import java.net.*
 import java.security.Permission
 import java.util.*
+import java.util.function.Function

 /**
 * A custom ClassLoader that knows how to load classes from a set of attachments. The attachments themselves only
@@ -289,31 +293,27 @@ class AttachmentsClassLoader(attachments: List<Attachment>,
 */
 @VisibleForTesting
 object AttachmentsClassLoaderBuilder {
-    private const val CACHE_SIZE = 1000
+    const val CACHE_SIZE = 16

-    // We use a set here because the ordering of attachments doesn't affect code execution, due to the no
-    // overlap rule, and attachments don't have any particular ordering enforced by the builders. So we
-    // can just do unordered comparisons here. But the same attachments run with different network parameters
-    // may behave differently, so that has to be a part of the cache key.
-    private data class Key(val hashes: Set<SecureHash>, val params: NetworkParameters)
+    private val fallBackCache: AttachmentsClassLoaderCache = AttachmentsClassLoaderSimpleCacheImpl(CACHE_SIZE)

-    // This runs in the DJVM so it can't use caffeine.
-    private val cache: MutableMap<Key, SerializationContext> = createSimpleCache<Key, SerializationContext>(CACHE_SIZE).toSynchronised()

     /**
     * Runs the given block with serialization execution context set up with a (possibly cached) attachments classloader.
     *
     * @param txId The transaction ID that triggered this request; it's unused except for error messages and exceptions that can occur during setup.
     */
+    @Suppress("LongParameterList")
     fun <T> withAttachmentsClassloaderContext(attachments: List<Attachment>,
                                               params: NetworkParameters,
                                               txId: SecureHash,
                                               isAttachmentTrusted: (Attachment) -> Boolean,
                                               parent: ClassLoader = ClassLoader.getSystemClassLoader(),
+                                              attachmentsClassLoaderCache: AttachmentsClassLoaderCache?,
                                               block: (ClassLoader) -> T): T {
         val attachmentIds = attachments.map(Attachment::id).toSet()

-        val serializationContext = cache.computeIfAbsent(Key(attachmentIds, params)) {
+        val cache = attachmentsClassLoaderCache ?: fallBackCache
+        val serializationContext = cache.computeIfAbsent(AttachmentsClassLoaderKey(attachmentIds, params), Function {
             // Create classloader and load serializers, whitelisted classes
             val transactionClassLoader = AttachmentsClassLoader(attachments, params, txId, isAttachmentTrusted, parent)
             val serializers = try {
@@ -336,7 +336,7 @@ object AttachmentsClassLoaderBuilder {
                     .withWhitelist(whitelistedClasses)
                     .withCustomSerializers(serializers)
                     .withoutCarpenter()
-        }
+        })

         // Deserialize all relevant classes in the transaction classloader.
         return SerializationFactory.defaultFactory.withCurrentContext(serializationContext) {
@@ -420,6 +420,36 @@ private class AttachmentsHolderImpl : AttachmentsHolder {
     }
 }

+interface AttachmentsClassLoaderCache {
+    fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext
+}
+
+@DeleteForDJVM
+class AttachmentsClassLoaderCacheImpl(cacheFactory: NamedCacheFactory) : SingletonSerializeAsToken(), AttachmentsClassLoaderCache {
+
+    private val cache: Cache<AttachmentsClassLoaderKey, SerializationContext> = cacheFactory.buildNamed(Caffeine.newBuilder(), "AttachmentsClassLoader_cache")
+
+    override fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext {
+        return cache.get(key, mappingFunction) ?: throw NullPointerException("null returned from cache mapping function")
+    }
+}
+
+class AttachmentsClassLoaderSimpleCacheImpl(cacheSize: Int) : AttachmentsClassLoaderCache {
+
+    private val cache: MutableMap<AttachmentsClassLoaderKey, SerializationContext>
+            = createSimpleCache<AttachmentsClassLoaderKey, SerializationContext>(cacheSize).toSynchronised()
+
+    override fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext {
+        return cache.computeIfAbsent(key, mappingFunction)
+    }
+}
+
+// We use a set here because the ordering of attachments doesn't affect code execution, due to the no
+// overlap rule, and attachments don't have any particular ordering enforced by the builders. So we
+// can just do unordered comparisons here. But the same attachments run with different network parameters
+// may behave differently, so that has to be a part of the cache key.
+data class AttachmentsClassLoaderKey(val hashes: Set<SecureHash>, val params: NetworkParameters)
+
 private class AttachmentURLConnection(url: URL, private val attachment: Attachment) : URLConnection(url) {
     override fun getContentLengthLong(): Long = attachment.size.toLong()
     override fun getInputStream(): InputStream = attachment.open()
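A hedged sketch of calling the builder with the new cache parameter, based only on the signature shown above; attachments, networkParameters, txId, cacheFactory and contractClassName are placeholders, and the default parent classloader is left in place.

    // cacheFactory would be the node's NamedCacheFactory; passing null instead falls back to the builder's internal cache.
    val cache = AttachmentsClassLoaderCacheImpl(cacheFactory)
    val contractClass = AttachmentsClassLoaderBuilder.withAttachmentsClassloaderContext(
            attachments,                        // List<Attachment> resolved for the transaction
            networkParameters,
            txId,
            isAttachmentTrusted = { true },     // placeholder trust check
            attachmentsClassLoaderCache = cache
    ) { classLoader ->
        classLoader.loadClass(contractClassName)   // work performed inside the per-transaction classloader
    }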
@@ -56,6 +56,10 @@ interface CheckpointSerializationContext {
     * otherwise they appear as new copies of the object.
     */
    val objectReferencesEnabled: Boolean
+
+    /**
+     * User defined custom serializers for use in checkpoint serialization.
+     */
+    val checkpointCustomSerializers: Iterable<CheckpointCustomSerializer<*,*>>

    /**
     * Helper method to return a new context based on this context with the property added.
@@ -86,6 +90,11 @@ interface CheckpointSerializationContext {
     * A shallow copy of this context but with the given encoding whitelist.
     */
    fun withEncodingWhitelist(encodingWhitelist: EncodingWhitelist): CheckpointSerializationContext
+
+    /**
+     * A shallow copy of this context but with the given custom serializers.
+     */
+    fun withCheckpointCustomSerializers(checkpointCustomSerializers: Iterable<CheckpointCustomSerializer<*, *>>): CheckpointSerializationContext
}

/*
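(Aside, not from the diff: a hedged sketch of the new context API from the CorDapp side. baseContext and mySerializers are placeholders for values a caller would already have.)

// Returns a shallow copy of an existing checkpoint context that carries user-defined serializers,
// which can later be read back through the new checkpointCustomSerializers property.
fun withCordappCheckpointSerializers(
        baseContext: CheckpointSerializationContext,
        mySerializers: Iterable<CheckpointCustomSerializer<*, *>>
): CheckpointSerializationContext {
    val enriched = baseContext.withCheckpointCustomSerializers(mySerializers)
    // Purely for diagnostics: list which serializer classes the copy now carries.
    println(enriched.checkpointCustomSerializers.joinToString { it.javaClass.name })
    return enriched
}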
@@ -153,7 +153,8 @@ data class ContractUpgradeWireTransaction(
                listOf(legacyAttachment, upgradedAttachment),
                params,
                id,
-                { (services as ServiceHubCoreInternal).attachmentTrustCalculator.calculate(it) }) { transactionClassLoader ->
+                { (services as ServiceHubCoreInternal).attachmentTrustCalculator.calculate(it) },
+                attachmentsClassLoaderCache = (services as ServiceHubCoreInternal).attachmentsClassLoaderCache) { transactionClassLoader ->
            val resolvedInput = binaryInput.deserialize()
            val upgradedContract = upgradedContract(upgradedContractClassName, transactionClassLoader)
            val outputState = calculateUpgradedState(resolvedInput, upgradedContract, upgradedAttachment)
@@ -26,6 +26,7 @@ import net.corda.core.internal.deserialiseComponentGroup
import net.corda.core.internal.isUploaderTrusted
import net.corda.core.internal.uncheckedCast
import net.corda.core.node.NetworkParameters
+import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.internal.AttachmentsClassLoaderBuilder
import net.corda.core.utilities.contextLogger
import java.util.Collections.unmodifiableList
@@ -87,7 +88,8 @@ private constructor(
        private val serializedInputs: List<SerializedStateAndRef>?,
        private val serializedReferences: List<SerializedStateAndRef>?,
        private val isAttachmentTrusted: (Attachment) -> Boolean,
-        private val verifierFactory: (LedgerTransaction, ClassLoader) -> Verifier
+        private val verifierFactory: (LedgerTransaction, ClassLoader) -> Verifier,
+        private val attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
) : FullTransaction() {

    init {
@@ -124,7 +126,8 @@ private constructor(
                componentGroups: List<ComponentGroup>? = null,
                serializedInputs: List<SerializedStateAndRef>? = null,
                serializedReferences: List<SerializedStateAndRef>? = null,
-                isAttachmentTrusted: (Attachment) -> Boolean
+                isAttachmentTrusted: (Attachment) -> Boolean,
+                attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
        ): LedgerTransaction {
            return LedgerTransaction(
                    inputs = inputs,
@@ -141,7 +144,8 @@ private constructor(
                    serializedInputs = protect(serializedInputs),
                    serializedReferences = protect(serializedReferences),
                    isAttachmentTrusted = isAttachmentTrusted,
-                    verifierFactory = ::BasicVerifier
+                    verifierFactory = ::BasicVerifier,
+                    attachmentsClassLoaderCache = attachmentsClassLoaderCache
            )
        }

@@ -176,7 +180,8 @@ private constructor(
                    serializedInputs = null,
                    serializedReferences = null,
                    isAttachmentTrusted = { true },
-                    verifierFactory = ::BasicVerifier
+                    verifierFactory = ::BasicVerifier,
+                    attachmentsClassLoaderCache = null
            )
        }
    }
@@ -218,7 +223,8 @@ private constructor(
                txAttachments,
                getParamsWithGoo(),
                id,
-                isAttachmentTrusted = isAttachmentTrusted) { transactionClassLoader ->
+                isAttachmentTrusted = isAttachmentTrusted,
+                attachmentsClassLoaderCache = attachmentsClassLoaderCache) { transactionClassLoader ->
            // Create a copy of the outer LedgerTransaction which deserializes all fields inside the [transactionClassLoader].
            // Only the copy will be used for verification, and the outer shell will be discarded.
            // This artifice is required to preserve backwards compatibility.
@@ -254,7 +260,8 @@ private constructor(
            serializedInputs = serializedInputs,
            serializedReferences = serializedReferences,
            isAttachmentTrusted = isAttachmentTrusted,
-            verifierFactory = alternateVerifier
+            verifierFactory = alternateVerifier,
+            attachmentsClassLoaderCache = attachmentsClassLoaderCache
    )

    // Read network parameters with backwards compatibility goo.
@@ -320,7 +327,8 @@ private constructor(
                    serializedInputs = serializedInputs,
                    serializedReferences = serializedReferences,
                    isAttachmentTrusted = isAttachmentTrusted,
-                    verifierFactory = verifierFactory
+                    verifierFactory = verifierFactory,
+                    attachmentsClassLoaderCache = attachmentsClassLoaderCache
            )
        } else {
            // This branch is only present for backwards compatibility.
@@ -704,7 +712,8 @@ private constructor(
            serializedInputs = null,
            serializedReferences = null,
            isAttachmentTrusted = { it.isUploaderTrusted() },
-            verifierFactory = ::BasicVerifier
+            verifierFactory = ::BasicVerifier,
+            attachmentsClassLoaderCache = null
    )

    @Deprecated("LedgerTransaction should not be created directly, use WireTransaction.toLedgerTransaction instead.")
@@ -733,7 +742,8 @@ private constructor(
            serializedInputs = null,
            serializedReferences = null,
            isAttachmentTrusted = { it.isUploaderTrusted() },
-            verifierFactory = ::BasicVerifier
+            verifierFactory = ::BasicVerifier,
+            attachmentsClassLoaderCache = null
    )

    @Deprecated("LedgerTransactions should not be created directly, use WireTransaction.toLedgerTransaction instead.")
@@ -761,7 +771,8 @@ private constructor(
                serializedInputs = serializedInputs,
                serializedReferences = serializedReferences,
                isAttachmentTrusted = isAttachmentTrusted,
-                verifierFactory = verifierFactory
+                verifierFactory = verifierFactory,
+                attachmentsClassLoaderCache = attachmentsClassLoaderCache
        )
    }

@@ -791,7 +802,8 @@ private constructor(
                serializedInputs = serializedInputs,
                serializedReferences = serializedReferences,
                isAttachmentTrusted = isAttachmentTrusted,
-                verifierFactory = verifierFactory
+                verifierFactory = verifierFactory,
+                attachmentsClassLoaderCache = attachmentsClassLoaderCache
        )
    }
}
@@ -15,6 +15,7 @@ import net.corda.core.node.ServicesForResolution
import net.corda.core.node.services.AttachmentId
import net.corda.core.serialization.CordaSerializable
import net.corda.core.serialization.SerializedBytes
+import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.serialize
import net.corda.core.utilities.OpaqueBytes
import java.security.PublicKey
@@ -109,7 +110,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
                    services.networkParametersService.lookup(hashToResolve)
                },
                // `as?` is used due to [MockServices] not implementing [ServiceHubCoreInternal]
-                isAttachmentTrusted = { (services as? ServiceHubCoreInternal)?.attachmentTrustCalculator?.calculate(it) ?: true }
+                isAttachmentTrusted = { (services as? ServiceHubCoreInternal)?.attachmentTrustCalculator?.calculate(it) ?: true },
+                attachmentsClassLoaderCache = (services as? ServiceHubCoreInternal)?.attachmentsClassLoaderCache
            )
        )
    }
@@ -145,7 +147,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
                resolveAttachment,
                { stateRef -> resolveStateRef(stateRef)?.serialize() },
                { null },
-                { it.isUploaderTrusted() }
+                { it.isUploaderTrusted() },
+                null
        )
    }

@@ -161,16 +164,19 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
                resolveAttachment,
                { stateRef -> resolveStateRef(stateRef)?.serialize() },
                resolveParameters,
-                { true } // Any attachment loaded through the DJVM should be trusted
+                { true }, // Any attachment loaded through the DJVM should be trusted
+                null
        )
    }

+    @Suppress("LongParameterList", "ThrowsCount")
    private fun toLedgerTransactionInternal(
            resolveIdentity: (PublicKey) -> Party?,
            resolveAttachment: (SecureHash) -> Attachment?,
            resolveStateRefAsSerialized: (StateRef) -> SerializedBytes<TransactionState<ContractState>>?,
            resolveParameters: (SecureHash?) -> NetworkParameters?,
-            isAttachmentTrusted: (Attachment) -> Boolean
+            isAttachmentTrusted: (Attachment) -> Boolean,
+            attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
    ): LedgerTransaction {
        // Look up public keys to authenticated identities.
        val authenticatedCommands = commands.lazyMapped { cmd, _ ->
@@ -206,7 +212,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
                componentGroups,
                serializedResolvedInputs,
                serializedResolvedReferences,
-                isAttachmentTrusted
+                isAttachmentTrusted,
+                attachmentsClassLoaderCache
        )

        checkTransactionSize(ltx, resolvedNetworkParameters.maxTransactionSize, serializedResolvedInputs, serializedResolvedReferences)
@@ -4,6 +4,7 @@ import net.corda.core.contracts.*
import net.corda.core.crypto.SecureHash
import net.corda.core.identity.Party
import net.corda.core.node.NetworkParameters
+import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.transactions.ComponentGroup
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.transactions.WireTransaction
@@ -17,6 +18,7 @@ fun WireTransaction.accessGroupHashes() = this.groupHashes
fun WireTransaction.accessGroupMerkleRoots() = this.groupsMerkleRoots
fun WireTransaction.accessAvailableComponentHashes() = this.availableComponentHashes

+@Suppress("LongParameterList")
fun createLedgerTransaction(
        inputs: List<StateAndRef<ContractState>>,
        outputs: List<TransactionState<ContractState>>,
@@ -31,8 +33,9 @@ fun createLedgerTransaction(
        componentGroups: List<ComponentGroup>? = null,
        serializedInputs: List<SerializedStateAndRef>? = null,
        serializedReferences: List<SerializedStateAndRef>? = null,
-        isAttachmentTrusted: (Attachment) -> Boolean
-): LedgerTransaction = LedgerTransaction.create(inputs, outputs, commands, attachments, id, notary, timeWindow, privacySalt, networkParameters, references, componentGroups, serializedInputs, serializedReferences, isAttachmentTrusted)
+        isAttachmentTrusted: (Attachment) -> Boolean,
+        attachmentsClassLoaderCache: AttachmentsClassLoaderCache
+): LedgerTransaction = LedgerTransaction.create(inputs, outputs, commands, attachments, id, notary, timeWindow, privacySalt, networkParameters, references, componentGroups, serializedInputs, serializedReferences, isAttachmentTrusted, attachmentsClassLoaderCache)

fun createContractCreationError(txId: SecureHash, contractClass: String, cause: Throwable) = TransactionVerificationException.ContractCreationError(txId, contractClass, cause)
fun createContractRejection(txId: SecureHash, contract: Contract, cause: Throwable) = TransactionVerificationException.ContractRejection(txId, contract, cause)
@@ -1398,7 +1398,7 @@
<ID>ThrowsCount:JarScanningCordappLoader.kt$JarScanningCordappLoader$private fun parseVersion(versionStr: String?, attributeName: String): Int</ID>
<ID>ThrowsCount:LedgerDSLInterpreter.kt$Verifies$ fun failsWith(expectedMessage: String?): EnforceVerifyOrFail</ID>
<ID>ThrowsCount:MockServices.kt$ fun <T : SerializeAsToken> createMockCordaService(serviceHub: MockServices, serviceConstructor: (AppServiceHub) -> T): T</ID>
-<ID>ThrowsCount:NetworkRegistrationHelper.kt$NetworkRegistrationHelper$private fun validateCertificates(registeringPublicKey: PublicKey, certificates: List<X509Certificate>)</ID>
+<ID>ThrowsCount:NetworkRegistrationHelper.kt$NetworkRegistrationHelper$private fun validateCertificates( registeringPublicKey: PublicKey, registeringLegalName: CordaX500Name, expectedCertRole: CertRole, certificates: List<X509Certificate> )</ID>
<ID>ThrowsCount:NodeInfoFilesCopier.kt$NodeInfoFilesCopier$private fun atomicCopy(source: Path, destination: Path)</ID>
<ID>ThrowsCount:NodeVaultService.kt$NodeVaultService$@Throws(VaultQueryException::class) private fun <T : ContractState> _queryBy(criteria: QueryCriteria, paging_: PageSpecification, sorting: Sort, contractStateType: Class<out T>, skipPagingChecks: Boolean): Vault.Page<T></ID>
<ID>ThrowsCount:NodeVaultService.kt$NodeVaultService$private fun makeUpdates(batch: Iterable<CoreTransaction>, statesToRecord: StatesToRecord, previouslySeen: Boolean): List<Vault.Update<ContractState>></ID>
@@ -11,7 +11,7 @@ evaluationDependsOn(':jdk8u-deterministic')
def jdk8uDeterministic = project(':jdk8u-deterministic')

ext {
-    jdkTask = jdk8uDeterministic.assemble
+    jdkTask = jdk8uDeterministic.tasks.named('assemble')
    deterministic_jdk_home = jdk8uDeterministic.jdk_home
    deterministic_rt_jar = jdk8uDeterministic.rt_jar
}
docs/build.gradle (new file, 122 lines)
@@ -0,0 +1,122 @@
+import org.apache.tools.ant.taskdefs.condition.Os
+
+apply plugin: 'org.jetbrains.dokka'
+apply plugin: 'net.corda.plugins.publish-utils'
+apply plugin: 'maven-publish'
+apply plugin: 'com.jfrog.artifactory'
+
+def internalPackagePrefixes(sourceDirs) {
+    def prefixes = []
+    // Kotlin allows packages to deviate from the directory structure, but let's assume they don't:
+    sourceDirs.collect { sourceDir ->
+        sourceDir.traverse(type: groovy.io.FileType.DIRECTORIES) {
+            if (it.name == 'internal') {
+                prefixes.add sourceDir.toPath().relativize(it.toPath()).toString().replace(File.separator, '.')
+            }
+        }
+    }
+    prefixes
+}
+
+ext {
+    // TODO: Add '../client/jfx/src/main/kotlin' and '../client/mock/src/main/kotlin' if we decide to make them into public API
+    dokkaSourceDirs = files('../core/src/main/kotlin', '../client/rpc/src/main/kotlin', '../finance/workflows/src/main/kotlin', '../finance/contracts/src/main/kotlin', '../client/jackson/src/main/kotlin',
+            '../testing/test-utils/src/main/kotlin', '../testing/node-driver/src/main/kotlin')
+    internalPackagePrefixes = internalPackagePrefixes(dokkaSourceDirs)
+    archivedApiDocsBaseFilename = 'api-docs'
+}
+
+dokka {
+    outputDirectory = file("${rootProject.rootDir}/docs/build/html/api/kotlin")
+}
+
+task dokkaJavadoc(type: org.jetbrains.dokka.gradle.DokkaTask) {
+    outputFormat = "javadoc"
+    outputDirectory = file("${rootProject.rootDir}/docs/build/html/api/javadoc")
+}
+
+[dokka, dokkaJavadoc].collect {
+    it.configure {
+        moduleName = 'corda'
+        processConfigurations = ['compile']
+        sourceDirs = dokkaSourceDirs
+        includes = ['packages.md']
+        jdkVersion = 8
+        externalDocumentationLink {
+            url = new URL("http://fasterxml.github.io/jackson-core/javadoc/2.9/")
+        }
+        externalDocumentationLink {
+            url = new URL("https://docs.oracle.com/javafx/2/api/")
+        }
+        externalDocumentationLink {
+            url = new URL("http://www.bouncycastle.org/docs/docs1.5on/")
+        }
+        internalPackagePrefixes.collect { packagePrefix ->
+            packageOptions {
+                prefix = packagePrefix
+                suppress = true
+            }
+        }
+    }
+}
+
+task apidocs(dependsOn: ['dokka', 'dokkaJavadoc']) {
+    group "Documentation"
+    description "Build API documentation"
+}
+
+task makeHTMLDocs(type: Exec){
+    if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+        commandLine "docker", "run", "--rm", "-v", "${project.projectDir}:/opt/docs_builder", "-v", "${project.projectDir}/..:/opt", "corda/docs-builder:latest", "bash", "-c", "make-docsite-html.sh"
+    } else {
+        commandLine "bash", "-c", "docker run --rm --user \$(id -u):\$(id -g) -v ${project.projectDir}:/opt/docs_builder -v ${project.projectDir}/..:/opt corda/docs-builder:latest bash -c make-docsite-html.sh"
+    }
+}
+
+task makePDFDocs(type: Exec){
+    if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+        commandLine "docker", "run", "--rm", "-v", "${project.projectDir}:/opt/docs_builder", "-v", "${project.projectDir}/..:/opt", "corda/docs-builder:latest", "bash", "-c", "make-docsite-pdf.sh"
+    } else {
+        commandLine "bash", "-c", "docker run --rm --user \$(id -u):\$(id -g) -v ${project.projectDir}:/opt/docs_builder -v ${project.projectDir}/..:/opt corda/docs-builder:latest bash -c make-docsite-pdf.sh"
+    }
+}
+
+task makeDocs(dependsOn: ['makeHTMLDocs', 'makePDFDocs'])
+apidocs.shouldRunAfter makeDocs
+
+task archiveApiDocs(type: Tar) {
+    dependsOn apidocs
+    from buildDir
+    include 'html/**'
+    extension 'tgz'
+    compression Compression.GZIP
+}
+
+publishing {
+    publications {
+        if (System.getProperty('publishApiDocs') != null) {
+            archivedApiDocs(MavenPublication) {
+                artifact archiveApiDocs {
+                    artifactId archivedApiDocsBaseFilename
+                }
+            }
+        }
+    }
+}
+
+artifactoryPublish {
+    publications('archivedApiDocs')
+    version = version.replaceAll('-SNAPSHOT', '')
+    publishPom = false
+}
+
+artifactory {
+    publish {
+        contextUrl = artifactory_contextUrl
+        repository {
+            repoKey = 'corda-dependencies-dev'
+            username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
+            password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
+        }
+    }
+}
@@ -37,7 +37,9 @@ def copyJdk = tasks.register('copyJdk', Copy) {
    }
}

-assemble.dependsOn copyJdk
+tasks.named('assemble') {
+    dependsOn copyJdk
+}
tasks.named('jar', Jar) {
    enabled = false
}
@@ -6,6 +6,7 @@ import net.corda.core.crypto.internal.Instances
import org.bouncycastle.asn1.x509.AlgorithmIdentifier
import org.bouncycastle.operator.ContentSigner
import java.io.OutputStream
+import java.security.InvalidKeyException
import java.security.PrivateKey
import java.security.Provider
import java.security.SecureRandom
@@ -24,14 +25,18 @@ object ContentSignerBuilder {
        else
            Signature.getInstance(signatureScheme.signatureName, provider)

-        val sig = signatureInstance.apply {
-            // TODO special handling for Sphincs due to a known BouncyCastle's Sphincs bug we reported.
-            // It is fixed in BC 161b12, so consider updating the below if-statement after updating BouncyCastle.
-            if (random != null && signatureScheme != SPHINCS256_SHA256) {
-                initSign(privateKey, random)
-            } else {
-                initSign(privateKey)
+        val sig = try {
+            signatureInstance.apply {
+                // TODO special handling for Sphincs due to a known BouncyCastle's Sphincs bug we reported.
+                // It is fixed in BC 161b12, so consider updating the below if-statement after updating BouncyCastle.
+                if (random != null && signatureScheme != SPHINCS256_SHA256) {
+                    initSign(privateKey, random)
+                } else {
+                    initSign(privateKey)
+                }
            }
+        } catch(ex: InvalidKeyException) {
+            throw InvalidKeyException("Incorrect key type ${privateKey.algorithm} for signature scheme ${signatureInstance.algorithm}", ex)
        }
        return object : ContentSigner {
            private val stream = SignatureOutputStream(sig, optimised)
@@ -0,0 +1,103 @@
+package net.corda.nodeapi.internal.serialization.kryo
+
+import com.esotericsoftware.kryo.Kryo
+import com.esotericsoftware.kryo.Serializer
+import com.esotericsoftware.kryo.io.Input
+import com.esotericsoftware.kryo.io.Output
+import net.corda.core.serialization.CheckpointCustomSerializer
+import net.corda.serialization.internal.amqp.CORDAPP_TYPE
+import java.lang.reflect.Type
+import kotlin.reflect.jvm.javaType
+import kotlin.reflect.jvm.jvmErasure
+
+/**
+ * Adapts CheckpointCustomSerializer for use in Kryo
+ */
+internal class CustomSerializerCheckpointAdaptor<OBJ, PROXY>(private val userSerializer : CheckpointCustomSerializer<OBJ, PROXY>) : Serializer<OBJ>() {
+
+    /**
+     * The class name of the serializer we are adapting.
+     */
+    val serializerName: String = userSerializer.javaClass.name
+
+    /**
+     * The input type of this custom serializer.
+     */
+    val cordappType: Type
+
+    /**
+     * Check we have access to the types specified on the CheckpointCustomSerializer interface.
+     *
+     * Throws UnableToDetermineSerializerTypesException if the types are missing.
+     */
+    init {
+        val types: List<Type> = userSerializer::class
+                .supertypes
+                .filter { it.jvmErasure == CheckpointCustomSerializer::class }
+                .flatMap { it.arguments }
+                .mapNotNull { it.type?.javaType }
+
+        // We are expecting a cordapp type and a proxy type.
+        // We will only use the cordapp type in this class
+        // but we want to check both are present.
+        val typeParameterCount = 2
+        if (types.size != typeParameterCount) {
+            throw UnableToDetermineSerializerTypesException("Unable to determine serializer parent types")
+        }
+        cordappType = types[CORDAPP_TYPE]
+    }
+
+    /**
+     * Serialize obj to the Kryo stream.
+     */
+    override fun write(kryo: Kryo, output: Output, obj: OBJ) {
+
+        fun <T> writeToKryo(obj: T) = kryo.writeClassAndObject(output, obj)
+
+        // Write serializer type
+        writeToKryo(serializerName)
+
+        // Write proxy object
+        writeToKryo(userSerializer.toProxy(obj))
+    }
+
+    /**
+     * Deserialize an object from the Kryo stream.
+     */
+    override fun read(kryo: Kryo, input: Input, type: Class<OBJ>): OBJ {
+
+        @Suppress("UNCHECKED_CAST")
+        fun <T> readFromKryo() = kryo.readClassAndObject(input) as T
+
+        // Check the serializer type
+        checkSerializerType(readFromKryo())
+
+        // Read the proxy object
+        return userSerializer.fromProxy(readFromKryo())
+    }
+
+    /**
+     * Throws a `CustomCheckpointSerializersHaveChangedException` if the serializer type in the kryo stream does not match the serializer
+     * type for this custom serializer.
+     *
+     * @param checkpointSerializerType Serializer type from the Kryo stream
+     */
+    private fun checkSerializerType(checkpointSerializerType: String) {
+        if (checkpointSerializerType != serializerName)
+            throw CustomCheckpointSerializersHaveChangedException("The custom checkpoint serializers have changed while checkpoints exist. " +
+                    "Please restore the CorDapps to when this checkpoint was created.")
+    }
+}
+
+/**
+ * Thrown when the input/output types are missing from the custom serializer.
+ */
+class UnableToDetermineSerializerTypesException(message: String) : RuntimeException(message)
+
+/**
+ * Thrown when the custom serializer is found to be reading data from another type of custom serializer.
+ *
+ * This was expected to happen if the user adds or removes CorDapps while checkpoints exist but it turned out that registering serializers
+ * as default made the system reliable.
+ */
+class CustomCheckpointSerializersHaveChangedException(message: String) : RuntimeException(message)
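(Aside, not part of the new file above: a minimal example of the kind of CorDapp serializer this adaptor wraps. ThirdPartyConnection and ConnectionProxy are invented types; only the CheckpointCustomSerializer contract of toProxy and fromProxy comes from the code itself.)

// Hypothetical CorDapp types: an object that is awkward to checkpoint, and a simple proxy for it.
class ThirdPartyConnection(val hostname: String, val port: Int)
data class ConnectionProxy(val hostname: String, val port: Int)

class ThirdPartyConnectionSerializer : CheckpointCustomSerializer<ThirdPartyConnection, ConnectionProxy> {
    // Convert the awkward object into a proxy that Kryo can write into the checkpoint.
    override fun toProxy(obj: ThirdPartyConnection) = ConnectionProxy(obj.hostname, obj.port)

    // Rebuild the original object from the proxy when the checkpoint is restored.
    override fun fromProxy(proxy: ConnectionProxy) = ThirdPartyConnection(proxy.hostname, proxy.port)
}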
@@ -10,12 +10,14 @@ import com.esotericsoftware.kryo.io.Output
import com.esotericsoftware.kryo.pool.KryoPool
import com.esotericsoftware.kryo.serializers.ClosureSerializer
import net.corda.core.internal.uncheckedCast
+import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.ClassWhitelist
import net.corda.core.serialization.SerializationDefaults
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.internal.CheckpointSerializationContext
import net.corda.core.serialization.internal.CheckpointSerializer
import net.corda.core.utilities.ByteSequence
+import net.corda.core.utilities.loggerFor
import net.corda.serialization.internal.AlwaysAcceptEncodingWhitelist
import net.corda.serialization.internal.ByteBufferInputStream
import net.corda.serialization.internal.CheckpointSerializationContextImpl
@@ -40,10 +42,10 @@ private object AutoCloseableSerialisationDetector : Serializer<AutoCloseable>()
}

object KryoCheckpointSerializer : CheckpointSerializer {
-    private val kryoPoolsForContexts = ConcurrentHashMap<Pair<ClassWhitelist, ClassLoader>, KryoPool>()
+    private val kryoPoolsForContexts = ConcurrentHashMap<Triple<ClassWhitelist, ClassLoader, Iterable<CheckpointCustomSerializer<*,*>>>, KryoPool>()

    private fun getPool(context: CheckpointSerializationContext): KryoPool {
-        return kryoPoolsForContexts.computeIfAbsent(Pair(context.whitelist, context.deserializationClassLoader)) {
+        return kryoPoolsForContexts.computeIfAbsent(Triple(context.whitelist, context.deserializationClassLoader, context.checkpointCustomSerializers)) {
            KryoPool.Builder {
                val serializer = Fiber.getFiberSerializer(false) as KryoSerializer
                val classResolver = CordaClassResolver(context).apply { setKryo(serializer.kryo) }
@@ -56,12 +58,60 @@ object KryoCheckpointSerializer : CheckpointSerializer {
                addDefaultSerializer(AutoCloseable::class.java, AutoCloseableSerialisationDetector)
                register(ClosureSerializer.Closure::class.java, CordaClosureSerializer)
                classLoader = it.second
+
+                // Add custom serializers
+                val customSerializers = buildCustomSerializerAdaptors(context)
+                warnAboutDuplicateSerializers(customSerializers)
+                val classToSerializer = mapInputClassToCustomSerializer(context.deserializationClassLoader, customSerializers)
+                addDefaultCustomSerializers(this, classToSerializer)
            }
        }.build()
        }
    }

+    /**
+     * Returns a sorted list of CustomSerializerCheckpointAdaptor based on the custom serializers inside context.
+     *
+     * The adaptors are sorted by serializerName which maps to javaClass.name for the serializer class
+     */
+    private fun buildCustomSerializerAdaptors(context: CheckpointSerializationContext) =
+            context.checkpointCustomSerializers.map { CustomSerializerCheckpointAdaptor(it) }.sortedBy { it.serializerName }
+
+    /**
+     * Returns a list of pairs where the first element is the input class of the custom serializer and the second element is the
+     * custom serializer.
+     */
+    private fun mapInputClassToCustomSerializer(classLoader: ClassLoader, customSerializers: Iterable<CustomSerializerCheckpointAdaptor<*, *>>) =
+            customSerializers.map { getInputClassForCustomSerializer(classLoader, it) to it }
+
+    /**
+     * Returns the Class object for the serializers input type.
+     */
+    private fun getInputClassForCustomSerializer(classLoader: ClassLoader, customSerializer: CustomSerializerCheckpointAdaptor<*, *>): Class<*> {
+        val typeNameWithoutGenerics = customSerializer.cordappType.typeName.substringBefore('<')
+        return classLoader.loadClass(typeNameWithoutGenerics)
+    }
+
+    /**
+     * Emit a warning if two or more custom serializers are found for the same input type.
+     */
+    private fun warnAboutDuplicateSerializers(customSerializers: Iterable<CustomSerializerCheckpointAdaptor<*,*>>) =
+            customSerializers
+                    .groupBy({ it.cordappType }, { it.serializerName })
+                    .filter { (_, serializerNames) -> serializerNames.distinct().size > 1 }
+                    .forEach { (inputType, serializerNames) -> loggerFor<KryoCheckpointSerializer>().warn("Duplicate custom checkpoint serializer for type $inputType. Serializers: ${serializerNames.joinToString(", ")}") }
+
+    /**
+     * Register all custom serializers as default, this class + subclass, registrations.
+     *
+     * Serializers registered before this will take priority. This needs to run after registrations we want to keep otherwise it may
+     * replace them.
+     */
+    private fun addDefaultCustomSerializers(kryo: Kryo, classToSerializer: Iterable<Pair<Class<*>, CustomSerializerCheckpointAdaptor<*, *>>>) =
+            classToSerializer
+                    .forEach { (clazz, customSerializer) -> kryo.addDefaultSerializer(clazz, customSerializer) }
+
    private fun <T : Any> CheckpointSerializationContext.kryo(task: Kryo.() -> T): T {
        return getPool(this).run { kryo ->
            kryo.context.ensureCapacity(properties.size)
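(Aside on the Pair-to-Triple key change above, not from the diff: contexts that differ only in their registered checkpoint serializers must not share a Kryo pool, otherwise newly supplied CorDapp serializers would never be registered. The whitelist, classLoader and serializerA values below are stand-ins.)

// Illustration only: the third key component makes the two keys distinct,
// so getPool() builds a separate KryoPool for the context that carries custom serializers.
val withoutCustom = Triple(whitelist, classLoader, emptyList<CheckpointCustomSerializer<*, *>>())
val withCustom = Triple(whitelist, classLoader, listOf(serializerA))
check(withoutCustom != withCustom)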
@@ -0,0 +1,33 @@
+package net.corda.nodeapi.internal.crypto
+
+import net.corda.core.crypto.Crypto
+import org.assertj.core.api.Assertions.assertThatExceptionOfType
+import org.junit.Test
+import java.math.BigInteger
+import java.security.InvalidKeyException
+
+class ContentSignerBuilderTest {
+    companion object {
+        private const val entropy = "20200723"
+    }
+
+    @Test(timeout = 300_000)
+    fun `should build content signer for valid eddsa key`() {
+        val signatureScheme = Crypto.EDDSA_ED25519_SHA512
+        val provider = Crypto.findProvider(signatureScheme.providerName)
+        val issuerKeyPair = Crypto.deriveKeyPairFromEntropy(signatureScheme, BigInteger(entropy))
+        ContentSignerBuilder.build(signatureScheme, issuerKeyPair.private, provider)
+    }
+
+    @Test(timeout = 300_000)
+    fun `should fail to build content signer for incorrect key type`() {
+        val signatureScheme = Crypto.EDDSA_ED25519_SHA512
+        val provider = Crypto.findProvider(signatureScheme.providerName)
+        val issuerKeyPair = Crypto.deriveKeyPairFromEntropy(Crypto.ECDSA_SECP256R1_SHA256, BigInteger(entropy))
+        assertThatExceptionOfType(InvalidKeyException::class.java)
+                .isThrownBy {
+                    ContentSignerBuilder.build(signatureScheme, issuerKeyPair.private, provider)
+                }
+                .withMessage("Incorrect key type EC for signature scheme NONEwithEdDSA")
+    }
+}
@@ -39,9 +39,9 @@ capsule {
def nodeProject = project(':node')

task buildCordaJAR(type: FatCapsule, dependsOn: [
-        nodeProject.tasks.jar,
-        project(':core-deterministic').tasks.assemble,
-        project(':serialization-deterministic').tasks.assemble
+        nodeProject.tasks.named('jar'),
+        project(':core-deterministic').tasks.named('assemble'),
+        project(':serialization-deterministic').tasks.named('assemble')
]) {
    applicationClass 'net.corda.node.Corda'
    archiveBaseName = 'corda'
@@ -23,8 +23,10 @@ class NodesStartStopSingleVmTests(@Suppress("unused") private val iteration: Int
    @Test(timeout = 300_000)
    fun nodesStartStop() {
        driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-            startNode(providedName = ALICE_NAME).getOrThrow()
-            startNode(providedName = BOB_NAME).getOrThrow()
+            val alice = startNode(providedName = ALICE_NAME)
+            val bob = startNode(providedName = BOB_NAME)
+            alice.getOrThrow()
+            bob.getOrThrow()
        }
    }
}
@@ -1,6 +1,7 @@
package net.corda.node.flows

import co.paralleluniverse.fibers.Suspendable
+import net.corda.core.CordaException
import net.corda.core.flows.*
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
@@ -16,7 +17,6 @@ import net.corda.testing.driver.DriverDSL
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeParameters
import net.corda.testing.driver.driver
-import net.corda.testing.node.internal.ListenProcessDeathException
import net.corda.testing.node.internal.assertUncompletedCheckpoints
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions.assertThat
@@ -77,7 +77,7 @@ class FlowCheckpointVersionNodeStartupCheckTest {
    private fun DriverDSL.assertBobFailsToStartWithLogMessage(logMessage: String) {
        assertUncompletedCheckpoints(BOB_NAME, 1)

-        assertFailsWith(ListenProcessDeathException::class) {
+        assertFailsWith(CordaException::class) {
            startNode(NodeParameters(
                    providedName = BOB_NAME,
                    customOverrides = mapOf("devMode" to false)
@@ -1,5 +1,6 @@
package net.corda.node.logging

+import net.corda.core.internal.concurrent.transpose
import net.corda.core.internal.div
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.OpaqueBytes
@@ -22,8 +23,10 @@ class IssueCashLoggingTests {
    fun `issuing and sending cash as payment do not result in duplicate insertion warnings`() {
        val user = User("mark", "dadada", setOf(all()))
        driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS)) {
-            val nodeA = startNode(rpcUsers = listOf(user)).getOrThrow()
-            val nodeB = startNode().getOrThrow()
+            val (nodeA, nodeB) = listOf(startNode(rpcUsers = listOf(user)),
+                    startNode())
+                    .transpose()
+                    .getOrThrow()

            val amount = 1.DOLLARS
            val ref = OpaqueBytes.of(0)
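(Aside, not from the diff: transpose(), imported above, converts a List<CordaFuture<T>> into a CordaFuture<List<T>>, so both driver nodes start concurrently and a single getOrThrow() waits for both. futureA and futureB below are placeholders for the values returned by startNode().)

// Sketch of the pattern used in the test above, assuming futureA and futureB are CordaFuture<NodeHandle>.
val (nodeA, nodeB) = listOf(futureA, futureB)
        .transpose()
        .getOrThrow()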
@@ -1,355 +0,0 @@
-package net.corda.node.services.rpc
-
-import net.corda.client.rpc.CordaRPCClient
-import net.corda.client.rpc.CordaRPCClientConfiguration
-import net.corda.client.rpc.GracefulReconnect
-import net.corda.client.rpc.internal.ReconnectingCordaRPCOps
-import net.corda.client.rpc.notUsed
-import net.corda.core.contracts.Amount
-import net.corda.core.flows.StateMachineRunId
-import net.corda.core.internal.concurrent.transpose
-import net.corda.core.messaging.StateMachineUpdate
-import net.corda.core.node.services.Vault
-import net.corda.core.node.services.vault.PageSpecification
-import net.corda.core.node.services.vault.QueryCriteria
-import net.corda.core.node.services.vault.builder
-import net.corda.core.utilities.NetworkHostAndPort
-import net.corda.core.utilities.OpaqueBytes
-import net.corda.core.utilities.contextLogger
-import net.corda.core.utilities.getOrThrow
-import net.corda.core.utilities.seconds
-import net.corda.finance.contracts.asset.Cash
-import net.corda.finance.flows.CashIssueAndPaymentFlow
-import net.corda.finance.schemas.CashSchemaV1
-import net.corda.node.services.Permissions
-import net.corda.node.services.rpc.RpcReconnectTests.Companion.NUMBER_OF_FLOWS_TO_RUN
-import net.corda.testing.core.DUMMY_BANK_A_NAME
-import net.corda.testing.core.DUMMY_BANK_B_NAME
-import net.corda.testing.driver.DriverParameters
-import net.corda.testing.driver.NodeHandle
-import net.corda.testing.driver.OutOfProcess
-import net.corda.testing.driver.driver
-import net.corda.testing.driver.internal.OutOfProcessImpl
-import net.corda.testing.driver.internal.incrementalPortAllocation
-import net.corda.testing.node.User
-import net.corda.testing.node.internal.FINANCE_CORDAPPS
-import org.assertj.core.api.Assertions.assertThat
-import org.junit.Test
-import java.util.*
-import java.util.concurrent.CountDownLatch
-import java.util.concurrent.TimeUnit
-import java.util.concurrent.atomic.AtomicInteger
-import kotlin.concurrent.thread
-import kotlin.math.absoluteValue
-import kotlin.math.max
-import kotlin.test.assertEquals
-import kotlin.test.assertTrue
-import kotlin.test.currentStackTrace
-
-/**
- * This is a stress test for the rpc reconnection logic, which triggers failures in a probabilistic way.
- *
- * You can adjust the variable [NUMBER_OF_FLOWS_TO_RUN] to adjust the number of flows to run and the duration of the test.
- */
-class RpcReconnectTests {
-
-    companion object {
-        // this many flows take ~5 minutes
-        const val NUMBER_OF_FLOWS_TO_RUN = 100
-
-        private val log = contextLogger()
-    }
-
-    private val portAllocator = incrementalPortAllocation()
-
-    private lateinit var proxy: RandomFailingProxy
-    private lateinit var node: NodeHandle
-    private lateinit var currentAddressPair: AddressPair
-
-    /**
-     * This test showcases and stress tests the demo [ReconnectingCordaRPCOps].
-     *
-     * Note that during node failure events can be lost and starting flows can become unreliable.
-     * The only available way to retry failed flows is to attempt a "logical retry" which is also showcased.
-     *
-     * This test runs flows in a loop and in the background kills the node or restarts it.
-     * Also the RPC connection is made through a proxy that introduces random latencies and is also periodically killed.
-     */
-    @Suppress("ComplexMethod")
-    @Test(timeout=420_000)
-    fun `test that the RPC client is able to reconnect and proceed after node failure, restart, or connection reset`() {
-        val nodeRunningTime = { Random().nextInt(12000) + 8000 }
-
-        val demoUser = User("demo", "demo", setOf(Permissions.all()))
-
-        // When this reaches 0 - the test will end.
-        val flowsCountdownLatch = CountDownLatch(NUMBER_OF_FLOWS_TO_RUN)
-        // These are the expected progress steps for the CashIssueAndPayFlow.
-        val expectedProgress = listOf(
-                "Starting",
-                "Issuing cash",
-                "Generating transaction",
-                "Signing transaction",
-                "Finalising transaction",
-                "Broadcasting transaction to participants",
-                "Paying recipient",
-                "Generating anonymous identities",
-                "Generating transaction",
-                "Signing transaction",
-                "Finalising transaction",
-                "Requesting signature by notary service",
-                "Requesting signature by Notary service",
-                "Validating response from Notary service",
-                "Broadcasting transaction to participants",
-                "Done"
-        )
-
-        driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS, startNodesInProcess = false, inMemoryDB = false)) {
-            fun startBankA(address: NetworkHostAndPort) = startNode(providedName = DUMMY_BANK_A_NAME, rpcUsers = listOf(demoUser), customOverrides = mapOf("rpcSettings.address" to address.toString()))
-            fun startProxy(addressPair: AddressPair) = RandomFailingProxy(serverPort = addressPair.proxyAddress.port, remotePort = addressPair.nodeAddress.port).start()
-
-            val addresses = (1..2).map { getRandomAddressPair() }
-            currentAddressPair = addresses[0]
-
-            proxy = startProxy(currentAddressPair)
-            val (bankA, bankB) = listOf(
-                    startBankA(currentAddressPair.nodeAddress),
-                    startNode(providedName = DUMMY_BANK_B_NAME, rpcUsers = listOf(demoUser))
-            ).transpose().getOrThrow()
-            node = bankA
-
-            val notary = defaultNotaryIdentity
-            val baseAmount = Amount.parseCurrency("0 USD")
-            val issuerRef = OpaqueBytes.of(0x01)
-
-            var numDisconnects = 0
-            var numReconnects = 0
-            val maxStackOccurrences = AtomicInteger()
-
-            val addressesForRpc = addresses.map { it.proxyAddress }
-            // DOCSTART rpcReconnectingRPC
-            val onReconnect = {
-                numReconnects++
-                // We only expect to see a single reconnectOnError in the stack trace. Otherwise we're in danger of stack overflow recursion
-                maxStackOccurrences.set(max(maxStackOccurrences.get(), currentStackTrace().count { it.methodName == "reconnectOnError" }))
-                Unit
-            }
-            val reconnect = GracefulReconnect(onDisconnect = { numDisconnects++ }, onReconnect = onReconnect)
-            val config = CordaRPCClientConfiguration.DEFAULT.copy(
-                    connectionRetryInterval = 1.seconds,
-                    connectionRetryIntervalMultiplier = 1.0
-            )
-            val client = CordaRPCClient(addressesForRpc, configuration = config)
-            val bankAReconnectingRPCConnection = client.start(demoUser.username, demoUser.password, gracefulReconnect = reconnect)
-            val bankAReconnectingRpc = bankAReconnectingRPCConnection.proxy as ReconnectingCordaRPCOps
-            // DOCEND rpcReconnectingRPC
-
-            // Observe the vault and collect the observations.
-            val vaultEvents = Collections.synchronizedList(mutableListOf<Vault.Update<Cash.State>>())
-            // DOCSTART rpcReconnectingRPCVaultTracking
-            val vaultFeed = bankAReconnectingRpc.vaultTrackByWithPagingSpec(
-                    Cash.State::class.java,
-                    QueryCriteria.VaultQueryCriteria(),
-                    PageSpecification(1, 1))
-            val vaultSubscription = vaultFeed.updates.subscribe { update: Vault.Update<Cash.State> ->
-                log.info("vault update produced ${update.produced.map { it.state.data.amount }} consumed ${update.consumed.map { it.ref }}")
-                vaultEvents.add(update)
-            }
-            // DOCEND rpcReconnectingRPCVaultTracking
-
-            // Observe the stateMachine and collect the observations.
-            val stateMachineEvents = Collections.synchronizedList(mutableListOf<StateMachineUpdate>())
-            val stateMachineSubscription = bankAReconnectingRpc.stateMachinesFeed().updates.subscribe { update ->
-                log.info(update.toString())
-                stateMachineEvents.add(update)
-            }
-
-            // While the flows are running, randomly apply a different failure scenario.
-            val nrRestarts = AtomicInteger()
-            thread(name = "Node killer") {
-                while (true) {
-                    if (flowsCountdownLatch.count == 0L) break
-
-                    // Let the node run for a random time interval.
-                    nodeRunningTime().also { ms ->
-                        log.info("Running node for ${ms / 1000} s.")
-                        Thread.sleep(ms.toLong())
-                    }
-
-                    if (flowsCountdownLatch.count == 0L) break
-                    when (Random().nextInt().rem(7).absoluteValue) {
-                        0 -> {
-                            log.info("Forcefully killing node and proxy.")
-                            (node as OutOfProcessImpl).onStopCallback()
-                            (node as OutOfProcess).process.destroyForcibly()
-                            proxy.stop()
-                            node = startBankA(currentAddressPair.nodeAddress).get()
-                            proxy.start()
-                        }
-                        1 -> {
-                            log.info("Forcefully killing node.")
-                            (node as OutOfProcessImpl).onStopCallback()
-                            (node as OutOfProcess).process.destroyForcibly()
-                            node = startBankA(currentAddressPair.nodeAddress).get()
-                        }
-                        2 -> {
-                            log.info("Shutting down node.")
-                            node.stop()
-                            proxy.stop()
-                            node = startBankA(currentAddressPair.nodeAddress).get()
-                            proxy.start()
-                        }
-                        3, 4 -> {
-                            log.info("Killing proxy.")
-                            proxy.stop()
-                            Thread.sleep(Random().nextInt(5000).toLong())
-                            proxy.start()
-                        }
-                        5 -> {
-                            log.info("Dropping connection.")
-                            proxy.failConnection()
-                        }
-                        6 -> {
-                            log.info("Performing failover to a different node")
-                            node.stop()
-                            proxy.stop()
-                            currentAddressPair = (addresses - currentAddressPair).first()
-                            node = startBankA(currentAddressPair.nodeAddress).get()
-                            proxy = startProxy(currentAddressPair)
-                        }
-                    }
-                    nrRestarts.incrementAndGet()
-                }
-            }
-
-            // Start nrOfFlowsToRun and provide a logical retry function that checks the vault.
-            val flowProgressEvents = mutableMapOf<StateMachineRunId, MutableList<String>>()
-            for (amount in (1..NUMBER_OF_FLOWS_TO_RUN)) {
-                // DOCSTART rpcReconnectingRPCFlowStarting
-                bankAReconnectingRpc.runFlowWithLogicalRetry(
-                        runFlow = { rpc ->
-                            log.info("Starting CashIssueAndPaymentFlow for $amount")
-                            val flowHandle = rpc.startTrackedFlowDynamic(
-                                    CashIssueAndPaymentFlow::class.java,
-                                    baseAmount.plus(Amount.parseCurrency("$amount USD")),
-                                    issuerRef,
-                                    bankB.nodeInfo.legalIdentities.first(),
-                                    false,
-                                    notary
-                            )
-                            val flowId = flowHandle.id
-                            log.info("Started flow $amount with flowId: $flowId")
-                            flowProgressEvents.addEvent(flowId, null)
-
-                            flowHandle.stepsTreeFeed?.updates?.notUsed()
-                            flowHandle.stepsTreeIndexFeed?.updates?.notUsed()
-                            // No reconnecting possible.
-                            flowHandle.progress.subscribe(
-                                    { prog ->
-                                        flowProgressEvents.addEvent(flowId, prog)
-                                        log.info("Progress $flowId : $prog")
-                                    },
-                                    { error ->
-                                        log.error("Error thrown in the flow progress observer", error)
-                                    })
-                            flowHandle.id
-                        },
-                        hasFlowStarted = { rpc ->
-                            // Query for a state that is the result of this flow.
-                            val criteria = QueryCriteria.VaultCustomQueryCriteria(builder { CashSchemaV1.PersistentCashState::pennies.equal(amount.toLong() * 100) }, status = Vault.StateStatus.ALL)
-                            val results = rpc.vaultQueryByCriteria(criteria, Cash.State::class.java)
-                            log.info("$amount - Found states ${results.states}")
-                            // The flow has completed if a state is found
-                            results.states.isNotEmpty()
-                        },
-                        onFlowConfirmed = {
-                            flowsCountdownLatch.countDown()
-                            log.info("Flow started for $amount. Remaining flows: ${flowsCountdownLatch.count}")
-                        }
-                )
-                // DOCEND rpcReconnectingRPCFlowStarting
-
-                Thread.sleep(Random().nextInt(250).toLong())
-            }
-
-            log.info("Started all flows")
-
-            // Wait until all flows have been started.
-            val flowsConfirmed = flowsCountdownLatch.await(10, TimeUnit.MINUTES)
-
-            if (flowsConfirmed) {
-                log.info("Confirmed all flows have started.")
-            } else {
-                log.info("Timed out waiting for confirmation that all flows have started. Remaining flows: ${flowsCountdownLatch.count}")
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// Wait for all events to come in and flows to finish.
|
|
||||||
Thread.sleep(4000)
|
|
||||||
|
|
||||||
val nrFailures = nrRestarts.get()
|
|
||||||
log.info("Checking results after $nrFailures restarts.")
|
|
||||||
|
|
||||||
// We should get one disconnect and one reconnect for each failure
|
|
||||||
assertThat(numDisconnects).isEqualTo(numReconnects)
|
|
||||||
assertThat(numReconnects).isLessThanOrEqualTo(nrFailures)
|
|
||||||
assertThat(maxStackOccurrences.get()).isLessThan(2)
|
|
||||||
|
|
||||||
// Query the vault and check that states were created for all flows.
|
|
||||||
fun readCashStates() = bankAReconnectingRpc
|
|
||||||
.vaultQueryByWithPagingSpec(Cash.State::class.java, QueryCriteria.VaultQueryCriteria(status = Vault.StateStatus.CONSUMED), PageSpecification(1, 10000))
|
|
||||||
.states
|
|
||||||
|
|
||||||
var allCashStates = readCashStates()
|
|
||||||
var nrRetries = 0
|
|
||||||
|
|
||||||
// It might be necessary to wait more for all events to arrive when the node is slow.
|
|
||||||
while (allCashStates.size < NUMBER_OF_FLOWS_TO_RUN && nrRetries++ < 50) {
|
|
||||||
Thread.sleep(2000)
|
|
||||||
allCashStates = readCashStates()
|
|
||||||
}
|
|
||||||
|
|
||||||
val allCash = allCashStates.map { it.state.data.amount.quantity }.toSet()
|
|
||||||
val missingCash = (1..NUMBER_OF_FLOWS_TO_RUN).filterNot { allCash.contains(it.toLong() * 100) }
|
|
||||||
log.info("Missing cash states: $missingCash")
|
|
||||||
|
|
||||||
assertEquals(NUMBER_OF_FLOWS_TO_RUN, allCashStates.size, "Not all flows were executed successfully")
|
|
||||||
|
|
||||||
// The progress status for each flow can only miss the last events, because the node might have been killed.
|
|
||||||
val missingProgressEvents = flowProgressEvents.filterValues { expectedProgress.subList(0, it.size) != it }
|
|
||||||
assertTrue(missingProgressEvents.isEmpty(), "The flow progress tracker is missing events: $missingProgressEvents")
|
|
||||||
|
|
||||||
// DOCSTART missingVaultEvents
|
|
||||||
// Check that enough vault events were received.
|
|
||||||
// This check is fuzzy because events can go missing during node restarts.
|
|
||||||
// Ideally there should be nrOfFlowsToRun events receive but some might get lost for each restart.
|
|
||||||
assertThat(vaultEvents!!.size + nrFailures * 3).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN)
|
|
||||||
// DOCEND missingVaultEvents
|
|
||||||
|
|
||||||
// Check that no flow was triggered twice.
|
|
||||||
val duplicates = allCashStates.groupBy { it.state.data.amount }.filterValues { it.size > 1 }
|
|
||||||
assertTrue(duplicates.isEmpty(), "${duplicates.size} flows were retried illegally.")
|
|
||||||
|
|
||||||
log.info("State machine events seen: ${stateMachineEvents!!.size}")
|
|
||||||
// State machine events are very likely to get lost more often because they seem to be sent with a delay.
|
|
||||||
assertThat(stateMachineEvents.count { it is StateMachineUpdate.Added }).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN / 3)
|
|
||||||
assertThat(stateMachineEvents.count { it is StateMachineUpdate.Removed }).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN / 3)
|
|
||||||
|
|
||||||
// Stop the observers.
|
|
||||||
vaultSubscription.unsubscribe()
|
|
||||||
stateMachineSubscription.unsubscribe()
|
|
||||||
bankAReconnectingRPCConnection.close()
|
|
||||||
}
|
|
||||||
|
|
||||||
proxy.close()
|
|
||||||
}
|
|
||||||
|
|
||||||
@Synchronized
|
|
||||||
fun MutableMap<StateMachineRunId, MutableList<String>>.addEvent(id: StateMachineRunId, progress: String?): Boolean {
|
|
||||||
return getOrPut(id) { mutableListOf() }.let { if (progress != null) it.add(progress) else false }
|
|
||||||
}
|
|
||||||
private fun getRandomAddressPair() = AddressPair(getRandomAddress(), getRandomAddress())
|
|
||||||
private fun getRandomAddress() = NetworkHostAndPort("localhost", portAllocator.nextPort())
|
|
||||||
|
|
||||||
data class AddressPair(val proxyAddress: NetworkHostAndPort, val nodeAddress: NetworkHostAndPort)
|
|
||||||
}
|
|
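For orientation only (not part of this commit): the pattern the test above exercises can be reduced to the sketch below, using only the APIs the test itself calls (CordaRPCClient started with a GracefulReconnect so the returned proxy transparently reconnects, plus a vault-tracking feed that survives node restarts). The address, credentials and the println callbacks are placeholders; flow starts that must not be duplicated would additionally go through ReconnectingCordaRPCOps.runFlowWithLogicalRetry as shown in the test.

import net.corda.client.rpc.CordaRPCClient
import net.corda.client.rpc.CordaRPCClientConfiguration
import net.corda.client.rpc.GracefulReconnect
import net.corda.core.node.services.vault.PageSpecification
import net.corda.core.node.services.vault.QueryCriteria
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.finance.contracts.asset.Cash

fun trackVaultAcrossRestarts(rpcAddress: NetworkHostAndPort, username: String, password: String) {
    // The callbacks are optional hooks; the test above uses them to count disconnects/reconnects.
    val reconnect = GracefulReconnect(onDisconnect = { println("disconnected") }, onReconnect = { println("reconnected") })
    val client = CordaRPCClient(rpcAddress, configuration = CordaRPCClientConfiguration.DEFAULT)
    client.start(username, password, gracefulReconnect = reconnect).use { connection ->
        // The same vault-tracking call the test makes; the subscription keeps delivering
        // updates after the proxy reconnects, although individual events can still be missed.
        val feed = connection.proxy.vaultTrackByWithPagingSpec(
                Cash.State::class.java,
                QueryCriteria.VaultQueryCriteria(),
                PageSpecification(1, 1))
        feed.updates.subscribe { update -> println("Vault update: $update") }
    }
}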
@@ -62,30 +62,49 @@ abstract class StateMachineErrorHandlingTest {
         }
     }
 
-    internal fun DriverDSL.createBytemanNode(
-            providedName: CordaX500Name,
+    internal fun DriverDSL.createBytemanNode(nodeProvidedName: CordaX500Name): Pair<NodeHandle, Int> {
+        val port = nextPort()
+        val bytemanNodeHandle = (this as InternalDriverDSL).startNode(
+            NodeParameters(
+                providedName = nodeProvidedName,
+                rpcUsers = listOf(rpcUser)
+            ),
+            bytemanPort = port
+        )
+        return bytemanNodeHandle.getOrThrow() to port
+    }
+
+    internal fun DriverDSL.createNode(nodeProvidedName: CordaX500Name): NodeHandle {
+        return (this as InternalDriverDSL).startNode(
+            NodeParameters(
+                providedName = nodeProvidedName,
+                rpcUsers = listOf(rpcUser)
+            )
+        ).getOrThrow()
+    }
+
+    internal fun DriverDSL.createNodeAndBytemanNode(
+        nodeProvidedName: CordaX500Name,
+        bytemanNodeProvidedName: CordaX500Name,
         additionalCordapps: Collection<TestCordapp> = emptyList()
-    ): Pair<NodeHandle, Int> {
+    ): Triple<NodeHandle, NodeHandle, Int> {
         val port = nextPort()
         val nodeHandle = (this as InternalDriverDSL).startNode(
             NodeParameters(
-                providedName = providedName,
+                providedName = nodeProvidedName,
+                rpcUsers = listOf(rpcUser),
+                additionalCordapps = additionalCordapps
+            )
+        )
+        val bytemanNodeHandle = startNode(
+            NodeParameters(
+                providedName = bytemanNodeProvidedName,
                 rpcUsers = listOf(rpcUser),
                 additionalCordapps = additionalCordapps
             ),
             bytemanPort = port
-        ).getOrThrow()
-        return nodeHandle to port
-    }
-
-    internal fun DriverDSL.createNode(providedName: CordaX500Name, additionalCordapps: Collection<TestCordapp> = emptyList()): NodeHandle {
-        return startNode(
-            NodeParameters(
-                providedName = providedName,
-                rpcUsers = listOf(rpcUser),
-                additionalCordapps = additionalCordapps
-            )
-        ).getOrThrow()
+        )
+        return Triple(nodeHandle.getOrThrow(), bytemanNodeHandle.getOrThrow(), port)
     }
 
     internal fun submitBytemanRules(rules: String, port: Int) {
@@ -285,4 +304,4 @@ abstract class StateMachineErrorHandlingTest {
     internal val stateMachineManagerClassName: String by lazy {
         Class.forName("net.corda.node.services.statemachine.SingleThreadedStateMachineManager").name
     }
 }
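Not part of the diff itself: the hunks that follow all switch from the two single-node helpers to createNodeAndBytemanNode, so the typical wiring in those tests now looks roughly like the sketch here. startDriver, ALICE_NAME, CHARLIE_NAME and FlowStateMachineImpl come from the surrounding test code, and the Byteman rule body is deliberately left as a placeholder.

startDriver {
    // One plain node plus one Byteman-instrumented node, created in a single call.
    val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
    val rules = """
        RULE Create Counter
        CLASS ${FlowStateMachineImpl::class.java.name}
        # remaining rule text elided - see the rules in the tests below
    """.trimIndent()
    submitBytemanRules(rules, port)
    // run the flow between the two nodes and assert on retry / flow-hospital behaviour
}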
@@ -35,8 +35,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error recording a transaction inside of ReceiveFinalityFlow will keep the flow in for observation`() {
         startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
-            val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)
 
             // could not get rule for FinalityDoctor + observation counter to work
             val rules = """
@@ -97,8 +96,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error resolving a transaction's dependencies inside of ReceiveFinalityFlow will keep the flow in for observation`() {
         startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
-            val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)
 
             // could not get rule for FinalityDoctor + observation counter to work
             val rules = """
@@ -161,8 +159,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action while receiving a transaction inside of ReceiveFinalityFlow will be retried and complete successfully`() {
         startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
-            val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)
 
             val rules = """
                 RULE Create Counter
@@ -229,8 +226,7 @@ class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action while receiving a transaction inside of ReceiveFinalityFlow will be retried and be kept for observation is error persists`() {
         startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
-            val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME, FINANCE_CORDAPPS)
 
             val rules = """
                 RULE Create Counter
@@ -40,8 +40,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
    fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -88,8 +87,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `unexpected error during flow initialisation throws exception to client`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
             val rules = """
                 RULE Create Counter
                 CLASS ${FlowStateMachineImpl::class.java.name}
@@ -134,8 +132,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during initialisation when trying to rollback the flow's database transaction the flow is able to retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -187,8 +184,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during initialisation when trying to close the flow's database transaction the flow is able to retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -242,8 +238,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -298,8 +293,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during retrying a flow that failed when committing its original checkpoint will retry the flow again and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Throw exception on executeCommitTransaction action after first suspend + commit
@@ -351,8 +345,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and complete successfully`() {
         startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -400,8 +393,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
         startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -464,8 +456,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - session init can be retried when there is a transient connection error to the database`() {
         startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -529,8 +520,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - session init can be retried when there is a transient connection error to the database goes to observation if error persists`() {
         startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -35,8 +35,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with SendInitial action is retried 3 times and kept for observation if error persists`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -87,8 +86,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with SendInitial action that does not persist will retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -135,8 +133,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with AcknowledgeMessages action is swallowed and flow completes successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Set flag when inside executeAcknowledgeMessages
@@ -230,8 +227,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during flow retry when executing retryFlowFromSafePoint the flow is able to retry and recover`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Set flag when executing first suspend
@@ -296,8 +292,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action that occurs after the first suspend will retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             // seems to be restarting the flow from the beginning every time
             val rules = """
@@ -362,8 +357,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             // seems to be restarting the flow from the beginning every time
             val rules = """
@@ -419,8 +413,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `error during transition with CommitTransaction action and ConstraintViolationException that occurs when completing a flow will retry and be kept for observation if error persists`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -488,8 +481,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `flow can be retried when there is a transient connection error to the database`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -552,8 +544,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `flow can be retried when there is a transient connection error to the database goes to observation if error persists`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -610,8 +601,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `responding flow - error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
         startDriver {
-            val (charlie, port) = createBytemanNode(CHARLIE_NAME)
-            val alice = createNode(ALICE_NAME)
+            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -103,8 +103,7 @@ class StateMachineKillFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `flow killed when it is in the flow hospital for observation is removed correctly`() {
         startDriver {
-            val (alice, port) = createBytemanNode(ALICE_NAME)
-            val charlie = createNode(CHARLIE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -40,8 +40,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `initiating subflow - error during transition with CommitTransaction action that occurs during the first send will retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -119,8 +118,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `initiating subflow - error during transition with CommitTransaction action that occurs after the first receive will retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -190,8 +188,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `inline subflow - error during transition with CommitTransaction action that occurs during the first send will retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -253,8 +250,7 @@ class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
     @Test(timeout = 300_000)
     fun `inline subflow - error during transition with CommitTransaction action that occurs during the first receive will retry and complete successfully`() {
         startDriver {
-            val charlie = createNode(CHARLIE_NAME)
-            val (alice, port) = createBytemanNode(ALICE_NAME)
+            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
 
             val rules = """
                 RULE Create Counter
@@ -41,7 +41,7 @@ class AddressBindingFailureTests {
 
         assertThatThrownBy {
             driver(DriverParameters(startNodesInProcess = false,
-                                    notarySpecs = listOf(NotarySpec(notaryName)),
+                                    notarySpecs = listOf(NotarySpec(notaryName, startInProcess = false)),
                                     notaryCustomOverrides = mapOf("p2pAddress" to address.toString()),
                                     portAllocation = portAllocation,
                                     cordappsForAllNodes = emptyList())
@@ -0,0 +1,99 @@
package net.corda.node.customcheckpointserializer

import com.nhaarman.mockito_kotlin.doReturn
import com.nhaarman.mockito_kotlin.whenever
import net.corda.core.crypto.generateKeyPair
import net.corda.core.serialization.EncodingWhitelist
import net.corda.core.serialization.internal.CheckpointSerializationContext
import net.corda.core.serialization.internal.checkpointDeserialize
import net.corda.core.serialization.internal.checkpointSerialize
import net.corda.coretesting.internal.rigorousMock
import net.corda.serialization.internal.AllWhitelist
import net.corda.serialization.internal.CheckpointSerializationContextImpl
import net.corda.serialization.internal.CordaSerializationEncoding
import net.corda.testing.core.internal.CheckpointSerializationEnvironmentRule
import org.junit.Assert
import org.junit.Rule
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized

@RunWith(Parameterized::class)
class CustomCheckpointSerializerTest(private val compression: CordaSerializationEncoding?) {
    companion object {
        @Parameterized.Parameters(name = "{0}")
        @JvmStatic
        fun compression() = arrayOf<CordaSerializationEncoding?>(null) + CordaSerializationEncoding.values()
    }

    @get:Rule
    val serializationRule = CheckpointSerializationEnvironmentRule(inheritable = true)
    private val context: CheckpointSerializationContext = CheckpointSerializationContextImpl(
            deserializationClassLoader = javaClass.classLoader,
            whitelist = AllWhitelist,
            properties = emptyMap(),
            objectReferencesEnabled = true,
            encoding = compression,
            encodingWhitelist = rigorousMock<EncodingWhitelist>().also {
                if (compression != null) doReturn(true).whenever(it).acceptEncoding(compression)
            },
            checkpointCustomSerializers = listOf(
                    TestCorDapp.TestAbstractClassSerializer(),
                    TestCorDapp.TestClassSerializer(),
                    TestCorDapp.TestInterfaceSerializer(),
                    TestCorDapp.TestFinalClassSerializer(),
                    TestCorDapp.BrokenPublicKeySerializer()
            )
    )

    @Test(timeout=300_000)
    fun `test custom checkpoint serialization`() {
        testBrokenMapSerialization(DifficultToSerialize.BrokenMapClass())
    }

    @Test(timeout=300_000)
    fun `test custom checkpoint serialization using interface`() {
        testBrokenMapSerialization(DifficultToSerialize.BrokenMapInterfaceImpl())
    }

    @Test(timeout=300_000)
    fun `test custom checkpoint serialization using abstract class`() {
        testBrokenMapSerialization(DifficultToSerialize.BrokenMapAbstractImpl())
    }

    @Test(timeout=300_000)
    fun `test custom checkpoint serialization using final class`() {
        testBrokenMapSerialization(DifficultToSerialize.BrokenMapFinal())
    }

    @Test(timeout=300_000)
    fun `test PublicKey serializer has not been overridden`() {

        val publicKey = generateKeyPair().public

        // Serialize/deserialize
        val checkpoint = publicKey.checkpointSerialize(context)
        val deserializedCheckpoint = checkpoint.checkpointDeserialize(context)

        // Check the elements are as expected
        Assert.assertArrayEquals(publicKey.encoded, deserializedCheckpoint.encoded)
    }

    private fun testBrokenMapSerialization(brokenMap : MutableMap<String, String>): MutableMap<String, String> {
        // Add elements to the map
        brokenMap.putAll(mapOf("key" to "value"))

        // Serialize/deserialize
        val checkpoint = brokenMap.checkpointSerialize(context)
        val deserializedCheckpoint = checkpoint.checkpointDeserialize(context)

        // Check the elements are as expected
        Assert.assertEquals(1, deserializedCheckpoint.size)
        Assert.assertEquals("value", deserializedCheckpoint.get("key"))

        // Return map for extra checks
        return deserializedCheckpoint
    }
}
@@ -0,0 +1,27 @@
package net.corda.node.customcheckpointserializer

import net.corda.core.flows.FlowException

class DifficultToSerialize {

    // Broken Map
    // This map breaks the rules for the put method. Making the normal map serializer fail.

    open class BrokenMapBaseImpl<K,V>(delegate: MutableMap<K, V> = mutableMapOf()) : MutableMap<K,V> by delegate {
        override fun put(key: K, value: V): V? = throw FlowException("Broken on purpose")
    }

    // A class to test custom serializers applied to implementations
    class BrokenMapClass<K,V> : BrokenMapBaseImpl<K, V>()

    // An interface and implementation to test custom serializers applied to interface types
    interface BrokenMapInterface<K, V> : MutableMap<K, V>
    class BrokenMapInterfaceImpl<K,V> : BrokenMapBaseImpl<K, V>(), BrokenMapInterface<K, V>

    // An abstract class and implementation to test custom serializers applied to interface types
    abstract class BrokenMapAbstract<K, V> : BrokenMapBaseImpl<K, V>(), MutableMap<K, V>
    class BrokenMapAbstractImpl<K,V> : BrokenMapAbstract<K, V>()

    // A final class
    final class BrokenMapFinal<K, V>: BrokenMapBaseImpl<K, V>()
}
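The broken maps above are exactly the shapes the CheckpointCustomSerializer interface used throughout these tests is meant to handle. As a reading aid only (the real serializers are in TestCorDapp further down this diff, and the class below is hypothetical), a proxy-based serializer for BrokenMapClass would look roughly like this, assuming the same package and imports as the file above:

// Illustrative sketch, mirroring TestCorDapp.TestInterfaceSerializer: the un-serializable map is
// mapped onto a plain HashMap proxy and rebuilt on the way back.
class BrokenMapClassSerializer :
        CheckpointCustomSerializer<DifficultToSerialize.BrokenMapClass<Any, Any>, HashMap<Any, Any>> {

    override fun toProxy(obj: DifficultToSerialize.BrokenMapClass<Any, Any>): HashMap<Any, Any> {
        // Copy the entries into an ordinary HashMap that the checkpoint serializer can handle.
        val proxy = HashMap<Any, Any>()
        return obj.toMap(proxy)
    }

    override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapClass<Any, Any> {
        // putAll goes through the delegate map, so the deliberately broken put() override is not hit.
        return DifficultToSerialize.BrokenMapClass<Any, Any>().also { it.putAll(proxy) }
    }
}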
@@ -0,0 +1,59 @@
package net.corda.node.customcheckpointserializer

import net.corda.core.flows.FlowLogic
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.messaging.startFlow
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.utilities.getOrThrow
import net.corda.testing.driver.driver
import net.corda.testing.driver.logFile
import org.assertj.core.api.Assertions
import org.junit.Test
import java.time.Duration

class DuplicateSerializerLogTest{
    @Test(timeout=300_000)
    fun `check duplicate serialisers are logged`() {
        driver {
            val node = startNode(startInSameProcess = false).getOrThrow()
            node.rpc.startFlow(::TestFlow).returnValue.get()

            val text = node.logFile().readLines().filter { it.startsWith("[WARN") }

            // Initial message is correct
            Assertions.assertThat(text).anyMatch {it.contains("Duplicate custom checkpoint serializer for type net.corda.node.customcheckpointserializer.DifficultToSerialize\$BrokenMapInterface<java.lang.Object, java.lang.Object>. Serializers: ")}
            // Message mentions TestInterfaceSerializer
            Assertions.assertThat(text).anyMatch {it.contains("net.corda.node.customcheckpointserializer.TestCorDapp\$TestInterfaceSerializer")}
            // Message mentions DuplicateSerializer
            Assertions.assertThat(text).anyMatch {it.contains("net.corda.node.customcheckpointserializer.DuplicateSerializerLogTest\$DuplicateSerializer")}
        }
    }

    @StartableByRPC
    @InitiatingFlow
    class TestFlow : FlowLogic<DifficultToSerialize.BrokenMapInterface<String, String>>() {
        override fun call(): DifficultToSerialize.BrokenMapInterface<String, String> {
            val brokenMap: DifficultToSerialize.BrokenMapInterface<String, String> = DifficultToSerialize.BrokenMapInterfaceImpl()
            brokenMap.putAll(mapOf("test" to "input"))

            sleep(Duration.ofSeconds(0))

            return brokenMap
        }
    }

    @Suppress("unused")
    class DuplicateSerializer :
            CheckpointCustomSerializer<DifficultToSerialize.BrokenMapInterface<Any, Any>, HashMap<Any, Any>> {

        override fun toProxy(obj: DifficultToSerialize.BrokenMapInterface<Any, Any>): HashMap<Any, Any> {
            val proxy = HashMap<Any, Any>()
            return obj.toMap(proxy)
        }
        override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapInterface<Any, Any> {
            return DifficultToSerialize.BrokenMapInterfaceImpl<Any, Any>()
                    .also { it.putAll(proxy) }
        }
    }
}
@@ -0,0 +1,58 @@
package net.corda.node.customcheckpointserializer

import net.corda.core.flows.FlowLogic
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.messaging.startFlow
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.CordaSerializable
import net.corda.core.utilities.getOrThrow
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.logFile
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions
import org.junit.Test
import java.time.Duration

class DuplicateSerializerLogWithSameSerializerTest {
    @Test(timeout=300_000)
    fun `check duplicate serialisers are logged not logged for the same class`() {

        // Duplicate the cordapp in this node
        driver(DriverParameters(cordappsForAllNodes = listOf(this.enclosedCordapp(), this.enclosedCordapp()))) {
            val node = startNode(startInSameProcess = false).getOrThrow()
            node.rpc.startFlow(::TestFlow).returnValue.get()

            val text = node.logFile().readLines().filter { it.startsWith("[WARN") }

            // Initial message is not logged
            Assertions.assertThat(text)
                    .anyMatch { !it.contains("Duplicate custom checkpoint serializer for type ") }
            // Log does not mention DuplicateSerializerThatShouldNotBeLogged
            Assertions.assertThat(text)
                    .anyMatch { !it.contains("DuplicateSerializerThatShouldNotBeLogged") }
        }
    }

    @CordaSerializable
    class UnusedClass

    @Suppress("unused")
    class DuplicateSerializerThatShouldNotBeLogged : CheckpointCustomSerializer<UnusedClass, String> {
        override fun toProxy(obj: UnusedClass): String = ""
        override fun fromProxy(proxy: String): UnusedClass = UnusedClass()
    }

    @StartableByRPC
    @InitiatingFlow
    class TestFlow : FlowLogic<UnusedClass>() {
        override fun call(): UnusedClass {
            val unusedClass = UnusedClass()

            sleep(Duration.ofSeconds(0))

            return unusedClass
        }
    }
}
@@ -0,0 +1,75 @@
package net.corda.node.customcheckpointserializer

import co.paralleluniverse.fibers.Suspendable
import net.corda.testing.node.MockNetwork
import net.corda.testing.node.MockNetworkParameters
import org.assertj.core.api.Assertions
import org.junit.After
import org.junit.Before
import org.junit.Test

class MockNetworkCustomCheckpointSerializerTest {
    private lateinit var mockNetwork: MockNetwork

    @Before
    fun setup() {
        mockNetwork = MockNetwork(MockNetworkParameters(cordappsForAllNodes = listOf(TestCorDapp.getCorDapp())))
    }

    @After
    fun shutdown() {
        mockNetwork.stopNodes()
    }

    @Test(timeout = 300_000)
    fun `flow suspend with custom kryo serializer`() {
        val node = mockNetwork.createPartyNode()
        val expected = 5
        val actual = node.startFlow(TestCorDapp.TestFlowWithDifficultToSerializeLocalVariable(5)).get()

        Assertions.assertThat(actual).isEqualTo(expected)
    }

    @Test(timeout = 300_000)
    fun `check references are restored correctly`() {
        val node = mockNetwork.createPartyNode()
        val expectedReference = DifficultToSerialize.BrokenMapClass<String, Int>()
        expectedReference.putAll(mapOf("one" to 1))
        val actualReference = node.startFlow(TestCorDapp.TestFlowCheckingReferencesWork(expectedReference)).get()

        Assertions.assertThat(actualReference).isSameAs(expectedReference)
        Assertions.assertThat(actualReference["one"]).isEqualTo(1)
    }

    @Test(timeout = 300_000)
    @Suspendable
    fun `check serialization of interfaces`() {
        val node = mockNetwork.createPartyNode()
        val result = node.startFlow(TestCorDapp.TestFlowWithDifficultToSerializeLocalVariableAsInterface(5)).get()
        Assertions.assertThat(result).isEqualTo(5)
    }

    @Test(timeout = 300_000)
    @Suspendable
    fun `check serialization of abstract classes`() {
        val node = mockNetwork.createPartyNode()
        val result = node.startFlow(TestCorDapp.TestFlowWithDifficultToSerializeLocalVariableAsAbstract(5)).get()
        Assertions.assertThat(result).isEqualTo(5)
    }

    @Test(timeout = 300_000)
    @Suspendable
    fun `check serialization of final classes`() {
        val node = mockNetwork.createPartyNode()
        val result = node.startFlow(TestCorDapp.TestFlowWithDifficultToSerializeLocalVariableAsFinal(5)).get()
        Assertions.assertThat(result).isEqualTo(5)
    }

    @Test(timeout = 300_000)
    @Suspendable
    fun `check PublicKey serializer has not been overridden`() {
        val node = mockNetwork.createPartyNode()
        val result = node.startFlow(TestCorDapp.TestFlowCheckingPublicKeySerializer()).get()
        Assertions.assertThat(result.encoded).isEqualTo(node.info.legalIdentities.first().owningKey.encoded)
    }
}
@@ -0,0 +1,75 @@
package net.corda.node.customcheckpointserializer

import com.nhaarman.mockito_kotlin.doReturn
import com.nhaarman.mockito_kotlin.whenever
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.core.serialization.EncodingWhitelist
import net.corda.core.serialization.internal.CheckpointSerializationContext
import net.corda.core.serialization.internal.checkpointDeserialize
import net.corda.core.serialization.internal.checkpointSerialize
import net.corda.coretesting.internal.rigorousMock
import net.corda.serialization.internal.AllWhitelist
import net.corda.serialization.internal.CheckpointSerializationContextImpl
import net.corda.serialization.internal.CordaSerializationEncoding
import net.corda.testing.core.internal.CheckpointSerializationEnvironmentRule
import org.junit.Assert
import org.junit.Rule
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized

@RunWith(Parameterized::class)
class ReferenceLoopTest(private val compression: CordaSerializationEncoding?) {
    companion object {
        @Parameterized.Parameters(name = "{0}")
        @JvmStatic
        fun compression() = arrayOf<CordaSerializationEncoding?>(null) + CordaSerializationEncoding.values()
    }

    @get:Rule
    val serializationRule = CheckpointSerializationEnvironmentRule(inheritable = true)
    private val context: CheckpointSerializationContext = CheckpointSerializationContextImpl(
            deserializationClassLoader = javaClass.classLoader,
            whitelist = AllWhitelist,
            properties = emptyMap(),
            objectReferencesEnabled = true,
            encoding = compression,
            encodingWhitelist = rigorousMock<EncodingWhitelist>()
                    .also {
                        if (compression != null) doReturn(true).whenever(it)
                                .acceptEncoding(compression)
                    },
            checkpointCustomSerializers = listOf(PersonSerializer()))

    @Test(timeout=300_000)
    fun `custom checkpoint serialization with reference loop`() {
        val person = Person("Test name")

        val result = person.checkpointSerialize(context).checkpointDeserialize(context)

        Assert.assertEquals("Test name", result.name)
        Assert.assertEquals("Test name", result.bestFriend.name)
        Assert.assertSame(result, result.bestFriend)
    }

    /**
     * Test class that will hold a reference to itself
     */
    class Person(val name: String, bestFriend: Person? = null) {
        val bestFriend: Person = bestFriend ?: this
    }

    /**
     * Custom serializer for the Person class
     */
    @Suppress("unused")
    class PersonSerializer : CheckpointCustomSerializer<Person, Map<String, Any>> {
        override fun toProxy(obj: Person): Map<String, Any> {
            return mapOf("name" to obj.name, "bestFriend" to obj.bestFriend)
        }

        override fun fromProxy(proxy: Map<String, Any>): Person {
            return Person(proxy["name"] as String, proxy["bestFriend"] as Person?)
        }
    }
}
@@ -0,0 +1,214 @@
package net.corda.node.customcheckpointserializer

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowException
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByRPC
import net.corda.core.serialization.CheckpointCustomSerializer
import net.corda.testing.node.internal.CustomCordapp
import net.corda.testing.node.internal.enclosedCordapp
import net.i2p.crypto.eddsa.EdDSAPublicKey
import org.assertj.core.api.Assertions
import java.security.PublicKey
import java.time.Duration

/**
 * Contains all the flows and custom serializers for testing custom checkpoint serializers
 */
class TestCorDapp {

    companion object {
        fun getCorDapp(): CustomCordapp = enclosedCordapp()
    }

    // Flows
    @StartableByRPC
    class TestFlowWithDifficultToSerializeLocalVariableAsAbstract(private val purchase: Int) : FlowLogic<Int>() {
        @Suspendable
        override fun call(): Int {

            // This object is difficult to serialize with Kryo
            val difficultToSerialize: DifficultToSerialize.BrokenMapAbstract<String, Int> = DifficultToSerialize.BrokenMapAbstractImpl()
            difficultToSerialize.putAll(mapOf("foo" to purchase))

            // Force a checkpoint
            sleep(Duration.ofSeconds(0))

            // Return value from deserialized object
            return difficultToSerialize["foo"] ?: 0
        }
    }

    @StartableByRPC
    class TestFlowWithDifficultToSerializeLocalVariableAsFinal(private val purchase: Int) : FlowLogic<Int>() {
        @Suspendable
        override fun call(): Int {

            // This object is difficult to serialize with Kryo
            val difficultToSerialize: DifficultToSerialize.BrokenMapFinal<String, Int> = DifficultToSerialize.BrokenMapFinal()
            difficultToSerialize.putAll(mapOf("foo" to purchase))

            // Force a checkpoint
            sleep(Duration.ofSeconds(0))

            // Return value from deserialized object
            return difficultToSerialize["foo"] ?: 0
        }
    }

    @StartableByRPC
    class TestFlowWithDifficultToSerializeLocalVariableAsInterface(private val purchase: Int) : FlowLogic<Int>() {
        @Suspendable
        override fun call(): Int {

            // This object is difficult to serialize with Kryo
            val difficultToSerialize: DifficultToSerialize.BrokenMapInterface<String, Int> = DifficultToSerialize.BrokenMapInterfaceImpl()
            difficultToSerialize.putAll(mapOf("foo" to purchase))

            // Force a checkpoint
            sleep(Duration.ofSeconds(0))

            // Return value from deserialized object
            return difficultToSerialize["foo"] ?: 0
        }
    }

    @StartableByRPC
    class TestFlowWithDifficultToSerializeLocalVariable(private val purchase: Int) : FlowLogic<Int>() {
        @Suspendable
        override fun call(): Int {

            // This object is difficult to serialize with Kryo
            val difficultToSerialize: DifficultToSerialize.BrokenMapClass<String, Int> = DifficultToSerialize.BrokenMapClass()
            difficultToSerialize.putAll(mapOf("foo" to purchase))

            // Force a checkpoint
            sleep(Duration.ofSeconds(0))

            // Return value from deserialized object
            return difficultToSerialize["foo"] ?: 0
        }
    }

    @StartableByRPC
    class TestFlowCheckingReferencesWork(private val reference: DifficultToSerialize.BrokenMapClass<String, Int>) :
            FlowLogic<DifficultToSerialize.BrokenMapClass<String, Int>>() {

        private val referenceField = reference
        @Suspendable
        override fun call(): DifficultToSerialize.BrokenMapClass<String, Int> {

            val ref = referenceField

            // Force a checkpoint
            sleep(Duration.ofSeconds(0))

            // Check all objects refer to same object
            Assertions.assertThat(reference).isSameAs(referenceField)
            Assertions.assertThat(referenceField).isSameAs(ref)

            // Return deserialized object
            return ref
        }
    }

    @StartableByRPC
    class TestFlowCheckingPublicKeySerializer :
            FlowLogic<PublicKey>() {

        @Suspendable
        override fun call(): PublicKey {
            val ref = ourIdentity.owningKey

            // Force a checkpoint
            sleep(Duration.ofSeconds(0))

            // Return deserialized object
            return ref
        }
    }

    // Custom serializers

    @Suppress("unused")
    class TestInterfaceSerializer :
            CheckpointCustomSerializer<DifficultToSerialize.BrokenMapInterface<Any, Any>, HashMap<Any, Any>> {

        override fun toProxy(obj: DifficultToSerialize.BrokenMapInterface<Any, Any>): HashMap<Any, Any> {
            val proxy = HashMap<Any, Any>()
            return obj.toMap(proxy)
        }
        override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapInterface<Any, Any> {
            return DifficultToSerialize.BrokenMapInterfaceImpl<Any, Any>()
                    .also { it.putAll(proxy) }
        }
    }

    @Suppress("unused")
    class TestClassSerializer :
            CheckpointCustomSerializer<DifficultToSerialize.BrokenMapClass<Any, Any>, HashMap<Any, Any>> {

        override fun toProxy(obj: DifficultToSerialize.BrokenMapClass<Any, Any>): HashMap<Any, Any> {
            val proxy = HashMap<Any, Any>()
            return obj.toMap(proxy)
        }
        override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapClass<Any, Any> {
            return DifficultToSerialize.BrokenMapClass<Any, Any>()
                    .also { it.putAll(proxy) }
        }
    }

    @Suppress("unused")
    class TestAbstractClassSerializer :
            CheckpointCustomSerializer<DifficultToSerialize.BrokenMapAbstract<Any, Any>, HashMap<Any, Any>> {

        override fun toProxy(obj: DifficultToSerialize.BrokenMapAbstract<Any, Any>): HashMap<Any, Any> {
            val proxy = HashMap<Any, Any>()
            return obj.toMap(proxy)
        }
        override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapAbstract<Any, Any> {
            return DifficultToSerialize.BrokenMapAbstractImpl<Any, Any>()
                    .also { it.putAll(proxy) }
        }
    }

    @Suppress("unused")
    class TestFinalClassSerializer :
            CheckpointCustomSerializer<DifficultToSerialize.BrokenMapFinal<Any, Any>, HashMap<Any, Any>> {

        override fun toProxy(obj: DifficultToSerialize.BrokenMapFinal<Any, Any>): HashMap<Any, Any> {
            val proxy = HashMap<Any, Any>()
            return obj.toMap(proxy)
        }
        override fun fromProxy(proxy: HashMap<Any, Any>): DifficultToSerialize.BrokenMapFinal<Any, Any> {
            return DifficultToSerialize.BrokenMapFinal<Any, Any>()
                    .also { it.putAll(proxy) }
        }
    }

    @Suppress("unused")
    class BrokenPublicKeySerializer :
            CheckpointCustomSerializer<PublicKey, String> {
        override fun toProxy(obj: PublicKey): String {
            throw FlowException("Broken on purpose")
        }

        override fun fromProxy(proxy: String): PublicKey {
            throw FlowException("Broken on purpose")
        }
    }

    @Suppress("unused")
    class BrokenEdDSAPublicKeySerializer :
            CheckpointCustomSerializer<EdDSAPublicKey, String> {
        override fun toProxy(obj: EdDSAPublicKey): String {
            throw FlowException("Broken on purpose")
        }

        override fun fromProxy(proxy: String): EdDSAPublicKey {
            throw FlowException("Broken on purpose")
        }
    }

}
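The DifficultToSerialize.BrokenMap* types used by these flows are defined in another test file and are not part of this hunk. As an illustration only, a hypothetical class in the same spirit as the BrokenMap added later in this change delegates to a backing map but refuses put, which defeats Kryo's default entry-by-entry map re-population on deserialization unless a CheckpointCustomSerializer like the ones above takes over:

// Hypothetical sketch; the real DifficultToSerialize variants may differ in detail.
class BrokenMapSketch<K, V>(private val delegate: MutableMap<K, V> = mutableMapOf()) : MutableMap<K, V> by delegate {
    // Kryo's map serializer rebuilds maps via put, so plain checkpoint deserialization fails here.
    override fun put(key: K, value: V): V? = throw IllegalStateException("Broken on purpose")
}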
@@ -12,6 +12,7 @@ import net.corda.core.flows.ReceiveFinalityFlow
 import net.corda.core.flows.SignTransactionFlow
 import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.node.AppServiceHub
 import net.corda.core.node.services.CordaService
@@ -318,8 +319,10 @@ class FlowEntityManagerTest : AbstractFlowEntityManagerTest() {
         StaffedFlowHospital.onFlowDischarged.add { _, _ -> ++counter }
         driver(DriverParameters(startNodesInProcess = true)) {

-            val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-            val bob = startNode(providedName = BOB_NAME).getOrThrow()
+            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it) }
+                    .transpose()
+                    .getOrThrow()

             val txId =
                 alice.rpc.startFlow(::EntityManagerWithFlushCatchAndInteractWithOtherPartyFlow, bob.nodeInfo.singleIdentity())
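The pattern this hunk introduces, and which recurs throughout the rest of the change, is to start both test nodes concurrently and block once for both futures instead of calling getOrThrow() after each startNode. A sketch of the idiom as it is used inside the driver block (same names as in the test):

val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
        .map { startNode(providedName = it) }  // kick off both node starts, collecting the futures
        .transpose()                           // List<CordaFuture<T>> -> CordaFuture<List<T>>
        .getOrThrow()                          // wait once for both node handles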
@@ -3,6 +3,7 @@ package net.corda.node.flows
 import co.paralleluniverse.fibers.Suspendable
 import net.corda.core.flows.*
 import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.utilities.getOrThrow
 import net.corda.core.utilities.unwrap
@@ -65,36 +66,35 @@ class FlowOverrideTests {
     private val nodeAClasses = setOf(Ping::class.java, Pong::class.java, Pongiest::class.java)
     private val nodeBClasses = setOf(Ping::class.java, Pong::class.java)

-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `should use the most specific implementation of a responding flow`() {
         driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
-            val nodeA = startNode(NodeParameters(
-                    providedName = ALICE_NAME,
-                    additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray()))
-            )).getOrThrow()
-            val nodeB = startNode(NodeParameters(
-                    providedName = BOB_NAME,
-                    additionalCordapps = setOf(cordappForClasses(*nodeBClasses.toTypedArray()))
-            )).getOrThrow()
+            val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                    .map {
+                        NodeParameters(providedName = it,
+                                additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())))
+                    }
+                    .map { startNode(it) }
+                    .transpose()
+                    .getOrThrow()
             assertThat(nodeB.rpc.startFlow(::Ping, nodeA.nodeInfo.singleIdentity()).returnValue.getOrThrow(), `is`(Pongiest.GORGONZOLA))
         }
     }

-    @Test(timeout=300_000)
+    @Test(timeout = 300_000)
     fun `should use the overriden implementation of a responding flow`() {
         val flowOverrides = mapOf(Ping::class.java to Pong::class.java)
         driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
-            val nodeA = startNode(NodeParameters(
-                    providedName = ALICE_NAME,
-                    additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())),
-                    flowOverrides = flowOverrides
-            )).getOrThrow()
-            val nodeB = startNode(NodeParameters(
-                    providedName = BOB_NAME,
-                    additionalCordapps = setOf(cordappForClasses(*nodeBClasses.toTypedArray()))
-            )).getOrThrow()
+            val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                    .map {
+                        NodeParameters(providedName = it,
+                                flowOverrides = flowOverrides,
+                                additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray())))
+                    }
+                    .map { startNode(it) }
+                    .transpose()
+                    .getOrThrow()
             assertThat(nodeB.rpc.startFlow(::Ping, nodeA.nodeInfo.singleIdentity()).returnValue.getOrThrow(), `is`(Pong.PONG))
         }
     }

 }
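Both tests keep the same behavioural assertions: without an override the node resolves the most specific @InitiatedBy implementation (Pongiest), while an explicit flowOverrides entry pins Ping's responder to Pong. A minimal sketch of pinning a responder when starting a node in the driver, using the same parameter names as the test above:

val nodeA = startNode(NodeParameters(
        providedName = ALICE_NAME,
        flowOverrides = mapOf(Ping::class.java to Pong::class.java),  // initiating flow -> forced responder
        additionalCordapps = setOf(cordappForClasses(*nodeAClasses.toTypedArray()))
)).getOrThrow()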
@@ -0,0 +1,511 @@
package net.corda.node.flows

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.HospitalizeFlowException
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.flows.StateMachineRunId
import net.corda.core.identity.Party
import net.corda.core.internal.FlowIORequest
import net.corda.core.internal.IdempotentFlow
import net.corda.core.internal.TimedFlow
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.StateMachineTransactionMapping
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.core.utilities.unwrap
import net.corda.finance.DOLLARS
import net.corda.finance.flows.CashIssueAndPaymentFlow
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.statemachine.FlowStateMachineImpl
import net.corda.node.services.statemachine.FlowTimeoutException
import net.corda.node.services.statemachine.StaffedFlowHospital
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.internal.FINANCE_CORDAPPS
import net.corda.testing.node.internal.enclosedCordapp
import org.junit.Test
import java.sql.SQLTransientConnectionException
import java.util.concurrent.Semaphore
import kotlin.test.assertEquals
import kotlin.test.assertNull

class FlowReloadAfterCheckpointTest {

    private companion object {
        val cordapps = listOf(enclosedCordapp())
    }

    @Test(timeout = 300_000)
    fun `flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true`() {
        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
        }
        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {

            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
                    .map {
                        startNode(
                                providedName = it,
                                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
                        )
                    }
                    .transpose()
                    .getOrThrow()

            val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, false)
            val flowStartedByAlice = handle.id
            handle.returnValue.getOrThrow()
            assertEquals(5, reloadCounts[flowStartedByAlice])
            assertEquals(6, reloadCounts[ReloadFromCheckpointResponder.flowId])
        }
    }

    @Test(timeout = 300_000)
    fun `flow will not reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is false`() {
        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
        }
        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {

            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
                    .map {
                        startNode(
                                providedName = it,
                                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to false)
                        )
                    }
                    .transpose()
                    .getOrThrow()

            val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, false)
            val flowStartedByAlice = handle.id
            handle.returnValue.getOrThrow()
            assertNull(reloadCounts[flowStartedByAlice])
            assertNull(reloadCounts[ReloadFromCheckpointResponder.flowId])
        }
    }

    @Test(timeout = 300_000)
    fun `flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true and be kept for observation due to failed deserialization`() {
        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
        }
        lateinit var flowKeptForObservation: StateMachineRunId
        val lock = Semaphore(0)
        StaffedFlowHospital.onFlowKeptForOvernightObservation.add { id, _ ->
            flowKeptForObservation = id
            lock.release()
        }
        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {

            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
                    .map {
                        startNode(
                                providedName = it,
                                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
                        )
                    }
                    .transpose()
                    .getOrThrow()

            val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), true, false, false)
            val flowStartedByAlice = handle.id
            lock.acquire()
            assertEquals(flowStartedByAlice, flowKeptForObservation)
            assertEquals(4, reloadCounts[flowStartedByAlice])
            assertEquals(4, reloadCounts[ReloadFromCheckpointResponder.flowId])
        }
    }

    @Test(timeout = 300_000)
    fun `flow will reload from a previous checkpoint after calling suspending function and skipping the persisting the current checkpoint when reloadCheckpointAfterSuspend is true`() {
        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
        }
        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {

            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
                    .map {
                        startNode(
                                providedName = it,
                                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
                        )
                    }
                    .transpose()
                    .getOrThrow()

            val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, true)
            val flowStartedByAlice = handle.id
            handle.returnValue.getOrThrow()
            assertEquals(5, reloadCounts[flowStartedByAlice])
            assertEquals(6, reloadCounts[ReloadFromCheckpointResponder.flowId])
        }
    }

    @Test(timeout = 300_000)
    fun `idempotent flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true`() {
        var reloadCount = 0
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {

            val alice = startNode(
                    providedName = ALICE_NAME,
                    customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
            ).getOrThrow()

            alice.rpc.startFlow(::MyIdempotentFlow, false).returnValue.getOrThrow()
            assertEquals(5, reloadCount)
        }
    }

    @Test(timeout = 300_000)
    fun `idempotent flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true but can't throw deserialization error from objects in the call function`() {
        var reloadCount = 0
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {

            val alice = startNode(
                    providedName = ALICE_NAME,
                    customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
            ).getOrThrow()

            alice.rpc.startFlow(::MyIdempotentFlow, true).returnValue.getOrThrow()
            assertEquals(5, reloadCount)
        }
    }

    @Test(timeout = 300_000)
    fun `timed flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true`() {
        var reloadCount = 0
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {

            val alice = startNode(
                    providedName = ALICE_NAME,
                    customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
            ).getOrThrow()

            alice.rpc.startFlow(::MyTimedFlow).returnValue.getOrThrow()
            assertEquals(5, reloadCount)
        }
    }

    @Test(timeout = 300_000)
    fun `flow will correctly retry after an error when reloadCheckpointAfterSuspend is true`() {
        var reloadCount = 0
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
        var timesDischarged = 0
        StaffedFlowHospital.onFlowDischarged.add { _, _ -> timesDischarged += 1 }
        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {

            val alice = startNode(
                    providedName = ALICE_NAME,
                    customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
            ).getOrThrow()

            alice.rpc.startFlow(::TransientConnectionFailureFlow).returnValue.getOrThrow()
            assertEquals(5, reloadCount)
            assertEquals(3, timesDischarged)
        }
    }

    @Test(timeout = 300_000)
    fun `flow continues reloading from checkpoints after node restart when reloadCheckpointAfterSuspend is true`() {
        var reloadCount = 0
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
        driver(
                DriverParameters(
                        inMemoryDB = false,
                        startNodesInProcess = true,
                        notarySpecs = emptyList(),
                        cordappsForAllNodes = cordapps
                )
        ) {

            val alice = startNode(
                    providedName = ALICE_NAME,
                    customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
            ).getOrThrow()

            alice.rpc.startFlow(::MyHospitalizingFlow)
            Thread.sleep(10.seconds.toMillis())

            alice.stop()

            startNode(
                    providedName = ALICE_NAME,
                    customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
            ).getOrThrow()

            Thread.sleep(20.seconds.toMillis())

            assertEquals(5, reloadCount)
        }
    }

    @Test(timeout = 300_000)
    fun `idempotent flow continues reloading from checkpoints after node restart when reloadCheckpointAfterSuspend is true`() {
        var reloadCount = 0
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
        driver(
                DriverParameters(
                        inMemoryDB = false,
                        startNodesInProcess = true,
                        notarySpecs = emptyList(),
                        cordappsForAllNodes = cordapps
                )
        ) {

            val alice = startNode(
                    providedName = ALICE_NAME,
                    customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
            ).getOrThrow()

            alice.rpc.startFlow(::IdempotentHospitalizingFlow)
            Thread.sleep(10.seconds.toMillis())

            alice.stop()

            startNode(
                    providedName = ALICE_NAME,
                    customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
            ).getOrThrow()

            Thread.sleep(20.seconds.toMillis())

            // restarts completely from the beginning and forgets the in-memory reload count therefore
            // it reloads an extra 2 times for checkpoints it had already reloaded before the node shutdown
            assertEquals(7, reloadCount)
        }
    }

    @Test(timeout = 300_000)
    fun `more complicated flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true`() {
        val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
        FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
            reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
        }
        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = FINANCE_CORDAPPS)) {

            val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
                    .map {
                        startNode(
                                providedName = it,
                                customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
                        )
                    }
                    .transpose()
                    .getOrThrow()

            val handle = alice.rpc.startFlow(
                    ::CashIssueAndPaymentFlow,
                    500.DOLLARS,
                    OpaqueBytes.of(0x01),
                    bob.nodeInfo.singleIdentity(),
                    false,
                    defaultNotaryIdentity
            )
            val flowStartedByAlice = handle.id
            handle.returnValue.getOrThrow(30.seconds)
            val flowStartedByBob = bob.rpc.stateMachineRecordedTransactionMappingSnapshot()
                    .map(StateMachineTransactionMapping::stateMachineRunId)
                    .toSet()
                    .single()
            Thread.sleep(10.seconds.toMillis())
            assertEquals(7, reloadCounts[flowStartedByAlice])
            assertEquals(6, reloadCounts[flowStartedByBob])
        }
    }

    /**
     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
     * Therefore this flow should reload 5 times when completed without errors or restarts.
     */
    @StartableByRPC
    @InitiatingFlow
    class ReloadFromCheckpointFlow(
            private val party: Party,
            private val shouldHaveDeserializationError: Boolean,
            private val counterPartyHasDeserializationError: Boolean,
            private val skipCheckpoints: Boolean
    ) : FlowLogic<Unit>() {

        @Suspendable
        override fun call() {
            val session = initiateFlow(party)
            session.send(counterPartyHasDeserializationError, skipCheckpoints)
            session.receive(String::class.java, skipCheckpoints).unwrap { it }
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, skipCheckpoints)
            val map = if (shouldHaveDeserializationError) {
                BrokenMap(mutableMapOf("i dont want" to "this to work"))
            } else {
                mapOf("i dont want" to "this to work")
            }
            logger.info("I need to use my variable to pass the build!: $map")
            session.sendAndReceive<String>("hey I made it this far")
        }
    }

    /**
     * Has 5 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 6.
     * Therefore this flow should reload 6 times when completed without errors or restarts.
     */
    @InitiatedBy(ReloadFromCheckpointFlow::class)
    class ReloadFromCheckpointResponder(private val session: FlowSession) : FlowLogic<Unit>() {

        companion object {
            var flowId: StateMachineRunId? = null
        }

        @Suspendable
        override fun call() {
            flowId = runId
            val counterPartyHasDeserializationError = session.receive<Boolean>().unwrap { it }
            session.send("hello there 12312311")
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            val map = if (counterPartyHasDeserializationError) {
                BrokenMap(mutableMapOf("i dont want" to "this to work"))
            } else {
                mapOf("i dont want" to "this to work")
            }
            logger.info("I need to use my variable to pass the build!: $map")
            session.receive<String>().unwrap { it }
            session.send("sending back a message")
        }
    }

    /**
     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
     * Therefore this flow should reload 5 times when completed without errors or restarts.
     */
    @StartableByRPC
    @InitiatingFlow
    class MyIdempotentFlow(private val shouldHaveDeserializationError: Boolean) : FlowLogic<Unit>(), IdempotentFlow {

        @Suspendable
        override fun call() {
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            val map = if (shouldHaveDeserializationError) {
                BrokenMap(mutableMapOf("i dont want" to "this to work"))
            } else {
                mapOf("i dont want" to "this to work")
            }
            logger.info("I need to use my variable to pass the build!: $map")
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
        }
    }

    /**
     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
     * Therefore this flow should reload 5 times when completed without errors or restarts.
     */
    @StartableByRPC
    @InitiatingFlow
    class MyTimedFlow : FlowLogic<Unit>(), TimedFlow {

        companion object {
            var thrown = false
        }

        override val isTimeoutEnabled: Boolean = true

        @Suspendable
        override fun call() {
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            if (!thrown) {
                thrown = true
                throw FlowTimeoutException()
            }
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
        }
    }

    @StartableByRPC
    @InitiatingFlow
    class TransientConnectionFailureFlow : FlowLogic<Unit>() {

        companion object {
            var retryCount = 0
        }

        @Suspendable
        override fun call() {
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            if (retryCount < 3) {
                retryCount += 1
                throw SQLTransientConnectionException("Connection is not available")
            }
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
        }
    }

    /**
     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
     * Therefore this flow should reload 5 times when completed without errors or restarts.
     */
    @StartableByRPC
    @InitiatingFlow
    class MyHospitalizingFlow : FlowLogic<Unit>() {

        companion object {
            var thrown = false
        }

        @Suspendable
        override fun call() {
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            if (!thrown) {
                thrown = true
                throw HospitalizeFlowException("i want to try again")
            }
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
        }
    }

    /**
     * Has 4 suspension points inside the flow and 1 in [FlowStateMachineImpl.run] totaling 5.
     * Therefore this flow should reload 5 times when completed without errors or restarts.
     */
    @StartableByRPC
    @InitiatingFlow
    class IdempotentHospitalizingFlow : FlowLogic<Unit>(), IdempotentFlow {

        companion object {
            var thrown = false
        }

        @Suspendable
        override fun call() {
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            if (!thrown) {
                thrown = true
                throw HospitalizeFlowException("i want to try again")
            }
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
            stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
        }
    }
}
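The reload counts asserted above follow directly from the doc comments on each flow: a flow with N suspension points is expected to reload N + 1 times, the extra one coming from FlowStateMachineImpl.run. As a hypothetical example following the same pattern (not part of the change), a flow with two explicit ForceCheckpoint suspensions would be expected to reload three times under this scheme:

// Hypothetical flow: two suspension points in call() plus the one in FlowStateMachineImpl.run,
// so the onReloadFlowFromCheckpoint hook should fire 3 times for it.
@StartableByRPC
class TwoCheckpointFlow : FlowLogic<Unit>() {
    @Suspendable
    override fun call() {
        stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
        stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
    }
}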
@@ -1,12 +1,16 @@
 package net.corda.node.flows

 import co.paralleluniverse.fibers.Suspendable
-import net.corda.client.rpc.CordaRPCClient
-import net.corda.client.rpc.CordaRPCClientConfiguration
 import net.corda.core.CordaRuntimeException
-import net.corda.core.flows.*
+import net.corda.core.flows.FlowExternalAsyncOperation
+import net.corda.core.flows.FlowLogic
+import net.corda.core.flows.FlowSession
+import net.corda.core.flows.InitiatedBy
+import net.corda.core.flows.InitiatingFlow
+import net.corda.core.flows.StartableByRPC
 import net.corda.core.identity.Party
 import net.corda.core.internal.IdempotentFlow
+import net.corda.core.internal.concurrent.transpose
 import net.corda.core.messaging.startFlow
 import net.corda.core.serialization.CordaSerializable
 import net.corda.core.utilities.ProgressTracker
@@ -22,6 +26,7 @@ import net.corda.testing.core.singleIdentity
 import net.corda.testing.driver.DriverParameters
 import net.corda.testing.driver.driver
 import net.corda.testing.node.User
+import net.corda.testing.node.internal.enclosedCordapp
 import org.assertj.core.api.Assertions.assertThatExceptionOfType
 import org.hibernate.exception.ConstraintViolationException
 import org.junit.After
@@ -32,7 +37,8 @@ import java.sql.SQLException
 import java.sql.SQLTransientConnectionException
 import java.time.Duration
 import java.time.temporal.ChronoUnit
-import java.util.*
+import java.util.Collections
+import java.util.HashSet
 import java.util.concurrent.CompletableFuture
 import java.util.concurrent.TimeoutException
 import kotlin.test.assertEquals
@@ -40,7 +46,11 @@ import kotlin.test.assertFailsWith
 import kotlin.test.assertNotNull

 class FlowRetryTest {
-    val config = CordaRPCClientConfiguration.DEFAULT.copy(connectionRetryIntervalMultiplier = 1.1)
+
+    private companion object {
+        val user = User("mark", "dadada", setOf(Permissions.all()))
+        val cordapps = listOf(enclosedCordapp())
+    }

     @Before
     fun resetCounters() {
@@ -57,146 +67,134 @@ class FlowRetryTest {
         StaffedFlowHospital.DatabaseEndocrinologist.customConditions.clear()
     }

-    @Test(timeout=300_000)
-    fun `flows continue despite errors`() {
-        val numSessions = 2
-        val numIterations = 10
-        val user = User("mark", "dadada", setOf(Permissions.startFlow<InitiatorFlow>()))
-        val result: Any? = driver(DriverParameters(
-                startNodesInProcess = isQuasarAgentSpecified(),
-                notarySpecs = emptyList()
-        )) {
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
-
-            val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                it.proxy.startFlow(::InitiatorFlow, numSessions, numIterations, nodeBHandle.nodeInfo.singleIdentity()).returnValue.getOrThrow()
-            }
-            result
-        }
-        assertNotNull(result)
-        assertEquals("$numSessions:$numIterations", result)
-    }
-
-    @Test(timeout=300_000)
-    fun `async operation deduplication id is stable accross retries`() {
-        val user = User("mark", "dadada", setOf(Permissions.startFlow<AsyncRetryFlow>()))
-        driver(DriverParameters(
-                startNodesInProcess = isQuasarAgentSpecified(),
-                notarySpecs = emptyList()
-        )) {
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                it.proxy.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
-            }
-        }
-    }
-
-    @Test(timeout=300_000)
-    fun `flow gives up after number of exceptions, even if this is the first line of the flow`() {
-        val user = User("mark", "dadada", setOf(Permissions.startFlow<RetryFlow>()))
-        assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
-            driver(DriverParameters(
-                    startNodesInProcess = isQuasarAgentSpecified(),
-                    notarySpecs = emptyList()
-            )) {
-                val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-
-                val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                    it.proxy.startFlow(::RetryFlow).returnValue.getOrThrow()
-                }
-                result
-            }
-        }
-    }
-
-    @Test(timeout=300_000)
-    fun `flow that throws in constructor throw for the RPC client that attempted to start them`() {
-        val user = User("mark", "dadada", setOf(Permissions.startFlow<ThrowingFlow>()))
-        assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
-            driver(DriverParameters(
-                    startNodesInProcess = isQuasarAgentSpecified(),
-                    notarySpecs = emptyList()
-            )) {
-                val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-
-                val result = CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                    it.proxy.startFlow(::ThrowingFlow).returnValue.getOrThrow()
-                }
-                result
-            }
-        }
-    }
-
-    @Test(timeout=300_000)
-    fun `SQLTransientConnectionExceptions thrown by hikari are retried 3 times and then kept in the checkpoints table`() {
-        val user = User("mark", "dadada", setOf(Permissions.all()))
-        driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
-
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                assertFailsWith<TimeoutException> {
-                    it.proxy.startFlow(::TransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
-                            .returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
-                }
-                assertEquals(3, TransientConnectionFailureFlow.retryCount)
-                assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get())
-            }
-        }
-    }
-
-    @Test(timeout=300_000)
-    fun `Specific exception still detected even if it is nested inside another exception`() {
-        val user = User("mark", "dadada", setOf(Permissions.all()))
-        driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
-
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                assertFailsWith<TimeoutException> {
-                    it.proxy.startFlow(::WrappedTransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
-                            .returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
-                }
-                assertEquals(3, WrappedTransientConnectionFailureFlow.retryCount)
-                assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get())
-            }
-        }
-    }
-
-    @Test(timeout=300_000)
-    fun `General external exceptions are not retried and propagate`() {
-        val user = User("mark", "dadada", setOf(Permissions.all()))
-        driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
-
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-            val nodeBHandle = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
-
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                assertFailsWith<CordaRuntimeException> {
-                    it.proxy.startFlow(::GeneralExternalFailureFlow, nodeBHandle.nodeInfo.singleIdentity()).returnValue.getOrThrow()
-                }
-                assertEquals(0, GeneralExternalFailureFlow.retryCount)
-                assertEquals(1, it.proxy.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.FAILED).returnValue.get())
-            }
-        }
-    }
-
-    @Test(timeout=300_000)
-    fun `Permission exceptions are not retried and propagate`() {
-        val user = User("mark", "dadada", setOf())
-        driver(DriverParameters(isDebug = true, startNodesInProcess = isQuasarAgentSpecified())) {
-            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-
-            CordaRPCClient(nodeAHandle.rpcAddress, config).start(user.username, user.password).use {
-                assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
-                    it.proxy.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
-                }.withMessageStartingWith("User not authorized to perform RPC call")
-                // This stays at -1 since the flow never even got called
-                assertEquals(-1, GeneralExternalFailureFlow.retryCount)
-            }
-        }
-    }
+    @Test(timeout = 300_000)
+    fun `flows continue despite errors`() {
+        val numSessions = 2
+        val numIterations = 10
+        val result: Any? = driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList())) {
+
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
+
+            val result = nodeAHandle.rpc.startFlow(
+                    ::InitiatorFlow,
+                    numSessions,
+                    numIterations,
+                    nodeBHandle.nodeInfo.singleIdentity()
+            ).returnValue.getOrThrow()
+            result
+        }
+        assertNotNull(result)
+        assertEquals("$numSessions:$numIterations", result)
+    }
+
+    @Test(timeout = 300_000)
+    fun `async operation deduplication id is stable accross retries`() {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
+            nodeAHandle.rpc.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `flow gives up after number of exceptions, even if this is the first line of the flow`() {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
+            assertFailsWith<CordaRuntimeException> {
+                nodeAHandle.rpc.startFlow(::RetryFlow).returnValue.getOrThrow()
+            }
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `flow that throws in constructor throw for the RPC client that attempted to start them`() {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
+            assertFailsWith<CordaRuntimeException> {
+                nodeAHandle.rpc.startFlow(::ThrowingFlow).returnValue.getOrThrow()
+            }
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `SQLTransientConnectionExceptions thrown by hikari are retried 3 times and then kept in the checkpoints table`() {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
+
+            assertFailsWith<TimeoutException> {
+                nodeAHandle.rpc.startFlow(::TransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
+                        .returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
+            }
+            assertEquals(3, TransientConnectionFailureFlow.retryCount)
+            assertEquals(
+                1,
+                nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get()
+            )
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `Specific exception still detected even if it is nested inside another exception`() {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
+
+            assertFailsWith<TimeoutException> {
+                nodeAHandle.rpc.startFlow(::WrappedTransientConnectionFailureFlow, nodeBHandle.nodeInfo.singleIdentity())
+                        .returnValue.getOrThrow(Duration.of(10, ChronoUnit.SECONDS))
+            }
+            assertEquals(3, WrappedTransientConnectionFailureFlow.retryCount)
+            assertEquals(
+                1,
+                nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.HOSPITALIZED).returnValue.get()
+            )
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `General external exceptions are not retried and propagate`() {
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
+                    .map { startNode(providedName = it, rpcUsers = listOf(user)) }
+                    .transpose()
+                    .getOrThrow()
+
+            assertFailsWith<CordaRuntimeException> {
+                nodeAHandle.rpc.startFlow(
+                    ::GeneralExternalFailureFlow,
+                    nodeBHandle.nodeInfo.singleIdentity()
+                ).returnValue.getOrThrow()
+            }
+            assertEquals(0, GeneralExternalFailureFlow.retryCount)
+            assertEquals(
+                1,
+                nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.FAILED).returnValue.get()
+            )
+        }
+    }
+
+    @Test(timeout = 300_000)
+    fun `Permission exceptions are not retried and propagate`() {
+        val user = User("mark", "dadada", setOf())
+        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
+
+            val nodeAHandle = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()

+            assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
+                nodeAHandle.rpc.startFlow(::AsyncRetryFlow).returnValue.getOrThrow()
+            }.withMessageStartingWith("User not authorized to perform RPC call")
+            // This stays at -1 since the flow never even got called
+            assertEquals(-1, GeneralExternalFailureFlow.retryCount)
+        }
+    }
@@ -306,6 +304,10 @@ enum class Step { First, BeforeInitiate, AfterInitiate, AfterInitiateSendReceive

 data class Visited(val sessionNum: Int, val iterationNum: Int, val step: Step)

+class BrokenMap<K, V>(delegate: MutableMap<K, V> = mutableMapOf()) : MutableMap<K, V> by delegate {
+    override fun put(key: K, value: V): V? = throw IllegalStateException("Broken on purpose")
+}
+
 @StartableByRPC
 class RetryFlow() : FlowLogic<String>(), IdempotentFlow {
     companion object {
@@ -333,7 +335,7 @@ class AsyncRetryFlow() : FlowLogic<String>(), IdempotentFlow {
         val deduplicationIds = mutableSetOf<String>()
     }

-    class RecordDeduplicationId: FlowExternalAsyncOperation<String> {
+    class RecordDeduplicationId : FlowExternalAsyncOperation<String> {
         override fun execute(deduplicationId: String): CompletableFuture<String> {
             val dedupeIdIsNew = deduplicationIds.add(deduplicationId)
             if (dedupeIdIsNew) {
@@ -414,8 +416,9 @@ class WrappedTransientConnectionFailureFlow(private val party: Party) : FlowLogi
         // checkpoint will restart the flow after the send
         retryCount += 1
         throw IllegalStateException(
             "wrapped error message",
-            IllegalStateException("another layer deep", SQLTransientConnectionException("Connection is not available")))
+            IllegalStateException("another layer deep", SQLTransientConnectionException("Connection is not available"))
+        )
     }
 }

@@ -456,12 +459,14 @@ class GeneralExternalFailureResponder(private val session: FlowSession) : FlowLo

 @StartableByRPC
 class GetCheckpointNumberOfStatusFlow(private val flowStatus: Checkpoint.FlowStatus) : FlowLogic<Long>() {
+
+    @Suspendable
     override fun call(): Long {
         val sqlStatement =
             "select count(*) " +
                 "from node_checkpoints " +
                 "where status = ${flowStatus.ordinal} " +
                 "and flow_id != '${runId.uuid}' " // don't count in the checkpoint of the current flow

         return serviceHub.jdbcSession().prepareStatement(sqlStatement).use { ps ->
             ps.executeQuery().use { rs ->
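The rewritten tests preserve the original classification: an SQLTransientConnectionException, even when nested inside other exceptions, is retried three times and the flow is then kept in the checkpoints table as HOSPITALIZED, whereas a general exception fails the flow immediately with status FAILED, and a permissions error is rejected before the flow ever starts. A hypothetical minimal flow in the same spirit as TransientConnectionFailureFlow above, which the flow hospital would retry until it stops throwing:

// Hypothetical sketch, not part of the change: first two attempts throw a transient SQL error,
// so the flow is retried by the hospital; the third attempt completes normally.
@StartableByRPC
class SometimesFlakyFlow : FlowLogic<Unit>() {
    companion object {
        var attempts = 0
    }

    @Suspendable
    override fun call() {
        if (attempts < 2) {
            attempts += 1
            throw SQLTransientConnectionException("Connection is not available")
        }
    }
}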
@@ -0,0 +1,294 @@
package net.corda.node.flows

import co.paralleluniverse.fibers.Suspendable
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.CordaRuntimeException
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.flows.UnexpectedFlowEndException
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.serialization.CordaSerializable
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.core.utilities.toNonEmptySet
import net.corda.core.utilities.unwrap
import net.corda.node.services.Permissions
import net.corda.node.services.statemachine.transitions.PrematureSessionCloseException
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.User
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions.assertThatThrownBy
import org.junit.Test
import java.sql.SQLTransientConnectionException
import kotlin.test.assertEquals

class FlowSessionCloseTest {

    private val user = User("user", "pwd", setOf(Permissions.all()))

    @Test(timeout=300_000)
    fun `flow cannot close uninitialised session`() {
        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
            val (nodeAHandle, nodeBHandle) = listOf(
                    startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
                    startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
            ).transpose().getOrThrow()

            CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
                assertThatThrownBy { it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), true, null, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow() }
                        .isInstanceOf(CordaRuntimeException::class.java)
                        .hasMessageContaining(PrematureSessionCloseException::class.java.name)
                        .hasMessageContaining("The following session was closed before it was initialised")
            }
        }
    }

    @Test(timeout=300_000)
    fun `flow cannot access closed session, unless it's a duplicate close which is handled gracefully`() {
        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
            val (nodeAHandle, nodeBHandle) = listOf(
                    startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
                    startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
            ).transpose().getOrThrow()


            CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
                InitiatorFlow.SessionAPI.values().forEach { sessionAPI ->
                    when (sessionAPI) {
                        InitiatorFlow.SessionAPI.CLOSE -> {
                            it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, sessionAPI, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow()
                        }
                        else -> {
                            assertThatThrownBy { it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, sessionAPI, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow() }
                                    .isInstanceOf(UnexpectedFlowEndException::class.java)
                                    .hasMessageContaining("Tried to access ended session")
                        }
                    }
                }
            }

        }
    }

    @Test(timeout=300_000)
    fun `flow can close initialised session successfully`() {
        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
            val (nodeAHandle, nodeBHandle) = listOf(
                    startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
                    startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
            ).transpose().getOrThrow()

            CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
                it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, null, InitiatorFlow.ResponderReaction.NORMAL_CLOSE).returnValue.getOrThrow()
            }
        }
    }

    @Test(timeout=300_000)
    fun `flow can close initialised session successfully even in case of failures and replays`() {
        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
            val (nodeAHandle, nodeBHandle) = listOf(
                    startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
                    startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
            ).transpose().getOrThrow()

            CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
                it.proxy.startFlow(::InitiatorFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false, null, InitiatorFlow.ResponderReaction.RETRY_CLOSE_FROM_CHECKPOINT).returnValue.getOrThrow()
            }
        }
    }

    @Test(timeout=300_000)
    fun `flow can close multiple sessions successfully`() {
        driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
|
||||||
|
val (nodeAHandle, nodeBHandle) = listOf(
|
||||||
|
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
|
||||||
|
startNode(providedName = BOB_NAME, rpcUsers = listOf(user))
|
||||||
|
).transpose().getOrThrow()
|
||||||
|
|
||||||
|
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
|
||||||
|
it.proxy.startFlow(::InitiatorMultipleSessionsFlow, nodeBHandle.nodeInfo.legalIdentities.first()).returnValue.getOrThrow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This test ensures that when sessions are closed, the associated resources are eagerly cleaned up.
|
||||||
|
* If sessions are not closed, then the node will crash with an out-of-memory error.
|
||||||
|
* This can be confirmed by commenting out [FlowSession.close] operation in the invoked flow and re-run the test.
|
||||||
|
*/
|
||||||
|
@Test(timeout=300_000)
|
||||||
|
fun `flow looping over sessions can close them to release resources and avoid out-of-memory failures, when the other side does not finish early`() {
|
||||||
|
driver(DriverParameters(startNodesInProcess = false, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
|
||||||
|
val (nodeAHandle, nodeBHandle) = listOf(
|
||||||
|
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user), maximumHeapSize = "256m"),
|
||||||
|
startNode(providedName = BOB_NAME, rpcUsers = listOf(user), maximumHeapSize = "256m")
|
||||||
|
).transpose().getOrThrow()
|
||||||
|
|
||||||
|
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
|
||||||
|
it.proxy.startFlow(::InitiatorLoopingFlow, nodeBHandle.nodeInfo.legalIdentities.first(), true).returnValue.getOrThrow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test(timeout=300_000)
|
||||||
|
fun `flow looping over sessions will close sessions automatically, when the other side finishes early`() {
|
||||||
|
driver(DriverParameters(startNodesInProcess = false, cordappsForAllNodes = listOf(enclosedCordapp()), notarySpecs = emptyList())) {
|
||||||
|
val (nodeAHandle, nodeBHandle) = listOf(
|
||||||
|
startNode(providedName = ALICE_NAME, rpcUsers = listOf(user), maximumHeapSize = "256m"),
|
||||||
|
startNode(providedName = BOB_NAME, rpcUsers = listOf(user), maximumHeapSize = "256m")
|
||||||
|
).transpose().getOrThrow()
|
||||||
|
|
||||||
|
CordaRPCClient(nodeAHandle.rpcAddress).start(user.username, user.password).use {
|
||||||
|
it.proxy.startFlow(::InitiatorLoopingFlow, nodeBHandle.nodeInfo.legalIdentities.first(), false).returnValue.getOrThrow()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@InitiatingFlow
|
||||||
|
@StartableByRPC
|
||||||
|
class InitiatorFlow(val party: Party, private val prematureClose: Boolean = false,
|
||||||
|
private val accessClosedSessionWithApi: SessionAPI? = null,
|
||||||
|
private val responderReaction: ResponderReaction): FlowLogic<Unit>() {
|
||||||
|
|
||||||
|
@CordaSerializable
|
||||||
|
enum class SessionAPI {
|
||||||
|
SEND,
|
||||||
|
SEND_AND_RECEIVE,
|
||||||
|
RECEIVE,
|
||||||
|
GET_FLOW_INFO,
|
||||||
|
CLOSE
|
||||||
|
}
|
||||||
|
|
||||||
|
@CordaSerializable
|
||||||
|
enum class ResponderReaction {
|
||||||
|
NORMAL_CLOSE,
|
||||||
|
RETRY_CLOSE_FROM_CHECKPOINT
|
||||||
|
}
|
||||||
|
|
||||||
|
@Suspendable
|
||||||
|
override fun call() {
|
||||||
|
val session = initiateFlow(party)
|
||||||
|
|
||||||
|
if (prematureClose) {
|
||||||
|
session.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
session.send(responderReaction)
|
||||||
|
sleep(1.seconds)
|
||||||
|
|
||||||
|
if (accessClosedSessionWithApi != null) {
|
||||||
|
when(accessClosedSessionWithApi) {
|
||||||
|
SessionAPI.SEND -> session.send("dummy payload ")
|
||||||
|
SessionAPI.RECEIVE -> session.receive<String>()
|
||||||
|
SessionAPI.SEND_AND_RECEIVE -> session.sendAndReceive<String>("dummy payload")
|
||||||
|
SessionAPI.GET_FLOW_INFO -> session.getCounterpartyFlowInfo()
|
||||||
|
SessionAPI.CLOSE -> session.close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@InitiatedBy(InitiatorFlow::class)
|
||||||
|
class InitiatedFlow(private val otherSideSession: FlowSession): FlowLogic<Unit>() {
|
||||||
|
|
||||||
|
companion object {
|
||||||
|
var thrown = false
|
||||||
|
}
|
||||||
|
|
||||||
|
@Suspendable
|
||||||
|
override fun call() {
|
||||||
|
val responderReaction = otherSideSession.receive<InitiatorFlow.ResponderReaction>()
|
||||||
|
.unwrap{ it }
|
||||||
|
|
||||||
|
when(responderReaction) {
|
||||||
|
InitiatorFlow.ResponderReaction.NORMAL_CLOSE -> {
|
||||||
|
otherSideSession.close()
|
||||||
|
}
|
||||||
|
InitiatorFlow.ResponderReaction.RETRY_CLOSE_FROM_CHECKPOINT -> {
|
||||||
|
otherSideSession.close()
|
||||||
|
|
||||||
|
// failing with a transient exception to force a replay of the close.
|
||||||
|
if (!thrown) {
|
||||||
|
thrown = true
|
||||||
|
throw SQLTransientConnectionException("Connection is not available")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@InitiatingFlow
|
||||||
|
@StartableByRPC
|
||||||
|
class InitiatorLoopingFlow(val party: Party, val blockingCounterparty: Boolean = false): FlowLogic<Unit>() {
|
||||||
|
@Suspendable
|
||||||
|
override fun call() {
|
||||||
|
for (i in 1..1_000) {
|
||||||
|
val session = initiateFlow(party)
|
||||||
|
session.sendAndReceive<String>(blockingCounterparty ).unwrap{ assertEquals("Got it", it) }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If the counterparty blocks, we need to eagerly close the session and release resources to avoid running out of memory.
|
||||||
|
* Otherwise, the session end messages from the other side will do that automatically.
|
||||||
|
*/
|
||||||
|
if (blockingCounterparty) {
|
||||||
|
session.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.info("Completed iteration $i")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@InitiatedBy(InitiatorLoopingFlow::class)
|
||||||
|
class InitiatedLoopingFlow(private val otherSideSession: FlowSession): FlowLogic<Unit>() {
|
||||||
|
@Suspendable
|
||||||
|
override fun call() {
|
||||||
|
val shouldBlock = otherSideSession.receive<Boolean>()
|
||||||
|
.unwrap{ it }
|
||||||
|
otherSideSession.send("Got it")
|
||||||
|
|
||||||
|
if (shouldBlock) {
|
||||||
|
otherSideSession.receive<String>()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@InitiatingFlow
|
||||||
|
@StartableByRPC
|
||||||
|
class InitiatorMultipleSessionsFlow(val party: Party): FlowLogic<Unit>() {
|
||||||
|
@Suspendable
|
||||||
|
override fun call() {
|
||||||
|
for (round in 1 .. 2) {
|
||||||
|
val sessions = mutableListOf<FlowSession>()
|
||||||
|
for (session_number in 1 .. 5) {
|
||||||
|
val session = initiateFlow(party)
|
||||||
|
sessions.add(session)
|
||||||
|
session.sendAndReceive<String>("What's up?").unwrap{ assertEquals("All good!", it) }
|
||||||
|
}
|
||||||
|
close(sessions.toNonEmptySet())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@InitiatedBy(InitiatorMultipleSessionsFlow::class)
|
||||||
|
class InitiatedMultipleSessionsFlow(private val otherSideSession: FlowSession): FlowLogic<Unit>() {
|
||||||
|
@Suspendable
|
||||||
|
override fun call() {
|
||||||
|
otherSideSession.receive<String>()
|
||||||
|
.unwrap{ assertEquals("What's up?", it) }
|
||||||
|
otherSideSession.send("All good!")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
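A note for CorDapp authors reading these tests: the pattern they pin down is that a session only needs an explicit close() when the counterparty will not end it for you, for example when a flow loops over many short-lived sessions. A minimal hedged sketch under that assumption (the flow name and payloads are illustrative, and a matching @InitiatedBy responder that replies once per session is assumed):

@InitiatingFlow
@StartableByRPC
class FanOutFlow(private val counterparty: Party, private val requests: List<String>) : FlowLogic<Unit>() {
    @Suspendable
    override fun call() {
        for (request in requests) {
            val session = initiateFlow(counterparty)
            // One round trip per request; the responder stays alive, so nothing will end this session for us.
            session.sendAndReceive<String>(request).unwrap { it }
            // Release the session's buffers eagerly instead of holding every session open until the flow ends.
            session.close()
        }
    }
}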
@@ -14,6 +14,7 @@ import net.corda.core.flows.StateMachineRunId
import net.corda.core.flows.UnexpectedFlowEndException
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.messaging.startFlow
import net.corda.core.node.services.StatesNotAvailableException
@@ -68,9 +69,10 @@ class KillFlowTest {
    @Test(timeout = 300_000)
    fun `a killed flow will propagate the killed error to counter parties when it reaches the next suspension point`() {
        driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-           val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-           val bob = startNode(providedName = BOB_NAME).getOrThrow()
-           val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
+           val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
+                   .map { startNode(providedName = it) }
+                   .transpose()
+                   .getOrThrow()
            alice.rpc.let { rpc ->
                val handle = rpc.startFlow(
                        ::AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriends,
@@ -118,8 +120,10 @@ class KillFlowTest {
    @Test(timeout = 300_000)
    fun `killing a flow suspended in send + receive + sendAndReceive ends the flow immediately`() {
        driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = false)) {
-           val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-           val bob = startNode(providedName = BOB_NAME).getOrThrow()
+           val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it) }
+                   .transpose()
+                   .getOrThrow()
            val bobParty = bob.nodeInfo.singleIdentity()
            bob.stop()
            val terminated = (bob as OutOfProcess).process.waitFor(30, TimeUnit.SECONDS)
@@ -192,9 +196,10 @@ class KillFlowTest {
    @Test(timeout = 300_000)
    fun `a killed flow will propagate the killed error to counter parties if it was suspended`() {
        driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-           val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-           val bob = startNode(providedName = BOB_NAME).getOrThrow()
-           val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
+           val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
+                   .map { startNode(providedName = it) }
+                   .transpose()
+                   .getOrThrow()
            alice.rpc.let { rpc ->
                val handle = rpc.startFlow(
                        ::AFlowThatGetsMurderedAndSomehowKillsItsFriends,
@@ -224,9 +229,10 @@ class KillFlowTest {
    @Test(timeout = 300_000)
    fun `a killed initiated flow will propagate the killed error to the initiator and its counter parties`() {
        driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
-           val alice = startNode(providedName = ALICE_NAME).getOrThrow()
-           val bob = startNode(providedName = BOB_NAME).getOrThrow()
-           val charlie = startNode(providedName = CHARLIE_NAME).getOrThrow()
+           val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
+                   .map { startNode(providedName = it) }
+                   .transpose()
+                   .getOrThrow()
            val handle = alice.rpc.startFlow(
                    ::AFlowThatGetsMurderedByItsFriend,
                    listOf(bob.nodeInfo.singleIdentity(), charlie.nodeInfo.singleIdentity())
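The recurring refactor in the hunks above (and in the test classes that follow) swaps sequential getOrThrow() calls for a single parallel wait on all node start-ups. A condensed sketch of the pattern, using the driver DSL names already shown in these hunks:

driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
    val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
            .map { startNode(providedName = it) } // kick off every node start without blocking
            .transpose()                          // List<CordaFuture<NodeHandle>> -> CordaFuture<List<NodeHandle>>
            .getOrThrow()                         // block once, for all of them
    // ... test body uses alice, bob and charlie exactly as before ...
}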
@@ -1,68 +0,0 @@
package net.corda.node.logging

import net.corda.core.flows.FlowLogic
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.internal.div
import net.corda.core.messaging.FlowHandle
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeHandle
import net.corda.testing.driver.driver
import org.assertj.core.api.Assertions.assertThat
import org.junit.Test
import java.io.File

class ErrorCodeLoggingTests {
    @Test(timeout=300_000)
    fun `log entries with a throwable and ERROR or WARN get an error code appended`() {
        driver(DriverParameters(notarySpecs = emptyList())) {
            val node = startNode(startInSameProcess = false).getOrThrow()
            node.rpc.startFlow(::MyFlow).waitForCompletion()
            val logFile = node.logFile()

            val linesWithErrorCode = logFile.useLines { lines ->
                lines.filter { line ->
                    line.contains("[errorCode=")
                }.filter { line ->
                    line.contains("moreInformationAt=https://errors.corda.net/")
                }.toList()
            }

            assertThat(linesWithErrorCode).isNotEmpty
        }
    }

    // This is used to detect broken logging which can be caused by loggers being initialized
    // before the initLogging() call is made
    @Test(timeout=300_000)
    fun `When logging is set to error level, there are no other levels logged after node startup`() {
        driver(DriverParameters(notarySpecs = emptyList())) {
            val node = startNode(startInSameProcess = false, logLevelOverride = "ERROR").getOrThrow()
            val logFile = node.logFile()
            val lengthAfterStart = logFile.length()
            node.rpc.startFlow(::MyFlow).waitForCompletion()
            // An exception thrown in a flow will log at the "INFO" level.
            assertThat(logFile.length()).isEqualTo(lengthAfterStart)
        }
    }

    @StartableByRPC
    @InitiatingFlow
    class MyFlow : FlowLogic<String>() {
        override fun call(): String {
            throw IllegalArgumentException("Mwahahahah")
        }
    }
}

private fun FlowHandle<*>.waitForCompletion() {
    try {
        returnValue.getOrThrow()
    } catch (e: Exception) {
        // This is expected to throw an exception, using getOrThrow() just to wait until done.
    }
}

fun NodeHandle.logFile(): File = (baseDirectory / "logs").toFile().walk().filter { it.name.startsWith("node-") && it.extension == "log" }.single()
@@ -7,6 +7,7 @@ import net.corda.core.contracts.Command
import net.corda.core.contracts.StateAndContract
import net.corda.core.flows.*
import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
import net.corda.core.internal.packageName
import net.corda.core.messaging.startFlow
import net.corda.core.transactions.SignedTransaction
@@ -57,8 +58,10 @@ class FlowsDrainingModeContentionTest {
                portAllocation = portAllocation,
                extraCordappPackagesToScan = listOf(MessageState::class.packageName)
        )) {
-           val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
-           val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
+           val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it, rpcUsers = users) }
+                   .transpose()
+                   .getOrThrow()

            val nodeARpcInfo = RpcInfo(nodeA.rpcAddress, user.username, user.password)
            val flow = nodeA.rpc.startFlow(::ProposeTransactionAndWaitForCommit, message, nodeARpcInfo, nodeB.nodeInfo.singleIdentity(), defaultNotaryIdentity)
@@ -4,6 +4,7 @@ import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.*
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.map
+import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.getOrThrow
@@ -53,8 +54,11 @@ class P2PFlowsDrainingModeTest {
    @Test(timeout=300_000)
    fun `flows draining mode suspends consumption of initial session messages`() {
        driver(DriverParameters(startNodesInProcess = false, portAllocation = portAllocation, notarySpecs = emptyList())) {
-           val initiatedNode = startNode(providedName = ALICE_NAME).getOrThrow()
-           val initiating = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow().rpc
+           val (initiatedNode, bob) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it, rpcUsers = users) }
+                   .transpose()
+                   .getOrThrow()
+           val initiating = bob.rpc
            val counterParty = initiatedNode.nodeInfo.singleIdentity()
            val initiated = initiatedNode.rpc

@@ -85,8 +89,10 @@ class P2PFlowsDrainingModeTest {

        driver(DriverParameters(portAllocation = portAllocation, notarySpecs = emptyList())) {

-           val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
-           val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
+           val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it, rpcUsers = users) }
+                   .transpose()
+                   .getOrThrow()
            var successful = false
            val latch = CountDownLatch(1)

@@ -133,8 +139,10 @@ class P2PFlowsDrainingModeTest {

        driver(DriverParameters(portAllocation = portAllocation, notarySpecs = emptyList())) {

-           val nodeA = startNode(providedName = ALICE_NAME, rpcUsers = users).getOrThrow()
-           val nodeB = startNode(providedName = BOB_NAME, rpcUsers = users).getOrThrow()
+           val (nodeA, nodeB) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it, rpcUsers = users) }
+                   .transpose()
+                   .getOrThrow()
            var successful = false
            val latch = CountDownLatch(1)
@@ -1,10 +1,10 @@
package net.corda.node.services.config

import net.corda.core.utilities.getOrThrow
-import net.corda.node.logging.logFile
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.incrementalPortAllocation
+import net.corda.testing.driver.logFile
import org.junit.Assert.assertTrue
import org.junit.Test

@@ -0,0 +1,72 @@
package net.corda.node.services.messaging

import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.Destination
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import org.junit.Test
import kotlin.test.assertEquals

class MessagingSendAllTest {

    @Test(timeout=300_000)
    fun `flow can exchange messages with multiple sessions to the same party in parallel`() {
        driver(DriverParameters(startNodesInProcess = true)) {
            val (alice, bob) = listOf(
                    startNode(providedName = ALICE_NAME),
                    startNode(providedName = BOB_NAME)
            ).transpose().getOrThrow()

            val bobIdentity = bob.nodeInfo.singleIdentity()
            val messages = listOf(
                    bobIdentity to "hey bob 1",
                    bobIdentity to "hey bob 2"
            )

            alice.rpc.startFlow(::SenderFlow, messages).returnValue.getOrThrow()
        }
    }

    @StartableByRPC
    @InitiatingFlow
    class SenderFlow(private val parties: List<Pair<Destination, String>>): FlowLogic<String>() {
        @Suspendable
        override fun call(): String {
            val messagesPerSession = parties.toList().map { (party, messageType) ->
                val session = initiateFlow(party)
                Pair(session, messageType)
            }.toMap()

            sendAllMap(messagesPerSession)
            val messages = receiveAll(String::class.java, messagesPerSession.keys.toList())

            messages.map { it.unwrap { payload -> assertEquals("pong", payload) } }

            return "ok"
        }
    }

    @InitiatedBy(SenderFlow::class)
    class RecipientFlow(private val otherPartySession: FlowSession): FlowLogic<String>() {
        @Suspendable
        override fun call(): String {
            otherPartySession.receive<String>().unwrap { it }
            otherPartySession.send("pong")

            return "ok"
        }
    }
}
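The test above exercises the batched session APIs; the essential shape, sketched with illustrative names (a responder such as RecipientFlow above is assumed to answer each session with "pong"):

@InitiatingFlow
@StartableByRPC
class BroadcastFlow(private val counterparty: Party, private val payloads: List<String>) : FlowLogic<Unit>() {
    @Suspendable
    override fun call() {
        // One fresh session per payload, all to the same counterparty.
        val messagesPerSession: Map<FlowSession, String> = payloads.associateBy { initiateFlow(counterparty) }
        sendAllMap(messagesPerSession)                                   // a single suspension sends to every session
        receiveAll(String::class.java, messagesPerSession.keys.toList()) // a single suspension collects every reply
                .forEach { it.unwrap { reply -> logger.info("Received $reply") } }
    }
}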
@@ -5,6 +5,7 @@ import net.corda.core.CordaRuntimeException
import net.corda.core.flows.*
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
@@ -58,8 +59,10 @@ class RpcExceptionHandlingTest {
        }

        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()))) {
-           val devModeNode = startNode(params, BOB_NAME).getOrThrow()
-           val node = startNode(ALICE_NAME, devMode = false, parameters = params).getOrThrow()
+           val (devModeNode, node) = listOf(startNode(params, BOB_NAME),
+                   startNode(ALICE_NAME, devMode = false, parameters = params))
+                   .transpose()
+                   .getOrThrow()

            assertThatThrownExceptionIsReceivedUnwrapped(devModeNode)
            assertThatThrownExceptionIsReceivedUnwrapped(node)
@@ -77,8 +80,10 @@ class RpcExceptionHandlingTest {
        }

        driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()))) {
-           val devModeNode = startNode(params, BOB_NAME).getOrThrow()
-           val node = startNode(ALICE_NAME, devMode = false, parameters = params).getOrThrow()
+           val (devModeNode, node) = listOf(startNode(params, BOB_NAME),
+                   startNode(ALICE_NAME, devMode = false, parameters = params))
+                   .transpose()
+                   .getOrThrow()

            assertThatThrownBy { devModeNode.throwExceptionFromFlow() }.isInstanceOfSatisfying(FlowException::class.java) { exception ->
                assertThat(exception).hasNoCause()
@@ -102,8 +107,10 @@ class RpcExceptionHandlingTest {

        fun DriverDSL.scenario(nameA: CordaX500Name, nameB: CordaX500Name, devMode: Boolean) {

-           val nodeA = startNode(nameA, devMode, params).getOrThrow()
-           val nodeB = startNode(nameB, devMode, params).getOrThrow()
+           val (nodeA, nodeB) = listOf(nameA, nameB)
+                   .map { startNode(it, devMode, params) }
+                   .transpose()
+                   .getOrThrow()

            nodeA.rpc.startFlow(::InitFlow, nodeB.nodeInfo.singleIdentity()).returnValue.getOrThrow()
        }
@@ -15,6 +15,7 @@ import net.corda.core.flows.NotaryException
import net.corda.core.flows.ReceiveFinalityFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
+import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.StateMachineUpdate
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.OpaqueBytes
@@ -46,14 +47,20 @@ class FlowHospitalTest {

    private val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))

-   @Test(timeout=300_000)
+   @Test(timeout = 300_000)
    fun `when double spend occurs, the flow is successfully deleted on the counterparty`() {
        driver(DriverParameters(cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts")))) {
-           val charlie = startNode(providedName = CHARLIE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
-           val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
-           val charlieClient = CordaRPCClient(charlie.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
-           val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
+           val (charlieClient, aliceClient) = listOf(CHARLIE_NAME, ALICE_NAME)
+                   .map {
+                       startNode(providedName = it,
+                               rpcUsers = listOf(rpcUser))
+                   }
+                   .transpose()
+                   .getOrThrow()
+                   .map {
+                       CordaRPCClient(it.rpcAddress)
+                               .start(rpcUser.username, rpcUser.password).proxy
+                   }

            val aliceParty = aliceClient.nodeInfo().legalIdentities.first()

@@ -80,7 +87,7 @@ class FlowHospitalTest {
            val secondStateAndRef = charlieClient.startFlow(::IssueFlow, defaultNotaryIdentity).returnValue.get()
            charlieClient.startFlow(::SpendFlowWithCustomException, secondStateAndRef, aliceParty).returnValue.get()

-           val secondSubscription = aliceClient.stateMachinesFeed().updates.subscribe{
+           val secondSubscription = aliceClient.stateMachinesFeed().updates.subscribe {
                if (it is StateMachineUpdate.Removed && it.result.isFailure)
                    secondLatch.countDown()
            }
@@ -95,75 +102,75 @@ class FlowHospitalTest {
        }
    }

-   @Test(timeout=300_000)
+   @Test(timeout = 300_000)
    fun `HospitalizeFlowException thrown`() {
        var observationCounter: Int = 0
        StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
            ++observationCounter
        }
        driver(
                DriverParameters(
                        startNodesInProcess = true,
                        cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
                )
        ) {
            val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
            val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
            assertFailsWith<TimeoutException> {
                aliceClient.startFlow(::ThrowingHospitalisedExceptionFlow, HospitalizeFlowException::class.java)
                        .returnValue.getOrThrow(5.seconds)
            }
            assertEquals(1, observationCounter)
        }
    }

-   @Test(timeout=300_000)
+   @Test(timeout = 300_000)
    fun `Custom exception wrapping HospitalizeFlowException thrown`() {
        var observationCounter: Int = 0
        StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
            ++observationCounter
        }
        driver(
                DriverParameters(
                        startNodesInProcess = true,
                        cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
                )
        ) {
            val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
            val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
            assertFailsWith<TimeoutException> {
                aliceClient.startFlow(::ThrowingHospitalisedExceptionFlow, WrappingHospitalizeFlowException::class.java)
                        .returnValue.getOrThrow(5.seconds)
            }
            assertEquals(1, observationCounter)
        }
    }

-   @Test(timeout=300_000)
+   @Test(timeout = 300_000)
    fun `Custom exception extending HospitalizeFlowException thrown`() {
        var observationCounter: Int = 0
        StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
            ++observationCounter
        }
        driver(
                DriverParameters(
                        startNodesInProcess = true,
                        cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
                )
        ) {
            // one node will be enough for this testing
            val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
            val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
            assertFailsWith<TimeoutException> {
                aliceClient.startFlow(::ThrowingHospitalisedExceptionFlow, ExtendingHospitalizeFlowException::class.java)
                        .returnValue.getOrThrow(5.seconds)
            }
            assertEquals(1, observationCounter)
        }
    }

-   @Test(timeout=300_000)
+   @Test(timeout = 300_000)
    fun `HospitalizeFlowException cloaking an important exception thrown`() {
        var dischargedCounter = 0
        var observationCounter: Int = 0
        StaffedFlowHospital.onFlowDischarged.add { _, _ ->
@@ -173,16 +180,16 @@ class FlowHospitalTest {
            ++observationCounter
        }
        driver(
                DriverParameters(
                        startNodesInProcess = true,
                        cordappsForAllNodes = listOf(enclosedCordapp(), findCordapp("net.corda.testing.contracts"))
                )
        ) {
            val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(rpcUser)).getOrThrow()
            val aliceClient = CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
            assertFailsWith<TimeoutException> {
                aliceClient.startFlow(::ThrowingHospitalisedExceptionFlow, CloakingHospitalizeFlowException::class.java)
                        .returnValue.getOrThrow(5.seconds)
            }
            assertEquals(0, observationCounter)
            // Since the flow will keep getting discharged from hospital dischargedCounter will be > 1.
@@ -191,7 +198,7 @@ class FlowHospitalTest {
    }

    @StartableByRPC
-   class IssueFlow(val notary: Party): FlowLogic<StateAndRef<SingleOwnerState>>() {
+   class IssueFlow(val notary: Party) : FlowLogic<StateAndRef<SingleOwnerState>>() {

        @Suspendable
        override fun call(): StateAndRef<SingleOwnerState> {
@@ -201,12 +208,11 @@ class FlowHospitalTest {
            val notarised = subFlow(FinalityFlow(signedTransaction, emptySet<FlowSession>()))
            return notarised.coreTransaction.outRef(0)
        }
-
    }

    @StartableByRPC
    @InitiatingFlow
-   class SpendFlow(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party): FlowLogic<Unit>() {
+   class SpendFlow(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party) : FlowLogic<Unit>() {

        @Suspendable
        override fun call() {
@@ -216,11 +222,10 @@ class FlowHospitalTest {
            sessionWithCounterParty.sendAndReceive<String>("initial-message")
            subFlow(FinalityFlow(signedTransaction, setOf(sessionWithCounterParty)))
        }
-
    }

    @InitiatedBy(SpendFlow::class)
-   class AcceptSpendFlow(private val otherSide: FlowSession): FlowLogic<Unit>() {
+   class AcceptSpendFlow(private val otherSide: FlowSession) : FlowLogic<Unit>() {

        @Suspendable
        override fun call() {
@@ -229,12 +234,11 @@ class FlowHospitalTest {

            subFlow(ReceiveFinalityFlow(otherSide))
        }
-
    }

    @StartableByRPC
    @InitiatingFlow
-   class SpendFlowWithCustomException(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party):
+   class SpendFlowWithCustomException(private val stateAndRef: StateAndRef<SingleOwnerState>, private val newOwner: Party) :
            FlowLogic<Unit>() {

        @Suspendable
@@ -249,11 +253,10 @@ class FlowHospitalTest {
                throw DoubleSpendException("double spend!", e)
            }
        }
-
    }

    @InitiatedBy(SpendFlowWithCustomException::class)
-   class AcceptSpendFlowWithCustomException(private val otherSide: FlowSession): FlowLogic<Unit>() {
+   class AcceptSpendFlowWithCustomException(private val otherSide: FlowSession) : FlowLogic<Unit>() {

        @Suspendable
        override fun call() {
@@ -262,16 +265,15 @@ class FlowHospitalTest {

            subFlow(ReceiveFinalityFlow(otherSide))
        }
-
    }

-   class DoubleSpendException(message: String, cause: Throwable): FlowException(message, cause)
+   class DoubleSpendException(message: String, cause: Throwable) : FlowException(message, cause)

    @StartableByRPC
    class ThrowingHospitalisedExceptionFlow(
            // Starting this Flow from an RPC client: if we pass in an encapsulated exception within another exception then the wrapping
            // exception, when deserialized, will get grounded into a CordaRuntimeException (this happens in ThrowableSerializer#fromProxy).
-           private val hospitalizeFlowExceptionClass: Class<*>): FlowLogic<Unit>() {
+           private val hospitalizeFlowExceptionClass: Class<*>) : FlowLogic<Unit>() {

        @Suspendable
        override fun call() {
@@ -282,7 +284,7 @@ class FlowHospitalTest {
        }
    }

    class WrappingHospitalizeFlowException(cause: HospitalizeFlowException = HospitalizeFlowException()) : Exception(cause)

    class ExtendingHospitalizeFlowException : HospitalizeFlowException()

@@ -294,5 +296,4 @@ class FlowHospitalTest {
            setCause(SQLException("deadlock"))
        }
    }
-
}
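For reference, the behaviour these tests assert can be triggered from a CorDapp with nothing more than the exception type itself; a hedged sketch (the flow name is illustrative):

@StartableByRPC
class ReportForObservationFlow : FlowLogic<Unit>() {
    @Suspendable
    override fun call() {
        // Rather than propagating an error back to the RPC caller, this asks the node's flow hospital
        // to keep the flow checkpointed for overnight observation, as asserted by the tests above.
        throw HospitalizeFlowException()
    }
}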
@@ -16,6 +16,7 @@ import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.Party
import net.corda.core.internal.concurrent.openFuture
+import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.node.services.Vault
import net.corda.core.node.services.vault.QueryCriteria
@@ -24,7 +25,7 @@ import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.node.services.Permissions
import net.corda.node.services.statemachine.StaffedFlowHospital
-import net.corda.node.services.transactions.PersistentUniquenessProvider
+import net.corda.notary.jpa.JPAUniquenessProvider
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.singleIdentity
@@ -450,8 +451,11 @@ class VaultObserverExceptionTest {
                findCordapp("com.r3.dbfailure.schemas")
        ),inMemoryDB = false)
        ) {
-           val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-           val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+           val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it,
+                           rpcUsers = listOf(user)) }
+                   .transpose()
+                   .getOrThrow()
            val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()

            val startErrorInObservableWhenConsumingState = {
@@ -540,8 +544,11 @@ class VaultObserverExceptionTest {
                ),
                inMemoryDB = false)
        ) {
-           val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-           val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+           val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it,
+                           rpcUsers = listOf(user)) }
+                   .transpose()
+                   .getOrThrow()
            val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()

            val startErrorInObservableWhenConsumingState = {
@@ -622,8 +629,11 @@ class VaultObserverExceptionTest {
                ),
                inMemoryDB = false)
        ) {
-           val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-           val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+           val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it,
+                           rpcUsers = listOf(user)) }
+                   .transpose()
+                   .getOrThrow()
            val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()

            val startErrorInObservableWhenCreatingSecondState = {
@@ -699,8 +709,11 @@ class VaultObserverExceptionTest {
                ),
                inMemoryDB = false)
        ) {
-           val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
-           val bobNode = startNode(providedName = BOB_NAME, rpcUsers = listOf(user)).getOrThrow()
+           val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
+                   .map { startNode(providedName = it,
+                           rpcUsers = listOf(user)) }
+                   .transpose()
+                   .getOrThrow()
            val notary = defaultNotaryHandle.nodeHandles.getOrThrow().first()

            val startErrorInObservableWhenConsumingState = {
@@ -843,8 +856,8 @@ class VaultObserverExceptionTest {
        override fun call(): List<String> {
            return serviceHub.withEntityManager {
                val criteriaQuery = this.criteriaBuilder.createQuery(String::class.java)
-               val root = criteriaQuery.from(PersistentUniquenessProvider.CommittedTransaction::class.java)
-               criteriaQuery.select(root.get<String>(PersistentUniquenessProvider.CommittedTransaction::transactionId.name))
+               val root = criteriaQuery.from(JPAUniquenessProvider.CommittedTransaction::class.java)
+               criteriaQuery.select(root.get(JPAUniquenessProvider.CommittedTransaction::transactionId.name))
                val query = this.createQuery(criteriaQuery)
                query.resultList
            }
@ -59,6 +59,8 @@ import net.corda.core.schemas.MappedSchema
|
|||||||
import net.corda.core.serialization.SerializationWhitelist
|
import net.corda.core.serialization.SerializationWhitelist
|
||||||
import net.corda.core.serialization.SerializeAsToken
|
import net.corda.core.serialization.SerializeAsToken
|
||||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||||
|
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
|
||||||
|
import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
|
||||||
import net.corda.core.toFuture
|
import net.corda.core.toFuture
|
||||||
import net.corda.core.transactions.LedgerTransaction
|
import net.corda.core.transactions.LedgerTransaction
|
||||||
import net.corda.core.utilities.NetworkHostAndPort
|
import net.corda.core.utilities.NetworkHostAndPort
|
||||||
@ -131,7 +133,6 @@ import net.corda.node.services.statemachine.StateMachineManager
|
|||||||
import net.corda.node.services.transactions.BasicVerifierFactoryService
|
import net.corda.node.services.transactions.BasicVerifierFactoryService
|
||||||
import net.corda.node.services.transactions.DeterministicVerifierFactoryService
|
import net.corda.node.services.transactions.DeterministicVerifierFactoryService
|
||||||
import net.corda.node.services.transactions.InMemoryTransactionVerifierService
|
import net.corda.node.services.transactions.InMemoryTransactionVerifierService
|
||||||
import net.corda.node.services.transactions.SimpleNotaryService
|
|
||||||
import net.corda.node.services.transactions.VerifierFactoryService
|
import net.corda.node.services.transactions.VerifierFactoryService
|
||||||
import net.corda.node.services.upgrade.ContractUpgradeServiceImpl
|
import net.corda.node.services.upgrade.ContractUpgradeServiceImpl
|
||||||
import net.corda.node.services.vault.NodeVaultService
|
import net.corda.node.services.vault.NodeVaultService
|
||||||
@ -317,6 +318,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
|
|||||||
} else {
|
} else {
|
||||||
BasicVerifierFactoryService()
|
BasicVerifierFactoryService()
|
||||||
}
|
}
|
||||||
|
private val attachmentsClassLoaderCache: AttachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(cacheFactory).tokenize()
|
||||||
val contractUpgradeService = ContractUpgradeServiceImpl(cacheFactory).tokenize()
|
val contractUpgradeService = ContractUpgradeServiceImpl(cacheFactory).tokenize()
|
||||||
val auditService = DummyAuditService().tokenize()
|
val auditService = DummyAuditService().tokenize()
|
||||||
@Suppress("LeakingThis")
|
@Suppress("LeakingThis")
|
||||||
@@ -611,11 +613,22 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,

 val myNotaryIdentity = configuration.notary?.let {
 if (it.serviceLegalName != null) {
-val (notaryIdentity, notaryIdentityKeyPair) = loadNotaryClusterIdentity(it.serviceLegalName)
+val (notaryIdentity, notaryIdentityKeyPair) = loadNotaryServiceIdentity(it.serviceLegalName)
 keyPairs += notaryIdentityKeyPair
 notaryIdentity
 } else {
-// In case of a single notary service myNotaryIdentity will be the node's single identity.
+// The only case where the myNotaryIdentity will be the node's legal identity is for existing single notary services running
+// an older version. Current single notary services (V4.6+) sign requests using a separate notary service identity so the
+// notary identity will be different from the node's legal identity.
+
+// This check is here to ensure that a user does not accidentally/intentionally remove the serviceLegalName configuration
+// parameter after a notary has been registered. If that was possible then notary would start and sign incoming requests
+// with the node's legal identity key, corrupting the data.
+check (!cryptoService.containsKey(DISTRIBUTED_NOTARY_KEY_ALIAS)) {
+"The notary service key exists in the key store but no notary service legal name has been configured. " +
+"Either include the relevant 'notary.serviceLegalName' configuration or validate this key is not necessary " +
+"and remove from the key store."
+}
 identity
 }
 }
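Editor's note: the check introduced above refuses to start when a notary service key (DISTRIBUTED_NOTARY_KEY_ALIAS) is present in the key store but notary.serviceLegalName has been removed from the configuration, since signing with the node's legal identity key would corrupt data. As a rough sketch only, a NotaryConfig that keeps a distinct service identity might be built as follows; the X.500 name values are invented, and it is assumed that the remaining NotaryConfig parameters keep their defaults.

    import net.corda.core.identity.CordaX500Name
    import net.corda.node.services.config.NotaryConfig

    // Hypothetical values for illustration only.
    val notaryConfig = NotaryConfig(
            validating = false,
            serviceLegalName = CordaX500Name(organisation = "Notary Service", locality = "London", country = "GB")
    )

In a real deployment the same settings would normally come from the node's configuration file rather than be constructed in code.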
@@ -778,10 +791,6 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 )
 }
-
-private fun isRunningSimpleNotaryService(configuration: NodeConfiguration): Boolean {
-return configuration.notary != null && configuration.notary?.className == SimpleNotaryService::class.java.name
-}

 private class ServiceInstantiationException(cause: Throwable?) : CordaException("Service Instantiation Error", cause)

 private fun installCordaServices() {
@@ -1054,8 +1063,12 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 }
 }

-/** Loads pre-generated notary service cluster identity. */
-private fun loadNotaryClusterIdentity(serviceLegalName: CordaX500Name): Pair<PartyAndCertificate, KeyPair> {
+/**
+* Loads notary service identity. In the case of the experimental RAFT and BFT notary clusters, this loads the pre-generated
+* cluster identity that all worker nodes share. In the case of a simple single notary, this loads the notary service identity
+* that is generated during initial registration and is used to sign notarisation requests.
+* */
+private fun loadNotaryServiceIdentity(serviceLegalName: CordaX500Name): Pair<PartyAndCertificate, KeyPair> {
 val privateKeyAlias = "$DISTRIBUTED_NOTARY_KEY_ALIAS"
 val compositeKeyAlias = "$DISTRIBUTED_NOTARY_COMPOSITE_KEY_ALIAS"
@@ -1171,6 +1184,8 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
 private lateinit var _myInfo: NodeInfo
 override val myInfo: NodeInfo get() = _myInfo

+override val attachmentsClassLoaderCache: AttachmentsClassLoaderCache get() = this@AbstractNode.attachmentsClassLoaderCache
+
 private lateinit var _networkParameters: NetworkParameters
 override val networkParameters: NetworkParameters get() = _networkParameters
@@ -86,6 +86,7 @@ class NetworkParametersReader(private val trustRoot: X509Certificate,
 logger.info("No network-parameters file found. Expecting network parameters to be available from the network map.")
 networkMapClient ?: throw Error.NetworkMapNotConfigured()
 val signedParams = networkMapClient.getNetworkParameters(parametersHash)
+signedParams.verifiedNetworkParametersCert(trustRoot)
 signedParams.serialize().open().copyTo(baseDirectory / NETWORK_PARAMS_FILE_NAME)
 return signedParams
 }
@@ -644,8 +644,8 @@ open class Node(configuration: NodeConfiguration,
 storageContext = AMQP_STORAGE_CONTEXT.withClassLoader(classloader),

 checkpointSerializer = KryoCheckpointSerializer,
-checkpointContext = KRYO_CHECKPOINT_CONTEXT.withClassLoader(classloader)
+checkpointContext = KRYO_CHECKPOINT_CONTEXT.withClassLoader(classloader).withCheckpointCustomSerializers(cordappLoader.cordapps.flatMap { it.checkpointCustomSerializers })
 )
 }

 /** Starts a blocking event loop for message dispatch. */
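Editor's note: the hunk above wires CorDapp-supplied checkpoint custom serializers into the Kryo checkpoint context, and the cordapp loader changes below show how implementations of CheckpointCustomSerializer are discovered by classpath scanning. A minimal sketch of such a serializer follows; BrokenService and its proxy are invented example types, not code from this change.

    import net.corda.core.serialization.CheckpointCustomSerializer

    // Hypothetical third-party type that Kryo cannot checkpoint directly.
    class BrokenService(val endpoint: String)

    // Simple proxy type that checkpoints cleanly.
    data class BrokenServiceProxy(val endpoint: String)

    // Converts the problematic type to and from its proxy during checkpoint serialization.
    // Dropping a class like this into a CorDapp jar is what findCheckpointSerializers (below)
    // discovers and what withCheckpointCustomSerializers (above) registers.
    class BrokenServiceSerializer : CheckpointCustomSerializer<BrokenService, BrokenServiceProxy> {
        override fun toProxy(obj: BrokenService) = BrokenServiceProxy(obj.endpoint)
        override fun fromProxy(proxy: BrokenServiceProxy) = BrokenService(proxy.endpoint)
    }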
@@ -18,6 +18,7 @@ import net.corda.core.internal.notary.NotaryService
 import net.corda.core.internal.notary.SinglePartyNotaryService
 import net.corda.core.node.services.CordaService
 import net.corda.core.schemas.MappedSchema
+import net.corda.core.serialization.CheckpointCustomSerializer
 import net.corda.core.serialization.SerializationCustomSerializer
 import net.corda.core.serialization.SerializationWhitelist
 import net.corda.core.serialization.SerializeAsToken
@@ -185,6 +186,7 @@ class JarScanningCordappLoader private constructor(private val cordappJarPaths:
 findServices(this),
 findWhitelists(url),
 findSerializers(this),
+findCheckpointSerializers(this),
 findCustomSchemas(this),
 findAllFlows(this),
 url.url,
@@ -334,6 +336,10 @@ class JarScanningCordappLoader private constructor(private val cordappJarPaths:
 return scanResult.getClassesImplementingWithClassVersionCheck(SerializationCustomSerializer::class)
 }

+private fun findCheckpointSerializers(scanResult: RestrictedScanResult): List<CheckpointCustomSerializer<*, *>> {
+return scanResult.getClassesImplementingWithClassVersionCheck(CheckpointCustomSerializer::class)
+}
+
 private fun findCustomSchemas(scanResult: RestrictedScanResult): Set<MappedSchema> {
 return scanResult.getClassesWithSuperclass(MappedSchema::class).instances().toSet()
 }
@@ -6,12 +6,12 @@ import net.corda.core.flows.ContractUpgradeFlow
 import net.corda.core.internal.cordapp.CordappImpl
 import net.corda.core.internal.location
 import net.corda.node.VersionInfo
-import net.corda.node.services.transactions.NodeNotarySchemaV1
-import net.corda.node.services.transactions.SimpleNotaryService
 import net.corda.notary.experimental.bftsmart.BFTSmartNotarySchemaV1
 import net.corda.notary.experimental.bftsmart.BFTSmartNotaryService
 import net.corda.notary.experimental.raft.RaftNotarySchemaV1
 import net.corda.notary.experimental.raft.RaftNotaryService
+import net.corda.notary.jpa.JPANotarySchemaV1
+import net.corda.notary.jpa.JPANotaryService

 internal object VirtualCordapp {
 /** A list of the core RPC flows present in Corda */
@@ -32,6 +32,7 @@ internal object VirtualCordapp {
 services = listOf(),
 serializationWhitelists = listOf(),
 serializationCustomSerializers = listOf(),
+checkpointCustomSerializers = listOf(),
 customSchemas = setOf(),
 info = Cordapp.Info.Default("corda-core", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
 allFlows = listOf(),
@@ -45,7 +46,7 @@ internal object VirtualCordapp {
 }

 /** A Cordapp for the built-in notary service implementation. */
-fun generateSimpleNotary(versionInfo: VersionInfo): CordappImpl {
+fun generateJPANotary(versionInfo: VersionInfo): CordappImpl {
 return CordappImpl(
 contractClassNames = listOf(),
 initiatedFlows = listOf(),
@@ -55,15 +56,17 @@ internal object VirtualCordapp {
 services = listOf(),
 serializationWhitelists = listOf(),
 serializationCustomSerializers = listOf(),
-customSchemas = setOf(NodeNotarySchemaV1),
+checkpointCustomSerializers = listOf(),
+customSchemas = setOf(JPANotarySchemaV1),
 info = Cordapp.Info.Default("corda-notary", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
 allFlows = listOf(),
-jarPath = SimpleNotaryService::class.java.location,
+jarPath = JPANotaryService::class.java.location,
 jarHash = SecureHash.allOnesHash,
 minimumPlatformVersion = versionInfo.platformVersion,
 targetPlatformVersion = versionInfo.platformVersion,
-notaryService = SimpleNotaryService::class.java,
-isLoaded = false
+notaryService = JPANotaryService::class.java,
+isLoaded = false,
+isVirtual = true
 )
 }

@@ -78,6 +81,7 @@ internal object VirtualCordapp {
 services = listOf(),
 serializationWhitelists = listOf(),
 serializationCustomSerializers = listOf(),
+checkpointCustomSerializers = listOf(),
 customSchemas = setOf(RaftNotarySchemaV1),
 info = Cordapp.Info.Default("corda-notary-raft", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
 allFlows = listOf(),
@@ -101,6 +105,7 @@ internal object VirtualCordapp {
 services = listOf(),
 serializationWhitelists = listOf(),
 serializationCustomSerializers = listOf(),
+checkpointCustomSerializers = listOf(),
 customSchemas = setOf(BFTSmartNotarySchemaV1),
 info = Cordapp.Info.Default("corda-notary-bft-smart", versionInfo.vendor, versionInfo.releaseVersion, "Open Source (Apache 2)"),
 allFlows = listOf(),
@@ -37,6 +37,7 @@ class MigrationNamedCacheFactory(private val metricRegistry: MetricRegistry?,
 "NodeAttachmentService_contractAttachmentVersions" -> caffeine.maximumSize(defaultCacheSize)
 "NodeParametersStorage_networkParametersByHash" -> caffeine.maximumSize(defaultCacheSize)
 "NodeAttachmentTrustCalculator_trustedKeysCache" -> caffeine.maximumSize(defaultCacheSize)
+"AttachmentsClassLoader_cache" -> caffeine.maximumSize(defaultCacheSize)
 else -> throw IllegalArgumentException("Unexpected cache name $name.")
 }
 }
@@ -15,6 +15,8 @@ import net.corda.core.node.services.NetworkParametersService
 import net.corda.core.node.services.TransactionStorage
 import net.corda.core.serialization.deserialize
 import net.corda.core.serialization.internal.AttachmentsClassLoaderBuilder
+import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
+import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
 import net.corda.core.transactions.ContractUpgradeLedgerTransaction
 import net.corda.core.transactions.NotaryChangeLedgerTransaction
 import net.corda.core.transactions.WireTransaction
@@ -62,6 +64,8 @@ class MigrationServicesForResolution(
 cacheFactory
 )

+private val attachmentsClassLoaderCache: AttachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(cacheFactory)
+
 private fun defaultNetworkParameters(): NetworkParameters {
 logger.warn("Using a dummy set of network parameters for migration.")
 val clock = Clock.systemUTC()
@@ -124,7 +128,8 @@ class MigrationServicesForResolution(
 networkParameters,
 tx.id,
 attachmentTrustCalculator::calculate,
-cordappLoader.appClassLoader) {
+cordappLoader.appClassLoader,
+attachmentsClassLoaderCache) {
 deserialiseComponentGroup(tx.componentGroups, TransactionState::class, ComponentGroupEnum.OUTPUTS_GROUP, forceDeserialize = true)
 }
 states.filterIndexed {index, _ -> stateIndices.contains(index)}.toList()
@@ -93,6 +93,8 @@ interface NodeConfiguration : ConfigurationWithOptionsContainer {

 val quasarExcludePackages: List<String>

+val reloadCheckpointAfterSuspend: Boolean
+
 companion object {
 // default to at least 8MB and a bit extra for larger heap sizes
 val defaultTransactionCacheSize: Long = 8.MB + getAdditionalCacheMemory()
@@ -125,9 +127,13 @@ enum class JmxReporterType {
 }

 data class DevModeOptions(
-val disableCheckpointChecker: Boolean = Defaults.disableCheckpointChecker,
-val allowCompatibilityZone: Boolean = Defaults.allowCompatibilityZone,
-val djvm: DJVMOptions? = null
+@Deprecated(
+"The checkpoint checker has been replaced by the ability to reload a checkpoint from the database after every suspend" +
+"Use [NodeConfiguration.disableReloadCheckpointAfterSuspend] instead."
+)
+val disableCheckpointChecker: Boolean = Defaults.disableCheckpointChecker,
+val allowCompatibilityZone: Boolean = Defaults.allowCompatibilityZone,
+val djvm: DJVMOptions? = null
 ) {
 internal object Defaults {
 val disableCheckpointChecker = false
@@ -140,10 +146,6 @@ data class DJVMOptions(
 val cordaSource: List<String>
 )
-
-fun NodeConfiguration.shouldCheckCheckpoints(): Boolean {
-return this.devMode && this.devModeOptions?.disableCheckpointChecker != true
-}

 fun NodeConfiguration.shouldStartSSHDaemon() = this.sshd != null
 fun NodeConfiguration.shouldStartLocalShell() = !this.noLocalShell && System.console() != null && this.devMode
 fun NodeConfiguration.shouldInitCrashShell() = shouldStartLocalShell() || shouldStartSSHDaemon()
@@ -151,7 +153,7 @@ fun NodeConfiguration.shouldInitCrashShell() = shouldStartLocalShell() || should
 data class NotaryConfig(
 /** Specifies whether the notary validates transactions or not. */
 val validating: Boolean,
-/** The legal name of cluster in case of a distributed notary service. */
+/** The legal name of the notary service identity. */
 val serviceLegalName: CordaX500Name? = null,
 /** The name of the notary service class to load. */
 val className: String? = null,
@@ -84,7 +84,9 @@ data class NodeConfigurationImpl(
 override val blacklistedAttachmentSigningKeys: List<String> = Defaults.blacklistedAttachmentSigningKeys,
 override val configurationWithOptions: ConfigurationWithOptions,
 override val flowExternalOperationThreadPoolSize: Int = Defaults.flowExternalOperationThreadPoolSize,
-override val quasarExcludePackages: List<String> = Defaults.quasarExcludePackages
+override val quasarExcludePackages: List<String> = Defaults.quasarExcludePackages,
+override val reloadCheckpointAfterSuspend: Boolean = Defaults.reloadCheckpointAfterSuspend
+
 ) : NodeConfiguration {
 internal object Defaults {
 val jmxMonitoringHttpPort: Int? = null
@@ -123,6 +125,7 @@ data class NodeConfigurationImpl(
 val blacklistedAttachmentSigningKeys: List<String> = emptyList()
 const val flowExternalOperationThreadPoolSize: Int = 1
 val quasarExcludePackages: List<String> = emptyList()
+val reloadCheckpointAfterSuspend: Boolean = System.getProperty("reloadCheckpointAfterSuspend", "false")!!.toBoolean()

 fun cordappsDirectories(baseDirectory: Path) = listOf(baseDirectory / CORDAPPS_DIR_NAME_DEFAULT)
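Editor's note: the new default above means checkpoint reloading after each suspend stays off unless the reloadCheckpointAfterSuspend system property (or the configuration key parsed by the spec in the next hunk) enables it. A tiny sketch of flipping the property from test code, assuming it is set before the configuration defaults are first loaded:

    fun main() {
        // Matches the key read by NodeConfigurationImpl.Defaults.reloadCheckpointAfterSuspend.
        System.setProperty("reloadCheckpointAfterSuspend", "true")
        // ... start the node or driver after this point (an assumption of this sketch).
    }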
@@ -8,6 +8,7 @@ import net.corda.common.validation.internal.Validated.Companion.invalid
 import net.corda.common.validation.internal.Validated.Companion.valid
 import net.corda.node.services.config.*
 import net.corda.node.services.config.NodeConfigurationImpl.Defaults
+import net.corda.node.services.config.NodeConfigurationImpl.Defaults.reloadCheckpointAfterSuspend
 import net.corda.node.services.config.schema.parsers.*

 internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfiguration>("NodeConfiguration") {
@@ -66,6 +67,7 @@ internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfig
 .withDefaultValue(Defaults.networkParameterAcceptanceSettings)
 private val flowExternalOperationThreadPoolSize by int().optional().withDefaultValue(Defaults.flowExternalOperationThreadPoolSize)
 private val quasarExcludePackages by string().list().optional().withDefaultValue(Defaults.quasarExcludePackages)
+private val reloadCheckpointAfterSuspend by boolean().optional().withDefaultValue(Defaults.reloadCheckpointAfterSuspend)
 @Suppress("unused")
 private val custom by nestedObject().optional()
 @Suppress("unused")
@@ -133,7 +135,8 @@ internal object V1NodeConfigurationSpec : Configuration.Specification<NodeConfig
 networkParameterAcceptanceSettings = config[networkParameterAcceptanceSettings],
 configurationWithOptions = ConfigurationWithOptions(configuration, Configuration.Options.defaults),
 flowExternalOperationThreadPoolSize = config[flowExternalOperationThreadPoolSize],
-quasarExcludePackages = config[quasarExcludePackages]
+quasarExcludePackages = config[quasarExcludePackages],
+reloadCheckpointAfterSuspend = config[reloadCheckpointAfterSuspend]
 ))
 } catch (e: Exception) {
 return when (e) {
@@ -54,8 +54,8 @@ class MessagingExecutor(
 }

 @Synchronized
-fun sendAll(messages: Map<MessageRecipients, Message>) {
-messages.forEach { recipients, message -> send(message, recipients) }
+fun sendAll(messages: List<Pair<MessageRecipients, Message>>) {
+messages.forEach { (recipients, message) -> send(message, recipients) }
 }

 @Synchronized
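Editor's note: sendAll now accepts List<Pair<MessageRecipients, Message>> instead of Map<MessageRecipients, Message>, which (as far as this editor can tell) preserves ordering and allows several messages to the same recipient rather than collapsing them under one map key. A self-contained Kotlin sketch of the difference, using plain strings in place of the real MessageRecipients and Message types:

    fun main() {
        // With a Map, a second entry for the same recipient overwrites the first.
        val asMap = mapOf("peerA" to "msg1", "peerA" to "msg2")
        println(asMap.size)  // prints 1 - one message is lost

        // With a list of pairs (the shape of the new signature), both entries survive, in order.
        val asList = listOf("peerA" to "msg1", "peerA" to "msg2")
        println(asList.size) // prints 2
    }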
@@ -121,6 +121,7 @@ class P2PMessagingClient(val config: NodeConfiguration,
 var bridgeSession: ClientSession? = null
 var bridgeNotifyConsumer: ClientConsumer? = null
 var networkChangeSubscription: Subscription? = null
+var sessionFactory: ClientSessionFactory? = null

 fun sendMessage(address: String, message: ClientMessage) = producer!!.send(address, message)
 }
@@ -172,7 +173,7 @@ class P2PMessagingClient(val config: NodeConfiguration,
 minLargeMessageSize = maxMessageSize + JOURNAL_HEADER_SIZE
 isUseGlobalPools = nodeSerializationEnv != null
 }
-val sessionFactory = locator!!.createSessionFactory().addFailoverListener(::failoverCallback)
+sessionFactory = locator!!.createSessionFactory().addFailoverListener(::failoverCallback)
 // Login using the node username. The broker will authenticate us as its node (as opposed to another peer)
 // using our TLS certificate.
 // Note that the acknowledgement of messages is not flushed to the Artermis journal until the default buffer
@@ -490,8 +491,10 @@ class P2PMessagingClient(val config: NodeConfiguration,
 // Wait for the main loop to notice the consumer has gone and finish up.
 shutdownLatch.await()
 }
+
 // Only first caller to gets running true to protect against double stop, which seems to happen in some integration tests.
 state.locked {
+sessionFactory?.close()
 locator?.close()
 }
 }
@@ -2,6 +2,7 @@ package net.corda.node.services.network

 import net.corda.core.crypto.SecureHash
 import net.corda.core.crypto.SignedData
+import net.corda.core.crypto.sha256
 import net.corda.core.internal.openHttpConnection
 import net.corda.core.internal.post
 import net.corda.core.internal.responseAs
@@ -13,6 +14,7 @@ import net.corda.core.utilities.seconds
 import net.corda.core.utilities.trace
 import net.corda.node.VersionInfo
 import net.corda.node.utilities.registration.cacheControl
+import net.corda.node.utilities.registration.cordaServerVersion
 import net.corda.nodeapi.internal.SignedNodeInfo
 import net.corda.nodeapi.internal.network.NetworkMap
 import net.corda.nodeapi.internal.network.SignedNetworkMap
@@ -61,8 +63,9 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
 val signedNetworkMap = connection.responseAs<SignedNetworkMap>()
 val networkMap = signedNetworkMap.verifiedNetworkMapCert(trustRoot)
 val timeout = connection.cacheControl.maxAgeSeconds().seconds
+val version = connection.cordaServerVersion
 logger.trace { "Fetched network map update from $url successfully: $networkMap" }
-return NetworkMapResponse(networkMap, timeout)
+return NetworkMapResponse(networkMap, timeout, version)
 }

 fun getNodeInfo(nodeInfoHash: SecureHash): NodeInfo {
@@ -81,6 +84,23 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
 return networkParameter
 }

+fun getNodeInfos(): List<NodeInfo> {
+val url = URL("$networkMapUrl/node-infos")
+logger.trace { "Fetching node infos from $url." }
+val verifiedNodeInfo = url.openHttpConnection().responseAs<Pair<SignedNetworkMap, List<SignedNodeInfo>>>()
+.also {
+val verifiedNodeInfoHashes = it.first.verifiedNetworkMapCert(trustRoot).nodeInfoHashes
+val nodeInfoHashes = it.second.map { signedNodeInfo -> signedNodeInfo.verified().serialize().sha256() }
+require(
+verifiedNodeInfoHashes.containsAll(nodeInfoHashes) &&
+verifiedNodeInfoHashes.size == nodeInfoHashes.size
+)
+}
+.second.map { it.verified() }
+logger.trace { "Fetched node infos successfully. Node Infos size: ${verifiedNodeInfo.size}" }
+return verifiedNodeInfo
+}
+
 fun myPublicHostname(): String {
 val url = URL("$networkMapUrl/my-hostname")
 logger.trace { "Resolving public hostname from '$url'." }
@@ -90,4 +110,4 @@ class NetworkMapClient(compatibilityZoneURL: URL, private val versionInfo: Versi
 }
 }

-data class NetworkMapResponse(val payload: NetworkMap, val cacheMaxAge: Duration)
+data class NetworkMapResponse(val payload: NetworkMap, val cacheMaxAge: Duration, val serverVersion: String)
@@ -4,6 +4,7 @@ import com.google.common.util.concurrent.MoreExecutors
 import net.corda.core.CordaRuntimeException
 import net.corda.core.crypto.SecureHash
 import net.corda.core.crypto.SignedData
+import net.corda.core.crypto.sha256
 import net.corda.core.internal.NetworkParametersStorage
 import net.corda.core.internal.VisibleForTesting
 import net.corda.core.internal.copyTo
@@ -65,6 +66,7 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
 companion object {
 private val logger = contextLogger()
 private val defaultRetryInterval = 1.minutes
+private const val bulkNodeInfoFetchThreshold = 50
 }

 private val parametersUpdatesTrack = PublishSubject.create<ParametersUpdateInfo>()
@@ -173,17 +175,9 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
 if (networkMapClient == null) {
 throw CordaRuntimeException("Network map cache can be updated only if network map/compatibility zone URL is specified")
 }
-val (globalNetworkMap, cacheTimeout) = networkMapClient.getNetworkMap()
+val (globalNetworkMap, cacheTimeout, version) = networkMapClient.getNetworkMap()
 globalNetworkMap.parametersUpdate?.let { handleUpdateNetworkParameters(networkMapClient, it) }
-val additionalHashes = extraNetworkMapKeys.flatMap {
-try {
-networkMapClient.getNetworkMap(it).payload.nodeInfoHashes
-} catch (e: Exception) {
-// Failure to retrieve one network map using UUID shouldn't stop the whole update.
-logger.warn("Error encountered when downloading network map with uuid '$it', skipping...", e)
-emptyList<SecureHash>()
-}
-}
+val additionalHashes = getPrivateNetworkNodeHashes(version)
 val allHashesFromNetworkMap = (globalNetworkMap.nodeInfoHashes + additionalHashes).toSet()
 if (currentParametersHash != globalNetworkMap.networkParameterHash) {
 exitOnParametersMismatch(globalNetworkMap)
@@ -194,6 +188,37 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
 val allNodeHashes = networkMapCache.allNodeHashes
 val nodeHashesToBeDeleted = (allNodeHashes - allHashesFromNetworkMap - nodeInfoWatcher.processedNodeInfoHashes)
 .filter { it != ourNodeInfoHash }
+// enforce bulk fetch when no other nodes are known or unknown nodes count is less than threshold
+if (version == "1" || (allNodeHashes.size > 1 && (allHashesFromNetworkMap - allNodeHashes).size < bulkNodeInfoFetchThreshold))
+updateNodeInfosV1(allHashesFromNetworkMap, allNodeHashes, networkMapClient)
+else
+updateNodeInfos(allHashesFromNetworkMap)
+// NOTE: We remove nodes after any new/updates because updated nodes will have a new hash and, therefore, any
+// nodes that we can actually pull out of the cache (with the old hashes) should be a truly removed node.
+nodeHashesToBeDeleted.mapNotNull { networkMapCache.getNodeByHash(it) }.forEach(networkMapCache::removeNode)
+
+// Mark the network map cache as ready on a successful poll of the HTTP network map, even on the odd chance that
+// it's empty
+networkMapCache.nodeReady.set(null)
+return cacheTimeout
+}
+
+private fun updateNodeInfos(allHashesFromNetworkMap: Set<SecureHash>) {
+val networkMapDownloadStartTime = System.currentTimeMillis()
+val nodeInfos = try {
+networkMapClient!!.getNodeInfos()
+} catch (e: Exception) {
+logger.warn("Error encountered when downloading node infos", e)
+emptyList<NodeInfo>()
+}
+(allHashesFromNetworkMap - nodeInfos.map { it.serialize().sha256() }).forEach {
+logger.warn("Error encountered when downloading node info '$it', skipping...")
+}
+networkMapCache.addOrUpdateNodes(nodeInfos)
+logger.info("Fetched: ${nodeInfos.size} using 1 bulk request in ${System.currentTimeMillis() - networkMapDownloadStartTime}ms")
+}
+
+private fun updateNodeInfosV1(allHashesFromNetworkMap: Set<SecureHash>, allNodeHashes: List<SecureHash>, networkMapClient: NetworkMapClient) {
 //at the moment we use a blocking HTTP library - but under the covers, the OS will interleave threads waiting for IO
 //as HTTP GET is mostly IO bound, use more threads than CPU's
 //maximum threads to use = 24, as if we did not limit this on large machines it could result in 100's of concurrent requests
@@ -230,14 +255,25 @@ class NetworkMapUpdater(private val networkMapCache: NetworkMapCacheInternal,
 executorToUseForInsertionIntoDB.shutdown()
 }.getOrThrow()
 }
-// NOTE: We remove nodes after any new/updates because updated nodes will have a new hash and, therefore, any
-// nodes that we can actually pull out of the cache (with the old hashes) should be a truly removed node.
-nodeHashesToBeDeleted.mapNotNull { networkMapCache.getNodeByHash(it) }.forEach(networkMapCache::removeNode)
+}

-// Mark the network map cache as ready on a successful poll of the HTTP network map, even on the odd chance that
-// it's empty
-networkMapCache.nodeReady.set(null)
-return cacheTimeout
+private fun getPrivateNetworkNodeHashes(version: String): List<SecureHash> {
+// private networks are not supported by latest versions of Network Map
+// for compatibility reasons, this call is still present for new nodes that communicate with old Network Map service versions
+// but can be omitted if we know that the version of the Network Map is recent enough
+return if (version == "1") {
+extraNetworkMapKeys.flatMap {
+try {
+networkMapClient!!.getNetworkMap(it).payload.nodeInfoHashes
+} catch (e: Exception) {
+// Failure to retrieve one network map using UUID shouldn't stop the whole update.
+logger.warn("Error encountered when downloading network map with uuid '$it', skipping...", e)
+emptyList<SecureHash>()
+}
+}
+} else {
+emptyList()
+}
 }

 private fun exitOnParametersMismatch(networkMap: NetworkMap) {
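Editor's note: the updater now picks between the new bulk /node-infos download and the original per-hash (V1) download. The selection rule added above can be restated as a small pure function; this is the editor's sketch (the threshold of 50 mirrors bulkNodeInfoFetchThreshold in the diff, while the function and parameter names are invented):

    // Returns true when the per-hash (V1) download path should be used instead of the bulk fetch.
    fun shouldUseV1Fetch(
            serverVersion: String,
            knownNodeHashCount: Int,
            missingNodeInfoCount: Int,
            bulkNodeInfoFetchThreshold: Int = 50
    ): Boolean =
            serverVersion == "1" ||
                    (knownNodeHashCount > 1 && missingNodeInfoCount < bulkNodeInfoFetchThreshold)

    fun main() {
        // A fresh node that knows no other nodes, talking to a modern server: bulk fetch (prints false).
        println(shouldUseV1Fetch(serverVersion = "2", knownNodeHashCount = 0, missingNodeInfoCount = 500))
        // Only a handful of node-infos missing locally: cheaper to fetch them one by one (prints true).
        println(shouldUseV1Fetch(serverVersion = "2", knownNodeHashCount = 100, missingNodeInfoCount = 3))
    }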
@@ -60,13 +60,11 @@ import net.corda.nodeapi.internal.lifecycle.NodeLifecycleObserver.Companion.repo
 import net.corda.node.internal.NodeStartup
 import net.corda.node.services.api.CheckpointStorage
 import net.corda.node.services.statemachine.Checkpoint
-import net.corda.node.services.statemachine.DataSessionMessage
 import net.corda.node.services.statemachine.ErrorState
-import net.corda.node.services.statemachine.FlowError
+import net.corda.node.services.statemachine.ExistingSessionMessagePayload
 import net.corda.node.services.statemachine.FlowSessionImpl
 import net.corda.node.services.statemachine.FlowState
 import net.corda.node.services.statemachine.FlowStateMachineImpl
-import net.corda.node.services.statemachine.InitiatedSessionState
 import net.corda.node.services.statemachine.SessionId
 import net.corda.node.services.statemachine.SessionState
 import net.corda.node.services.statemachine.SubFlow
@@ -325,6 +323,7 @@ class CheckpointDumperImpl(private val checkpointStorage: CheckpointStorage, pri
 val send: List<SendJson>? = null,
 val receive: NonEmptySet<FlowSession>? = null,
 val sendAndReceive: List<SendJson>? = null,
+val closeSessions: NonEmptySet<FlowSession>? = null,
 val waitForLedgerCommit: SecureHash? = null,
 val waitForStateConsumption: Set<StateRef>? = null,
 val getFlowInfo: NonEmptySet<FlowSession>? = null,
@@ -352,6 +351,7 @@ class CheckpointDumperImpl(private val checkpointStorage: CheckpointStorage, pri
 is FlowIORequest.Send -> SuspendedOn(send = sessionToMessage.toJson())
 is FlowIORequest.Receive -> SuspendedOn(receive = sessions)
 is FlowIORequest.SendAndReceive -> SuspendedOn(sendAndReceive = sessionToMessage.toJson())
+is FlowIORequest.CloseSessions -> SuspendedOn(closeSessions = sessions)
 is FlowIORequest.WaitForLedgerCommit -> SuspendedOn(waitForLedgerCommit = hash)
 is FlowIORequest.GetFlowInfo -> SuspendedOn(getFlowInfo = sessions)
 is FlowIORequest.Sleep -> SuspendedOn(sleepTill = wakeUpAfter)
@@ -379,16 +379,14 @@ class CheckpointDumperImpl(private val checkpointStorage: CheckpointStorage, pri
 private class ActiveSession(
 val peer: Party,
 val ourSessionId: SessionId,
-val receivedMessages: List<DataSessionMessage>,
-val errors: List<FlowError>,
+val receivedMessages: List<ExistingSessionMessagePayload>,
 val peerFlowInfo: FlowInfo,
 val peerSessionId: SessionId?
 )

 private fun SessionState.toActiveSession(sessionId: SessionId): ActiveSession? {
 return if (this is SessionState.Initiated) {
-val peerSessionId = (initiatedState as? InitiatedSessionState.Live)?.peerSinkSessionId
-ActiveSession(peerParty, sessionId, receivedMessages, errors, peerFlowInfo, peerSessionId)
+ActiveSession(peerParty, sessionId, receivedMessages, peerFlowInfo, peerSinkSessionId)
 } else {
 null
 }
@@ -66,7 +66,6 @@ class NodeSchemaService(private val extraSchemas: Set<MappedSchema> = emptySet()
 // when mapped schemas from the finance module are present, they are considered as internal ones
 schema::class.qualifiedName == "net.corda.finance.schemas.CashSchemaV1" ||
 schema::class.qualifiedName == "net.corda.finance.schemas.CommercialPaperSchemaV1" ||
-schema::class.qualifiedName == "net.corda.node.services.transactions.NodeNotarySchemaV1" ||
 schema::class.qualifiedName?.startsWith("net.corda.notary.") ?: false
 }
Some files were not shown because too many files have changed in this diff.