Merge remote-tracking branch 'origin/release/os/4.6' into christians/ENT-5273-update-from-os-4.6

Christian Sailer 2020-07-21 14:17:27 +01:00
commit 377c3f9d78
190 changed files with 5042 additions and 3630 deletions

View File

@ -3,11 +3,31 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
/**
* Sense environment
*/
boolean isReleaseTag = (env.TAG_NAME =~ /^release.*JDK11$/)
/*
** calculate the stage for NexusIQ evaluation
** * build for snapshots
** * stage-release: for release candidates and for health checks
** * operate: for final release
*/
def nexusIqStage = "build"
if (isReleaseTag) {
switch (env.TAG_NAME) {
case ~/.*-RC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
case ~/.*-HC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
default: nexusIqStage = "operate"
}
}
pipeline {
agent { label 'k8s' }
agent {
label 'k8s'
}
options {
timestamps()
buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7'))
timeout(time: 3, unit: 'HOURS')
}
@ -16,10 +36,33 @@ pipeline {
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish JDK 11 Release to Artifactory".replaceAll("/", "::")
CORDA_USE_CACHE = "corda-remotes"
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
stage('Corda Pull Request - Generate Build Image') {
stage('Sonatype Check') {
steps {
sh "./gradlew --no-daemon clean jar"
script {
sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: //'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-jdk11-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: manualApplication(nexusAppId),
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
)
}
}
stage('Generate Build Image') {
steps {
withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
sh "./gradlew " +
@ -28,8 +71,11 @@ pipeline {
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\" " +
"-Ddocker.buildbase.tag=11latest " +
"-Ddocker.container.env.parameter.CORDA_USE_CACHE=\"${CORDA_USE_CACHE}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Ddocker.dockerfile=DockerfileJDK11Azul" +
" clean pushBuildImage --stacktrace"
" clean pushBuildImage preAllocateForParallelRegressionTest preAllocateForAllParallelSlowIntegrationTest --stacktrace"
}
sh "kubectl auth can-i get pods"
}
@ -65,12 +111,49 @@ pipeline {
}
}
}
stage('Publish to Artifactory') {
agent {
dockerfile {
reuseNode true
additionalBuildArgs "--build-arg USER=stresstester"
filename '.ci/dev/compatibility/DockerfileJDK11'
}
}
when {
expression { isReleaseTag }
}
steps {
rtServer(
id: 'R3-Artifactory',
url: 'https://software.r3.com/artifactory',
credentialsId: 'artifactory-credentials'
)
rtGradleDeployer(
id: 'deployer',
serverId: 'R3-Artifactory',
repo: 'r3-corda-releases'
)
rtGradleRun(
usesPlugin: true,
useWrapper: true,
switches: '-s --info',
tasks: 'artifactoryPublish',
deployerId: 'deployer',
buildName: env.ARTIFACTORY_BUILD_NAME
)
rtPublishBuildInfo(
serverId: 'R3-Artifactory',
buildName: env.ARTIFACTORY_BUILD_NAME
)
}
}
}
post {
always {
archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
junit '**/build/test-results-xml/**/*.xml'
junit testResults: '**/build/test-results-xml/**/*.xml', allowEmptyResults: true
}
cleanup {
deleteDir() /* clean up our workspace */

View File

@ -8,12 +8,13 @@ pipeline {
dockerfile {
label 'k8s'
additionalBuildArgs "--build-arg USER=stresstester"
filename '.ci/dev/compatibility/DockerfileJDK11Compile'
filename '.ci/dev/compatibility/DockerfileJDK11'
}
}
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
stages {

View File

@ -1,62 +0,0 @@
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
pipeline {
agent { label 'k8s' }
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
}
environment {
DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
}
stages {
stage('Corda - Generate Build Image') {
steps {
withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
sh "./gradlew " +
"-Dkubenetize=true " +
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.provided.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean pushBuildImage"
}
sh "kubectl auth can-i get pods"
}
}
stage('Corda - Run Tests') {
stage('Integration Tests') {
steps {
sh "./gradlew " +
"-DbuildId=\"\${BUILD_ID}\" " +
"-Dkubenetize=true " +
"-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" allParallelIntegrationTest"
if (env.CHANGE_ID) {
pullRequest.createStatus(status: 'success',
context: 'continuous-integration/jenkins/pr-merge/integrationTest',
description: 'Integration Tests Passed',
targetUrl: "${env.JOB_URL}/testResults")
}
}
}
}
}
post {
always {
junit '**/build/test-results-xml/**/*.xml'
}
cleanup {
deleteDir() /* clean up our workspace */
}
}
}

.ci/dev/mswin/Jenkinsfile (new file, 101 lines)
View File

@ -0,0 +1,101 @@
#!groovy
/**
* Jenkins pipeline to build Corda on an MS Windows server.
* Because it takes a long time to run tests sequentially, unit tests and
* integration tests are started in parallel on separate agents.
*
* Additionally, pull requests by default run only unit tests.
*/
/**
* Kill already started job.
* Assume a new commit takes precedence and results from previous
* unfinished builds are not required.
* This feature doesn't play well with disableConcurrentBuilds() option
*/
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
/**
* Sense environment
*/
boolean isReleaseBranch = (env.BRANCH_NAME =~ /^release\/os\/.*/)
pipeline {
agent none
options {
ansiColor('xterm')
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
/*
* a bit awkward to read:
* if the parameter is true -> push events are *not* ignored
* if the parameter is false -> push events *are* ignored
*/
overrideIndexTriggers (!isReleaseBranch)
}
parameters {
booleanParam defaultValue: (isReleaseBranch), description: 'Run integration tests?', name: 'DO_INTEGRATION_TESTS'
}
/*
* Do not receive GitHub's push events for release branches -> suitable for nightly builds,
* but pull request projects will receive them as normal, and PR builds are started ASAP
*/
triggers {
pollSCM ignorePostCommitHooks: isReleaseBranch, scmpoll_spec: '@midnight'
}
stages {
stage('Tests') {
parallel {
stage('Unit Tests') {
agent { label 'mswin' }
steps {
bat "./gradlew --no-daemon " +
"--stacktrace " +
"-Pcompilation.warningsAsErrors=false " +
"-Ptests.failFast=true " +
"clean test"
}
post {
always {
archiveArtifacts allowEmptyArchive: true, artifacts: '**/logs/**/*.log'
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
bat '.ci/kill_corda_procs.cmd'
}
cleanup {
deleteDir() /* clean up our workspace */
}
}
}
stage('Integration Tests') {
when {
expression { params.DO_INTEGRATION_TESTS }
beforeAgent true
}
agent { label 'mswin' }
steps {
bat "./gradlew --no-daemon " +
"clean integrationTest"
}
post {
always {
archiveArtifacts allowEmptyArchive: true, artifacts: '**/logs/**/*.log'
junit testResults: '**/build/test-results/**/*.xml', keepLongStdio: true, allowEmptyResults: true
bat '.ci/kill_corda_procs.cmd'
}
cleanup {
deleteDir() /* clean up our workspace */
}
}
}
}
}
}
}

View File

@ -8,8 +8,8 @@ pipeline {
options {
timestamps()
overrideIndexTriggers(false)
buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7'))
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
triggers {
pollSCM ignorePostCommitHooks: true, scmpoll_spec: '@midnight'
@ -20,18 +20,30 @@ pipeline {
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
CORDA_USE_CACHE = "corda-remotes"
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
stage('Corda Pull Request - Generate Build Image') {
stage('Deploy Nodes') {
steps {
sh "./gradlew --no-daemon jar deployNodes"
}
}
stage('Generate Build Image') {
steps {
withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
sh "./gradlew " +
"-Dkubenetize=true " +
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.container.env.parameter.CORDA_USE_CACHE=\"${CORDA_USE_CACHE}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean jar deployNodes install pushBuildImage --stacktrace"
" clean pushBuildImage --stacktrace"
}
sh "kubectl auth can-i get pods"
}
@ -69,15 +81,13 @@ pipeline {
}
}
post {
always {
archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
junit testResults: '**/build/test-results-xml/**/*.xml', allowEmptyResults: true
junit testResults: '**/build/test-results-xml/**/*.xml', allowEmptyResults: true, keepLongStdio: true
}
cleanup {
deleteDir() /* clean up our workspace */
}
}
}

View File

@ -8,6 +8,7 @@ pipeline {
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {
@ -40,6 +41,12 @@ pipeline {
sh ".ci/check-api-changes.sh"
}
}
stage('Deploy Nodes') {
steps {
sh "./gradlew --no-daemon jar deployNodes"
}
}
}
post {

.ci/dev/publish-api-docs/Jenkinsfile (new file, 35 lines)
View File

@ -0,0 +1,35 @@
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
pipeline {
agent { label 'standard' }
options {
ansiColor('xterm')
timestamps()
timeout(time: 3, unit: 'HOURS')
}
environment {
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
stage('Publish Archived API Docs to Artifactory') {
when { tag pattern: /^release-os-V(\d+\.\d+)(\.\d+){0,1}(-GA){0,1}(-\d{4}-\d\d-\d\d-\d{4}){0,1}$/, comparator: 'REGEXP' }
steps {
sh "./gradlew :clean :docs:artifactoryPublish -DpublishApiDocs"
}
}
}
post {
cleanup {
deleteDir() /* clean up our workspace */
}
}
}

View File

@ -1,18 +1,34 @@
#!groovy
/**
* Jenkins pipeline to build Corda OS nightly snapshots
*/
/**
* Kill already started job.
* Assume a new commit takes precedence and results from previous
* unfinished builds are not required.
* This feature doesn't play well with disableConcurrentBuilds() option
*/
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
/*
** calculate the stage for NexusIQ evaluation
** * build for snapshots
*/
def nexusIqStage = "build"
pipeline {
agent { label 'k8s' }
agent { label 'standard' }
options {
timestamps()
ansiColor('xterm')
overrideIndexTriggers(false)
buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7'))
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
triggers {
@ -24,9 +40,29 @@ pipeline {
// in the name
ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish Nightly to Artifactory"
.replaceAll("/", " :: ")
DOCKER_URL = "https://index.docker.io/v1/"
}
stages {
stage('Sonatype Check') {
steps {
sh "./gradlew --no-daemon clean jar"
script {
sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: //'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: manualApplication(nexusAppId),
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
)
}
}
stage('Publish to Artifactory') {
steps {
rtServer (
@ -58,6 +94,17 @@ pipeline {
)
}
}
stage('Publish Nightly to Docker Hub') {
steps {
withCredentials([
usernamePassword(credentialsId: 'corda-publisher-docker-hub-credentials',
usernameVariable: 'DOCKER_USERNAME',
passwordVariable: 'DOCKER_PASSWORD')]) {
sh "./gradlew pushOfficialImages"
}
}
}
}

View File

@ -11,8 +11,8 @@ pipeline {
timestamps()
ansiColor('xterm')
overrideIndexTriggers(false)
buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7'))
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {

View File

@ -1,29 +1,98 @@
#!groovy
/**
* Jenkins pipeline to build Corda OS release branches and tags
*/
/**
* Kill already started job.
* Assume a new commit takes precedence and results from previous
* unfinished builds are not required.
* This feature doesn't play well with disableConcurrentBuilds() option
*/
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
/**
* Sense environment
*/
boolean isReleaseTag = (env.TAG_NAME =~ /^release-.*(?<!_JDK11)$/)
boolean isInternalRelease = (env.TAG_NAME =~ /^internal-release-.*$/)
/*
** calculate the stage for NexusIQ evaluation
** * build for snapshots
** * stage-release: for release candidates and for health checks
** * operate: for final release
*/
def nexusIqStage = "build"
if (isReleaseTag) {
switch (env.TAG_NAME) {
case ~/.*-RC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
case ~/.*-HC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
default: nexusIqStage = "operate"
}
}
pipeline {
agent { label 'k8s' }
options {
timestamps()
buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7'))
disableConcurrentBuilds()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {
DOCKER_TAG_TO_USE = "${env.GIT_COMMIT.subSequence(0, 8)}"
DOCKER_URL = "https://index.docker.io/v1/"
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
ARTIFACTORY_BUILD_NAME = "Corda / Publish / Publish Release to Artifactory".replaceAll("/", "::")
CORDA_USE_CACHE = "corda-remotes"
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
stage('Corda Pull Request - Generate Build Image') {
stage('Sonatype Check') {
steps {
sh "./gradlew --no-daemon clean jar"
script {
sh "./gradlew --no-daemon properties | grep -E '^(version|group):' >version-properties"
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: //'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: manualApplication(nexusAppId),
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
)
}
}
stage('Deploy Nodes') {
steps {
sh "./gradlew --no-daemon jar deployNodes"
}
}
stage('Generate Build Image') {
steps {
withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
sh "./gradlew " +
"-Dkubenetize=true " +
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.container.env.parameter.CORDA_USE_CACHE=\"${CORDA_USE_CACHE}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean jar deployNodes install pushBuildImage --stacktrace"
" clean preAllocateForParallelRegressionTest preAllocateForAllParallelSlowIntegrationTest pushBuildImage --stacktrace"
}
sh "kubectl auth can-i get pods"
}
@ -59,13 +128,57 @@ pipeline {
}
}
}
stage('Publish to Artifactory') {
when {
expression { isReleaseTag }
}
steps {
rtServer(
id: 'R3-Artifactory',
url: 'https://software.r3.com/artifactory',
credentialsId: 'artifactory-credentials'
)
rtGradleDeployer(
id: 'deployer',
serverId: 'R3-Artifactory',
repo: 'corda-releases'
)
rtGradleRun(
usesPlugin: true,
useWrapper: true,
switches: '-s --info',
tasks: 'artifactoryPublish',
deployerId: 'deployer',
buildName: env.ARTIFACTORY_BUILD_NAME
)
rtPublishBuildInfo(
serverId: 'R3-Artifactory',
buildName: env.ARTIFACTORY_BUILD_NAME
)
}
}
stage('Publish Release to Docker Hub') {
when {
expression { !isInternalRelease && isReleaseTag }
}
steps {
withCredentials([
usernamePassword(credentialsId: 'corda-publisher-docker-hub-credentials',
usernameVariable: 'DOCKER_USERNAME',
passwordVariable: 'DOCKER_PASSWORD')]) {
sh "./gradlew pushOfficialImages"
}
}
}
}
post {
always {
archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
junit '**/build/test-results-xml/**/*.xml'
junit testResults: '**/build/test-results-xml/**/*.xml', keepLongStdio: true, allowEmptyResults: true
script {
try {
@ -100,6 +213,7 @@ pipeline {
script
{
if (!isReleaseTag) {
// We want to send a summary email, but want to limit to once per day.
// Comparing the dates of the previous and current builds achieves this,
// i.e. we will only send an email for the first build on a given day.
@ -132,6 +246,7 @@ pipeline {
}
}
}
}
cleanup {
deleteDir() /* clean up our workspace */
}

View File

@ -1,60 +0,0 @@
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@Library('corda-shared-build-pipeline-steps')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
pipeline {
agent { label 'k8s' }
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
}
environment {
DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}"
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
}
stages {
stage('Corda Pull Request - Generate Build Image') {
steps {
withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
sh "./gradlew " +
"-Dkubenetize=true " +
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.provided.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean pushBuildImage"
}
sh "kubectl auth can-i get pods"
}
}
stage('Unit Tests') {
steps {
sh "./gradlew " +
"-DbuildId=\"\${BUILD_ID}\" " +
"-Dkubenetize=true " +
"-Ddocker.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" allParallelUnitTest"
if (env.CHANGE_ID) {
pullRequest.createStatus(status: 'success',
context: 'continuous-integration/jenkins/pr-merge/unitTest',
description: 'Unit Tests Passed',
targetUrl: "${env.JOB_URL}/testResults")
}
}
}
}
post {
always {
junit '**/build/test-results-xml/**/*.xml'
}
cleanup {
deleteDir() /* clean up our workspace */
}
}
}

.github/CODEOWNERS (new file, 62 lines)
View File

@ -0,0 +1,62 @@
# All documentation should be reviewed by the technical writers
*.md @corda/technical-writers
# By default anything under core or node-api is the Kernel team
core @corda/kernel
node-api @corda/kernel
node/src/main/kotlin/net/corda/node/internal @corda/kernel
node/src/main/kotlin/net/corda/node/services @corda/kernel
# Deterministic components
core-deterministic @chrisr3
jdk8u-deterministic @chrisr3
node/djvm @chrisr3
serialization-deterministic @chrisr3
serialization-djvm @chrisr3
serialization-tests @chrisr3
# DemoBench defaults to Chris, but Viktor owns the main code
tools/demobench @chrisr3
tools/demobench/src/main/kotlin/net/corda/demobench @vkolomeyko
# General Corda code
client/rpc @vkolomeyko
core/src/main/kotlin/net/corda/core/flows @dimosr
core/src/main/kotlin/net/corda/core/internal/notary @thschroeter
core/src/main/kotlin/net/corda/core/messaging @vkolomeyko
node/src/integration-test/kotlin/net/corda/node/persistence @blsemo
node/src/integration-test/kotlin/net/corda/node/services/persistence @blsemo
node/src/main/kotlin/net/corda/node/internal/artemis @rekalov
node/src/main/kotlin/net/corda/node/services/identity @rekalov
node/src/main/kotlin/net/corda/node/services/keys @rekalov
node/src/main/kotlin/net/corda/node/services/messaging @dimosr
node/src/main/kotlin/net/corda/node/services/network @rekalov
node/src/main/kotlin/net/corda/node/services/persistence @blsemo
node/src/main/kotlin/net/corda/node/services/rpc @vkolomeyko
node/src/main/kotlin/net/corda/node/services/statemachine @lankydan
node/src/main/kotlin/net/corda/node/utilities/registration @rekalov
node/src/main/kotlin/net/corda/notary @thschroeter
node-api/src/main/kotlin/net/corda/nodeapi/internal/bridging @vkolomeyko
node-api/src/main/kotlin/net/corda/nodeapi/internal/crypto @rekalov
node-api/src/main/kotlin/net/corda/nodeapi/internal/cryptoservice @rekalov
node-api/src/main/kotlin/net/corda/nodeapi/internal/lifecycle @vkolomeyko
node-api/src/main/kotlin/net/corda/nodeapi/internal/persistence @blsemo
node-api/src/main/kotlin/net/corda/nodeapi/internal/protonwrapper @vkolomeyko
node-api/src/test/kotlin/net/corda/nodeapi/internal/bridging @rekalov
common/logging/src/main/kotlin/net/corda/common/logging/errorReporting @JamesHR3
common/logging/src/test/kotlin/net/corda/commmon/logging/errorReporting @JamesHR3
# Single file ownerships go at the end, as they are most specific and take precedence over other ownerships
core/src/main/kotlin/net/corda/core/internal/AbstractAttachment.kt @adelel1
core/src/main/kotlin/net/corda/core/internal/AttachmentTrustCalculator.kt @adelel1
core/src/main/kotlin/net/corda/core/internal/AttachmentWithContext.kt @adelel1
core/src/main/kotlin/net/corda/core/internal/CertRole.kt @rekalov
core/src/main/kotlin/net/corda/core/node/services/AttachmentStorage.kt @adelel1
core/src/main/kotlin/net/corda/core/node/services/IdentityService.kt @rekalov
core/src/main/kotlin/net/corda/core/node/services/NetworkMapCache.kt @rekalov

Jenkinsfile (11 lines changed)
View File

@ -9,6 +9,7 @@ pipeline {
options {
timestamps()
timeout(time: 3, unit: 'HOURS')
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}
environment {
@ -16,6 +17,9 @@ pipeline {
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials')
CORDA_USE_CACHE = "corda-remotes"
CORDA_ARTIFACTORY_USERNAME = "${env.ARTIFACTORY_CREDENTIALS_USR}"
CORDA_ARTIFACTORY_PASSWORD = "${env.ARTIFACTORY_CREDENTIALS_PSW}"
}
stages {
@ -26,8 +30,11 @@ pipeline {
"-Dkubenetize=true " +
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.container.env.parameter.CORDA_USE_CACHE=\"${CORDA_USE_CACHE}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_USERNAME=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Ddocker.container.env.parameter.CORDA_ARTIFACTORY_PASSWORD=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean jar deployNodes pushBuildImage preAllocateForAllParallelIntegrationTest preAllocateForAllParallelIntegrationTest --stacktrace"
" clean preAllocateForAllParallelUnitTest preAllocateForAllParallelIntegrationTest pushBuildImage --stacktrace"
}
sh "kubectl auth can-i get pods"
}
@ -72,7 +79,7 @@ pipeline {
post {
always {
archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
junit '**/build/test-results-xml/**/*.xml'
junit testResults: '**/build/test-results-xml/**/*.xml', keepLongStdio: true, allowEmptyResults: true
}
cleanup {
deleteDir() /* clean up our workspace */

View File

@ -105,7 +105,7 @@ buildscript {
ext.eddsa_version = '0.3.0'
ext.dependency_checker_version = '5.2.0'
ext.commons_collections_version = '4.3'
ext.beanutils_version = '1.9.3'
ext.beanutils_version = '1.9.4'
ext.crash_version = '1.7.4'
ext.jsr305_version = constants.getProperty("jsr305Version")
ext.shiro_version = '1.4.1'
@ -155,22 +155,39 @@ buildscript {
ext.corda_docs_link = "https://docs.corda.net/docs/corda-os/$baseVersion"
repositories {
mavenLocal()
// Use a system environment variable to activate caching with Artifactory,
// because it is easier to pass that way during a parallel build.
// NOTE: the value must be the name of a virtual repository that includes all
// required remote and local repositories!
if (System.getenv("CORDA_USE_CACHE")) {
maven {
name "R3 Maven remote repositories"
url "${artifactory_contextUrl}/${System.getenv("CORDA_USE_CACHE")}"
authentication {
basic(BasicAuthentication)
}
credentials {
username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
}
}
} else {
mavenCentral()
jcenter()
maven {
url 'https://kotlin.bintray.com/kotlinx'
}
maven {
url "$artifactory_contextUrl/corda-dependencies-dev"
url "${artifactory_contextUrl}/corda-dependencies-dev"
}
maven {
url "$artifactory_contextUrl/corda-releases"
url "${artifactory_contextUrl}/corda-releases"
}
}
}
dependencies {
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
classpath "org.jetbrains.kotlin:kotlin-allopen:$kotlin_version"
classpath 'com.jfrog.bintray.gradle:gradle-bintray-plugin:1.4'
classpath "net.corda.plugins:publish-utils:$gradle_plugins_version"
classpath "net.corda.plugins:quasar-utils:$gradle_plugins_version"
classpath "net.corda.plugins:cordformation:$gradle_plugins_version"
@ -204,7 +221,6 @@ plugins {
apply plugin: 'project-report'
apply plugin: 'com.github.ben-manes.versions'
apply plugin: 'net.corda.plugins.publish-utils'
apply plugin: 'maven-publish'
apply plugin: 'com.jfrog.artifactory'
apply plugin: "com.bmuschko.docker-remote-api"
apply plugin: "com.r3.dependx.dependxies"
@ -275,7 +291,7 @@ allprojects {
toolVersion = "0.8.3"
}
tasks.withType(JavaCompile) {
tasks.withType(JavaCompile).configureEach {
options.compilerArgs << "-Xlint:unchecked" << "-Xlint:deprecation" << "-Xlint:-options" << "-parameters"
options.compilerArgs << '-XDenableSunApiLintControl'
if (warnings_as_errors) {
@ -287,7 +303,7 @@ allprojects {
options.encoding = 'UTF-8'
}
tasks.withType(org.jetbrains.kotlin.gradle.tasks.KotlinCompile) {
tasks.withType(org.jetbrains.kotlin.gradle.tasks.KotlinCompile).configureEach {
kotlinOptions {
languageVersion = "1.2"
apiVersion = "1.2"
@ -302,7 +318,7 @@ allprojects {
task.dependsOn tasks.withType(AbstractCompile)
}
tasks.withType(Jar) { task ->
tasks.withType(Jar).configureEach { task ->
// Includes War and Ear
manifest {
attributes('Corda-Release-Version': corda_release_version)
@ -314,7 +330,7 @@ allprojects {
}
}
tasks.withType(Test) {
tasks.withType(Test).configureEach {
forkEvery = 10
ignoreFailures = project.hasProperty('tests.ignoreFailures') ? project.property('tests.ignoreFailures').toBoolean() : false
failFast = project.hasProperty('tests.failFast') ? project.property('tests.failFast').toBoolean() : false
@ -339,7 +355,7 @@ allprojects {
systemProperty 'java.security.egd', 'file:/dev/./urandom'
}
tasks.withType(Test) {
tasks.withType(Test).configureEach {
if (name.contains("integrationTest")) {
maxParallelForks = (System.env.CORDA_INT_TESTING_FORKS == null) ? 1 : "$System.env.CORDA_INT_TESTING_FORKS".toInteger()
}
@ -357,11 +373,29 @@ allprojects {
repositories {
mavenLocal()
// Use a system environment variable to activate caching with Artifactory,
// because it is easier to pass that way during a parallel build.
// NOTE: the value must be the name of a virtual repository that includes all
// required remote and local repositories!
if (System.getenv("CORDA_USE_CACHE")) {
maven {
name "R3 Maven remote repositories"
url "${artifactory_contextUrl}/${System.getenv("CORDA_USE_CACHE")}"
authentication {
basic(BasicAuthentication)
}
credentials {
username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
}
}
} else {
mavenCentral()
jcenter()
maven { url "$artifactory_contextUrl/corda-dependencies" }
maven { url "${artifactory_contextUrl}/corda-dependencies" }
maven { url 'https://repo.gradle.org/gradle/libs-releases' }
maven { url "$artifactory_contextUrl/corda-dev" }
maven { url "${artifactory_contextUrl}/corda-dev" }
}
}
configurations {
@ -520,7 +554,7 @@ tasks.register('detektBaseline', JavaExec) {
args(params)
}
tasks.withType(Test) {
tasks.withType(Test).configureEach {
reports.html.destination = file("${reporting.baseDir}/${name}")
}
@ -626,7 +660,7 @@ dependxiesModule {
skipTasks = "test,integrationTest,smokeTest,slowIntegrationTest"
}
task generateApi(type: net.corda.plugins.GenerateApi) {
tasks.register('generateApi', net.corda.plugins.apiscanner.GenerateApi) {
baseName = "api-corda"
}
@ -662,7 +696,7 @@ if (file('corda-docs-only-build').exists() || (System.getenv('CORDA_DOCS_ONLY_BU
}
wrapper {
gradleVersion = "5.4.1"
gradleVersion = '5.6.4'
distributionType = Wrapper.DistributionType.ALL
}

View File

@ -13,6 +13,7 @@ import net.corda.node.internal.NodeWithInfo;
import net.corda.testing.internal.InternalTestUtilsKt;
import net.corda.testing.node.User;
import net.corda.testing.node.internal.NodeBasedTest;
import net.corda.testing.node.internal.TestCordappInternal;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -30,10 +31,18 @@ import static net.corda.node.services.Permissions.invokeRpc;
import static net.corda.node.services.Permissions.startFlow;
import static net.corda.testing.core.TestConstants.ALICE_NAME;
import static net.corda.testing.core.TestConstants.DUMMY_NOTARY_NAME;
import static net.corda.testing.node.internal.InternalTestUtilsKt.FINANCE_CORDAPPS;
import static net.corda.testing.node.internal.InternalTestUtilsKt.cordappWithPackages;
public class CordaRPCJavaClientTest extends NodeBasedTest {
public CordaRPCJavaClientTest() {
super(Arrays.asList("net.corda.finance.contracts", CashSchemaV1.class.getPackage().getName()), Collections.singletonList(DUMMY_NOTARY_NAME));
super(cordapps(), Collections.singletonList(DUMMY_NOTARY_NAME));
}
private static Set<TestCordappInternal> cordapps() {
Set<TestCordappInternal> cordapps = new HashSet<>(FINANCE_CORDAPPS);
cordapps.add(cordappWithPackages(CashSchemaV1.class.getPackage().getName()));
return cordapps;
}
private List<String> perms = Arrays.asList(

View File

@ -40,6 +40,7 @@ import net.corda.testing.core.expect
import net.corda.testing.core.expectEvents
import net.corda.testing.core.sequence
import net.corda.testing.node.User
import net.corda.testing.node.internal.FINANCE_CORDAPPS
import net.corda.testing.node.internal.NodeBasedTest
import net.corda.testing.node.internal.ProcessUtilities
import net.corda.testing.node.internal.poll
@ -62,7 +63,7 @@ import kotlin.test.assertEquals
import kotlin.test.assertFalse
import kotlin.test.assertTrue
class CordaRPCClientTest : NodeBasedTest(listOf("net.corda.finance"), notaries = listOf(DUMMY_NOTARY_NAME)) {
class CordaRPCClientTest : NodeBasedTest(FINANCE_CORDAPPS, notaries = listOf(DUMMY_NOTARY_NAME)) {
companion object {
val rpcUser = User("user1", "test", permissions = setOf(all()))
val log = contextLogger()

View File

@ -2,9 +2,7 @@ package net.corda.client.rpc
import net.corda.core.context.Actor
import net.corda.core.context.Trace
import net.corda.core.internal.packageName
import net.corda.core.messaging.CordaRPCOps
import net.corda.finance.schemas.CashSchemaV1
import net.corda.node.internal.NodeWithInfo
import net.corda.node.services.Permissions
import net.corda.testing.core.ALICE_NAME
@ -14,7 +12,7 @@ import org.assertj.core.api.Assertions.assertThat
import org.junit.Before
import org.junit.Test
class FlowsExecutionModeTests : NodeBasedTest(emptyList()) {
class FlowsExecutionModeTests : NodeBasedTest() {
private val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))
private lateinit var node: NodeWithInfo

View File

@ -15,6 +15,7 @@ import net.corda.finance.DOLLARS
import net.corda.finance.contracts.asset.Cash
import net.corda.finance.flows.CashIssueFlow
import net.corda.node.services.Permissions
import net.corda.nodeapi.exceptions.RejectedCommandException
import net.corda.testing.core.CHARLIE_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeHandle
@ -49,6 +50,38 @@ class CordaRPCClientReconnectionTest {
val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))
}
@Test(timeout=300_000)
fun `rpc node start when FlowsDrainingModeEnabled throws RejectedCommandException and won't attempt to reconnect`() {
driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS)) {
val address = NetworkHostAndPort("localhost", portAllocator.nextPort())
fun startNode(): NodeHandle {
return startNode(
providedName = CHARLIE_NAME,
rpcUsers = listOf(CordaRPCClientTest.rpcUser),
customOverrides = mapOf("rpcSettings.address" to address.toString())
).getOrThrow()
}
val node = startNode()
val client = CordaRPCClient(node.rpcAddress,
config.copy(maxReconnectAttempts = 1))
(client.start(rpcUser.username, rpcUser.password, gracefulReconnect = gracefulReconnect)).use {
val rpcOps = it.proxy as ReconnectingCordaRPCOps
rpcOps.setFlowsDrainingModeEnabled(true)
assertThatThrownBy { rpcOps.startTrackedFlow(::CashIssueFlow, 10.DOLLARS, OpaqueBytes.of(0), defaultNotaryIdentity).returnValue.get() }
.isInstanceOf(RejectedCommandException::class.java).hasMessage("Node is draining before shutdown. Cannot start new flows through RPC.")
}
}
}
@Test(timeout=300_000)
fun `rpc client calls and returned observables continue working when the server crashes and restarts`() {
driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS)) {

View File

@ -158,7 +158,8 @@ open class CordaRPCClientConfiguration @JvmOverloads constructor(
open val connectionRetryIntervalMultiplier: Double = 1.5,
/**
* Maximum reconnect attempts on failover or disconnection. The default is -1 which means unlimited.
* Maximum reconnect attempts on failover or disconnection.
* Any negative value means there is no limit on the number of reconnect attempts.
*/
open val maxReconnectAttempts: Int = unlimitedReconnectAttempts,
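A minimal Kotlin usage sketch of the reconnect limit described in the KDoc above, assuming a node listening on localhost:10006 with the user1/test credentials used by the tests in this diff (host, port and credentials are placeholders; DEFAULT and copy() are the existing CordaRPCClientConfiguration API):
import net.corda.client.rpc.CordaRPCClient
import net.corda.client.rpc.CordaRPCClientConfiguration
import net.corda.core.utilities.NetworkHostAndPort
fun main() {
    // Start from the defaults and cap failover/reconnect attempts at 3.
    // Any negative value (including the default of -1) keeps retrying indefinitely.
    val config = CordaRPCClientConfiguration.DEFAULT.copy(maxReconnectAttempts = 3)
    val client = CordaRPCClient(NetworkHostAndPort("localhost", 10006), config)
    client.start("user1", "test").use { connection ->
        println(connection.proxy.nodeInfo())
    }
}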

View File

@ -76,10 +76,10 @@ import kotlin.reflect.jvm.javaMethod
* forwarded to the [UnicastSubject]. Note that the observations themselves may contain further [Observable]s, which are
* handled in the same way.
*
* To do the above we take advantage of Kryo's datastructure traversal. When the client is deserialising a message from
* the server that may contain Observables it is supplied with an [ObservableContext] that exposes the map used to demux
* the observations. When an [Observable] is encountered during traversal a new [UnicastSubject] is added to the map and
* we carry on. Each observation later contains the corresponding Observable ID, and we just forward that to the
* To do the above we take advantage of serialisation data structure traversal. When the client is deserialising a message from
* the server that may contain [Observable]s, it is supplied with an [ObservableContext] that exposes the map used to demux
* the observations. When a new [Observable] is encountered during traversal a new [UnicastSubject] is added to the map and
* we carry on. Each observation later contains the corresponding [Observable] ID, and we just forward that to the
* associated [UnicastSubject].
*
* The client may signal that it no longer consumes a particular [Observable]. This may be done explicitly by
@ -88,12 +88,12 @@ import kotlin.reflect.jvm.javaMethod
* The cleanup happens in batches using a dedicated reaper, scheduled on [reaperExecutor].
*
* The client will attempt to failover in case the server become unreachable. Depending on the [ServerLocator] instance
* passed in the constructor, failover is either handle at Artemis level or client level. If only one transport
* passed in the constructor, failover is either handled at Artemis level or client level. If only one transport
* was used to create the [ServerLocator], failover is handled by Artemis (retrying based on [CordaRPCClientConfiguration].
* If a list of transport configurations was used, failover is handled locally. Artemis is able to do it, however the
* brokers on server side need to be configured in HA mode and the [ServerLocator] needs to be created with HA as well.
*/
class RPCClientProxyHandler(
internal class RPCClientProxyHandler(
private val rpcConfiguration: CordaRPCClientConfiguration,
private val rpcUsername: String,
private val rpcPassword: String,
@ -247,7 +247,7 @@ class RPCClientProxyHandler(
try {
sessionFactory = serverLocator.createSessionFactory()
} catch (e: ActiveMQNotConnectedException) {
throw (RPCException("Cannot connect to server(s). Tried with all available servers.", e))
throw RPCException("Cannot connect to server(s). Tried with all available servers.", e)
}
// Depending on how the client is constructed, connection failure is treated differently
if (serverLocator.staticTransportConfigurations.size == 1) {
@ -380,9 +380,11 @@ class RPCClientProxyHandler(
is RPCApi.ServerToClient.Observation -> {
val observable: UnicastSubject<Notification<*>>? = observableContext.observableMap.getIfPresent(serverToClient.id)
if (observable == null) {
log.debug("Observation ${serverToClient.content} arrived to unknown Observable with ID ${serverToClient.id}. " +
log.debug {
"Observation ${serverToClient.content} arrived to unknown Observable with ID ${serverToClient.id}. " +
"This may be due to an observation arriving before the server was " +
"notified of observable shutdown")
"notified of observable shutdown"
}
} else {
// We schedule the onNext() on an executor sticky-pooled based on the Observable ID.
observationExecutorPool.run(serverToClient.id) { executor ->
@ -461,7 +463,7 @@ class RPCClientProxyHandler(
}
}
observableContext.observableMap.invalidateAll()
rpcReplyMap.forEach { _, replyFuture ->
rpcReplyMap.forEach { (_, replyFuture) ->
replyFuture.setException(ConnectionFailureException())
}
@ -528,23 +530,26 @@ class RPCClientProxyHandler(
}
private fun attemptReconnect() {
var reconnectAttempts = rpcConfiguration.maxReconnectAttempts.times(serverLocator.staticTransportConfigurations.size)
// This can be a negative number, as `rpcConfiguration.maxReconnectAttempts = -1` means an infinite number of reconnects
val maxReconnectCount = rpcConfiguration.maxReconnectAttempts.times(serverLocator.staticTransportConfigurations.size)
log.debug { "maxReconnectCount = $maxReconnectCount" }
var reconnectAttempt = 1
var retryInterval = rpcConfiguration.connectionRetryInterval
val maxRetryInterval = rpcConfiguration.connectionMaxRetryInterval
var transportIterator = serverLocator.staticTransportConfigurations.iterator()
while (transportIterator.hasNext() && reconnectAttempts != 0) {
val transport = transportIterator.next()
if (!transportIterator.hasNext())
transportIterator = serverLocator.staticTransportConfigurations.iterator()
fun shouldRetry(reconnectAttempt: Int) =
if (maxReconnectCount < 0) true else reconnectAttempt <= maxReconnectCount
log.debug("Trying to connect using ${transport.params}")
while (shouldRetry(reconnectAttempt)) {
val transport = serverLocator.staticTransportConfigurations.let { it[(reconnectAttempt - 1) % it.size] }
log.debug { "Trying to connect using ${transport.params}" }
try {
if (!serverLocator.isClosed) {
sessionFactory = serverLocator.createSessionFactory(transport)
} else {
log.warn("Stopping reconnect attempts.")
log.debug("Server locator is closed or garbage collected. Proxy may have been closed during reconnect.")
log.debug { "Server locator is closed or garbage collected. Proxy may have been closed during reconnect." }
break
}
} catch (e: ActiveMQException) {
@ -552,12 +557,12 @@ class RPCClientProxyHandler(
Thread.sleep(retryInterval.toMillis())
} catch (e: InterruptedException) {}
// Could not connect, try with next server transport.
reconnectAttempts--
reconnectAttempt++
retryInterval = minOf(maxRetryInterval, retryInterval.times(rpcConfiguration.connectionRetryIntervalMultiplier.toLong()))
continue
}
log.debug("Connected successfully after $reconnectAttempts attempts using ${transport.params}.")
log.debug { "Connected successfully after $reconnectAttempt attempts using ${transport.params}." }
log.info("RPC server available.")
sessionFactory!!.addFailoverListener(this::haFailoverHandler)
initSessions()
@ -566,8 +571,12 @@ class RPCClientProxyHandler(
break
}
if (reconnectAttempts == 0 || sessionFactory == null)
log.error("Could not reconnect to the RPC server.")
val maxReconnectReached = !shouldRetry(reconnectAttempt)
if (maxReconnectReached || sessionFactory == null) {
val errMessage = "Could not reconnect to the RPC server after trying $reconnectAttempt times." +
if (sessionFactory != null) "" else " It was never possible to to establish connection with any of the endpoints."
log.error(errMessage)
}
}
private fun initSessions() {
@ -620,10 +629,11 @@ class RPCClientProxyHandler(
sendingEnabled.set(false)
log.warn("Terminating observables.")
val m = observableContext.observableMap.asMap()
val connectionFailureException = ConnectionFailureException()
m.keys.forEach { k ->
observationExecutorPool.run(k) {
try {
m[k]?.onError(ConnectionFailureException())
m[k]?.onError(connectionFailureException)
} catch (e: Exception) {
log.error("Unexpected exception when RPC connection failure handling", e)
}
@ -631,8 +641,8 @@ class RPCClientProxyHandler(
}
observableContext.observableMap.invalidateAll()
rpcReplyMap.forEach { _, replyFuture ->
replyFuture.setException(ConnectionFailureException())
rpcReplyMap.forEach { (_, replyFuture) ->
replyFuture.setException(connectionFailureException)
}
rpcReplyMap.clear()
@ -666,5 +676,5 @@ class RPCClientProxyHandler(
}
}
private typealias RpcReplyMap = ConcurrentHashMap<Trace.InvocationId, SettableFuture<Any?>>
private typealias RpcReplyMap = ConcurrentHashMap<InvocationId, SettableFuture<Any?>>
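To illustrate the Observable demultiplexing and reconnection behaviour described in the class KDoc above, here is a minimal client-side Kotlin sketch, not part of this change; networkMapFeed and GracefulReconnect are standard Corda 4.x client API, while the address and credentials are placeholders:
import net.corda.client.rpc.CordaRPCClient
import net.corda.client.rpc.GracefulReconnect
import net.corda.core.utilities.NetworkHostAndPort
fun main() {
    val client = CordaRPCClient(NetworkHostAndPort("localhost", 10006))
    // GracefulReconnect makes the proxy retry calls and re-wire returned Observables after a
    // disconnect instead of failing the in-flight requests.
    client.start("user1", "test", gracefulReconnect = GracefulReconnect()).use { connection ->
        val proxy = connection.proxy
        // networkMapFeed() returns a snapshot plus an Observable of updates; each observation is
        // demultiplexed client-side onto its own UnicastSubject, as the KDoc above explains.
        val (snapshot, updates) = proxy.networkMapFeed()
        println("Network map currently has ${snapshot.size} nodes")
        val subscription = updates.subscribe { change -> println("Network map change: $change") }
        Thread.sleep(10_000)
        // Unsubscribing signals to the server that this Observable is no longer consumed.
        subscription.unsubscribe()
    }
}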

View File

@ -325,8 +325,8 @@ class ReconnectingCordaRPCOps private constructor(
}
when (e.targetException) {
is RejectedCommandException -> {
log.warn("Node is being shutdown. Operation ${method.name} rejected. Retrying when node is up...", e)
reconnectingRPCConnection.reconnectOnError(e)
log.warn("Node is being shutdown. Operation ${method.name} rejected. Shutting down...", e)
throw e.targetException
}
is ConnectionFailureException -> {
log.warn("Failed to perform operation ${method.name}. Connection dropped. Retrying....", e)

View File

@ -32,8 +32,9 @@ fun Message.withErrorCodeFor(error: Throwable?, level: Level): Message {
return when {
error != null && level.isInRange(Level.FATAL, Level.WARN) -> {
val logMessage = this.formattedMessage
val message = error.walkExceptionCausedByList().asSequence().mapNotNull(Throwable::message).joinToString(" - ")
CompositeMessage("$message [errorCode=${error.errorCode()}, moreInformationAt=${error.errorCodeLocationUrl()}]", format, parameters, throwable)
CompositeMessage("$logMessage - $message [errorCode=${error.errorCode()}, moreInformationAt=${error.errorCodeLocationUrl()}]", format, parameters, throwable)
}
else -> this
}

View File

@ -1,6 +1,7 @@
package net.corda.common.logging.errorReporting
import org.slf4j.Logger
import java.lang.Exception
import java.text.MessageFormat
import java.util.*
@ -31,6 +32,10 @@ internal class ErrorReporterImpl(private val resourceLocation: String,
override fun report(error: ErrorCode<*>, logger: Logger) {
val errorResource = ErrorResource.fromErrorCode(error, resourceLocation, locale)
val message = "${errorResource.getErrorMessage(error.parameters.toTypedArray())} ${getErrorInfo(error)}"
if (error is Exception) {
logger.error(message, error)
} else {
logger.error(message)
}
}
}

View File

@ -1,4 +1,4 @@
errorTemplate = Failed to create the datasource. See the logs for further information and the cause.
errorTemplate = Failed to create the datasource: {0}. See the logs for further information and the cause.
shortDescription = The datasource could not be created for unknown reasons.
actionsToFix = The logs in the logs directory should contain more information on what went wrong.
aliases =

View File

@ -1,3 +1,3 @@
errorTemplate = Failed to create the datasource. See the logs for further information and the cause.
errorTemplate = Failed to create the datasource: {0}. See the logs for further information and the cause.
shortDescription = The datasource could not be created for unknown reasons.
actionsToFix = The logs in the logs directory should contain more information on what went wrong.
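The new {0} placeholder in the two templates above follows java.text.MessageFormat conventions (MessageFormat is imported by the error-reporting code earlier in this diff). A small illustrative Kotlin sketch with a made-up cause message:
import java.text.MessageFormat
fun main() {
    // Template text copied from the resource above; the argument is a hypothetical example cause.
    val template = "Failed to create the datasource: {0}. See the logs for further information and the cause."
    println(MessageFormat.format(template, "could not connect to the database"))
    // Prints: Failed to create the datasource: could not connect to the database. See the logs ...
}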

View File

@ -0,0 +1,34 @@
package net.corda.commmon.logging
import com.natpryce.hamkrest.assertion.assertThat
import com.natpryce.hamkrest.contains
import net.corda.common.logging.withErrorCodeFor
import org.apache.logging.log4j.Level
import org.apache.logging.log4j.message.SimpleMessage
import org.junit.Test
import kotlin.test.assertEquals
class ExceptionsErrorCodeFunctionsTest {
@Test(timeout=3_000)
fun `error code for message prints out message and full stack trace`() {
val originalMessage = SimpleMessage("This is a test message")
var previous: Exception? = null
val throwables = (0..10).map {
val current = TestThrowable(it, previous)
previous = current
current
}
val exception = throwables.last()
val message = originalMessage.withErrorCodeFor(exception, Level.ERROR)
assertThat(message.formattedMessage, contains("This is a test message".toRegex()))
for (i in (0..10)) {
assertThat(message.formattedMessage, contains("This is exception $i".toRegex()))
}
assertEquals(message.format, originalMessage.format)
assertEquals(message.parameters, originalMessage.parameters)
assertEquals(message.throwable, originalMessage.throwable)
}
private class TestThrowable(index: Int, cause: Exception?) : Exception("This is exception $index", cause)
}

View File

@ -6,7 +6,7 @@ import java.net.InetAddress
class DatabaseErrorsTest : ErrorCodeTest<NodeDatabaseErrors>(NodeDatabaseErrors::class.java) {
override val dataForCodes = mapOf(
NodeDatabaseErrors.COULD_NOT_CONNECT to listOf<Any>(),
NodeDatabaseErrors.FAILED_STARTUP to listOf(),
NodeDatabaseErrors.FAILED_STARTUP to listOf("This is a test message"),
NodeDatabaseErrors.MISSING_DRIVER to listOf(),
NodeDatabaseErrors.PASSWORD_REQUIRED_FOR_H2 to listOf(InetAddress.getLocalHost())
)

View File

@ -7,6 +7,7 @@ import net.corda.common.logging.errorReporting.ErrorContextProvider
import net.corda.common.logging.errorReporting.ErrorReporterImpl
import org.junit.After
import org.junit.Test
import org.mockito.ArgumentMatchers.any
import org.mockito.ArgumentMatchers.anyString
import org.mockito.Mockito
import org.slf4j.Logger
@ -24,6 +25,7 @@ class ErrorReporterImplTest {
private val loggerMock = Mockito.mock(Logger::class.java).also {
Mockito.`when`(it.error(anyString())).then { logs.addAll(it.arguments) }
Mockito.`when`(it.error(anyString(), any(Exception::class.java))).then { params -> logs.addAll(params.arguments) }
}
private val contextProvider: ErrorContextProvider = object : ErrorContextProvider {
@ -39,7 +41,8 @@ class ErrorReporterImplTest {
private enum class TestErrors : ErrorCodes {
CASE1,
CASE2,
CASE_3;
CASE_3,
CASE4;
override val namespace = TestNamespaces.TEST.toString()
}
@ -59,6 +62,11 @@ class ErrorReporterImplTest {
override val parameters = listOf<Any>()
}
private class TestError4(cause: Exception?) : Exception("This is test error 4", cause), ErrorCode<TestErrors> {
override val code = TestErrors.CASE4
override val parameters = listOf<Any>()
}
private fun createReporterImpl(localeTag: String?) : ErrorReporterImpl {
val locale = if (localeTag != null) Locale.forLanguageTag(localeTag) else Locale.getDefault()
return ErrorReporterImpl("errorReporting", locale, contextProvider)
@ -118,4 +126,12 @@ class ErrorReporterImplTest {
testReporter.report(error, loggerMock)
assertEquals(listOf("This is the third test message [Code: test-case-3 URL: $TEST_URL/en-US]"), logs)
}
@Test(timeout = 3_000)
fun `exception based error code logs the stack trace`() {
val error = TestError4(Exception("A test exception"))
val testReporter = createReporterImpl("en-US")
testReporter.report(error, loggerMock)
assertEquals(listOf("This is the fourth test message [Code: test-case4 URL: $TEST_URL/en-US]", error), logs)
}
}

View File

@ -0,0 +1,4 @@
errorTemplate = This is the fourth test message
shortDescription = Test description
actionsToFix = Actions
aliases =

View File

@ -0,0 +1,4 @@
errorTemplate = This is the fourth test message
shortDescription = Test description
actionsToFix = Actions
aliases =

View File

@ -4,7 +4,7 @@
cordaVersion=4.6
versionSuffix=SNAPSHOT
gradlePluginsVersion=5.0.10
gradlePluginsVersion=5.0.11
kotlinVersion=1.2.71
java8MinUpdateVersion=171
# ***************************************************************#
@ -25,7 +25,7 @@ classgraphVersion=4.8.78
disruptorVersion=3.4.2
typesafeConfigVersion=1.3.4
jsr305Version=3.0.2
artifactoryPluginVersion=4.7.3
artifactoryPluginVersion=4.16.1
snakeYamlVersion=1.19
caffeineVersion=2.7.0
metricsVersion=4.1.0

View File

@ -0,0 +1,2 @@
## corda-core-deterministic.
This artifact is a deterministic subset of the binary contents of `corda-core`.

View File

@ -54,8 +54,8 @@ tasks.named('jar', Jar) {
enabled = false
}
def coreJarTask = tasks.getByPath(':core:jar')
def originalJar = coreJarTask.outputs.files.singleFile
def coreJarTask = project(':core').tasks.named('jar', Jar)
def originalJar = coreJarTask.map { it.outputs.files.singleFile }
def patchCore = tasks.register('patchCore', Zip) {
dependsOn coreJarTask
@ -132,7 +132,7 @@ def jarFilter = tasks.register('jarFilter', JarFilterTask) {
}
}
task determinise(type: ProGuardTask) {
def determinise = tasks.register('determinise', ProGuardTask) {
injars jarFilter
outjars file("$buildDir/proguard/$jarBaseName-${project.version}.jar")
@ -166,17 +166,20 @@ task determinise(type: ProGuardTask) {
keepclassmembers 'class net.corda.core.** { public synthetic <methods>; }'
}
task metafix(type: MetaFixerTask) {
def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask)
def metafix = tasks.register('metafix', MetaFixerTask) {
outputDir file("$buildDir/libs")
jars determinise
suffix ""
// Strip timestamps from the JAR to make it reproducible.
preserveTimestamps = false
finalizedBy checkDeterminism
}
// DOCSTART 01
def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask) {
checkDeterminism.configure {
dependsOn jdkTask
injars metafix
@ -197,20 +200,31 @@ def checkDeterminism = tasks.register('checkDeterminism', ProGuardTask) {
// DOCEND 01
defaultTasks "determinise"
determinise.finalizedBy metafix
metafix.finalizedBy checkDeterminism
assemble.dependsOn checkDeterminism
determinise.configure {
finalizedBy metafix
}
tasks.named('assemble') {
dependsOn checkDeterminism
}
def deterministicJar = metafix.outputs.files.singleFile
def deterministicJar = metafix.map { it.outputs.files.singleFile }
artifacts {
deterministicArtifacts file: deterministicJar, name: jarBaseName, type: 'jar', extension: 'jar', builtBy: metafix
publish file: deterministicJar, name: jarBaseName, type: 'jar', extension: 'jar', builtBy: metafix
deterministicArtifacts deterministicJar
publish deterministicJar
}
tasks.named('sourceJar', Jar) {
from 'README.md'
include 'README.md'
}
tasks.named('javadocJar', Jar) {
from 'README.md'
include 'README.md'
}
publish {
dependenciesFrom configurations.deterministicArtifacts
publishSources = false
publishJavadoc = false
name jarBaseName
}

View File

@ -1,7 +1,6 @@
apply plugin: 'kotlin'
apply plugin: 'kotlin-jpa'
apply plugin: 'net.corda.plugins.quasar-utils'
apply plugin: 'net.corda.plugins.publish-utils'
description 'Corda core tests'
@ -99,7 +98,7 @@ configurations {
testArtifacts.extendsFrom testRuntimeClasspath
}
tasks.withType(Test) {
tasks.withType(Test).configureEach {
// fork a new test process for every test class
forkEvery = 10
}

View File

@ -53,7 +53,7 @@ class ReceiveFinalityFlowTest {
val paymentReceiverId = paymentReceiverFuture.getOrThrow()
assertThat(bob.services.vaultService.queryBy<FungibleAsset<*>>().states).isEmpty()
bob.assertFlowSentForObservationDueToConstraintError(paymentReceiverId)
bob.assertFlowSentForObservationDueToUntrustedAttachmentsException(paymentReceiverId)
// Restart Bob with the contracts CorDapp so that it can recover from the error
bob = mockNet.restartNode(bob,
@ -71,7 +71,7 @@ class ReceiveFinalityFlowTest {
.ofType(R::class.java)
}
private fun TestStartedNode.assertFlowSentForObservationDueToConstraintError(runId: StateMachineRunId) {
private fun TestStartedNode.assertFlowSentForObservationDueToUntrustedAttachmentsException(runId: StateMachineRunId) {
val observation = medicalRecordsOfType<Flow>()
.filter { it.flowId == runId }
.toBlocking()
@ -79,6 +79,6 @@ class ReceiveFinalityFlowTest {
assertThat(observation.outcome).isEqualTo(Outcome.OVERNIGHT_OBSERVATION)
assertThat(observation.by).contains(FinalityDoctor)
val error = observation.errors.single()
assertThat(error).isInstanceOf(TransactionVerificationException.ContractConstraintRejection::class.java)
assertThat(error).isInstanceOf(TransactionVerificationException.UntrustedAttachmentsException::class.java)
}
}

View File

@ -55,7 +55,7 @@ class AttachmentsClassLoaderSerializationTests {
arrayOf(isolatedId, att1, att2).map { storage.openAttachment(it)!! },
testNetworkParameters(),
SecureHash.zeroHash,
{ attachmentTrustCalculator.calculate(it) }) { classLoader ->
{ attachmentTrustCalculator.calculate(it) }, attachmentsClassLoaderCache = null) { classLoader ->
val contractClass = Class.forName(ISOLATED_CONTRACT_CLASS_NAME, true, classLoader)
val contract = contractClass.getDeclaredConstructor().newInstance() as Contract
assertEquals("helloworld", contract.declaredField<Any?>("magicString").value)

View File

@ -23,6 +23,7 @@ import net.corda.core.internal.inputStream
import net.corda.core.node.NetworkParameters
import net.corda.core.node.services.AttachmentId
import net.corda.core.serialization.internal.AttachmentsClassLoader
import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
import net.corda.testing.common.internal.testNetworkParameters
import net.corda.node.services.attachments.NodeAttachmentTrustCalculator
import net.corda.testing.contracts.DummyContract
@ -521,6 +522,7 @@ class AttachmentsClassLoaderTests {
val id = SecureHash.randomSHA256()
val timeWindow: TimeWindow? = null
val privacySalt = PrivacySalt()
val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(cacheFactory)
val transaction = createLedgerTransaction(
inputs,
outputs,
@ -532,7 +534,8 @@ class AttachmentsClassLoaderTests {
privacySalt,
testNetworkParameters(),
emptyList(),
isAttachmentTrusted = { true }
isAttachmentTrusted = { true },
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
transaction.verify()
}

View File

@ -10,6 +10,7 @@ import net.corda.core.internal.AbstractAttachment
import net.corda.core.internal.TESTDSL_UPLOADER
import net.corda.core.internal.createLedgerTransaction
import net.corda.core.node.NotaryInfo
import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
import net.corda.core.transactions.SignedTransaction
import net.corda.core.transactions.WireTransaction
import net.corda.testing.common.internal.testNetworkParameters
@ -18,6 +19,7 @@ import net.corda.testing.core.*
import net.corda.testing.internal.createWireTransaction
import net.corda.testing.internal.fakeAttachment
import net.corda.coretesting.internal.rigorousMock
import net.corda.testing.internal.TestingNamedCacheFactory
import org.junit.Rule
import org.junit.Test
import java.math.BigInteger
@ -131,6 +133,7 @@ class TransactionTests {
val id = SecureHash.randomSHA256()
val timeWindow: TimeWindow? = null
val privacySalt = PrivacySalt()
val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(TestingNamedCacheFactory())
val transaction = createLedgerTransaction(
inputs,
outputs,
@ -142,7 +145,8 @@ class TransactionTests {
privacySalt,
testNetworkParameters(),
emptyList(),
isAttachmentTrusted = { true }
isAttachmentTrusted = { true },
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
transaction.verify()
@ -183,6 +187,7 @@ class TransactionTests {
val id = SecureHash.randomSHA256()
val timeWindow: TimeWindow? = null
val privacySalt = PrivacySalt()
val attachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(TestingNamedCacheFactory())
fun buildTransaction() = createLedgerTransaction(
inputs,
@ -195,7 +200,8 @@ class TransactionTests {
privacySalt,
testNetworkParameters(notaries = listOf(NotaryInfo(DUMMY_NOTARY, true))),
emptyList(),
isAttachmentTrusted = { true }
isAttachmentTrusted = { true },
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
assertFailsWith<TransactionVerificationException.NotaryChangeInWrongTransactionType> { buildTransaction().verify() }

View File

@ -89,6 +89,7 @@ interface OwnableState : ContractState {
// DOCEND 3
/** Something which is scheduled to happen at a point in time. */
@KeepForDJVM
interface Scheduled {
val scheduledAt: Instant
}
@ -101,6 +102,7 @@ interface Scheduled {
* lifecycle processing needs to take place. e.g. a fixing or a late payment etc.
*/
@CordaSerializable
@KeepForDJVM
data class ScheduledStateRef(val ref: StateRef, override val scheduledAt: Instant) : Scheduled
/**
@ -115,7 +117,7 @@ data class ScheduledStateRef(val ref: StateRef, override val scheduledAt: Instan
* for a particular [ContractState] have been processed/fired etc. If the activity is not "on ledger" then the
* scheduled activity shouldn't be either.
*/
@DeleteForDJVM
@KeepForDJVM
data class ScheduledActivity(val logicRef: FlowLogicRef, override val scheduledAt: Instant) : Scheduled
// DOCSTART 2
@ -134,7 +136,7 @@ interface LinearState : ContractState {
val linearId: UniqueIdentifier
}
// DOCEND 2
@DeleteForDJVM
@KeepForDJVM
interface SchedulableState : ContractState {
/**
* Indicate whether there is some activity to be performed at some future point in time with respect to this

View File

@ -1,7 +1,9 @@
package net.corda.core.flows
import net.corda.core.CordaInternal
import net.corda.core.DeleteForDJVM
import net.corda.core.DoNotImplement
import net.corda.core.KeepForDJVM
import net.corda.core.serialization.CordaSerializable
/**
@ -11,11 +13,13 @@ import net.corda.core.serialization.CordaSerializable
* the flow to run at the scheduled time.
*/
@DoNotImplement
@KeepForDJVM
interface FlowLogicRefFactory {
/**
* Construct a FlowLogicRef. This is intended for cases where the calling code has the relevant class already
* and can provide it directly.
*/
@DeleteForDJVM
fun create(flowClass: Class<out FlowLogic<*>>, vararg args: Any?): FlowLogicRef
/**
@ -30,12 +34,14 @@ interface FlowLogicRefFactory {
* [SchedulableFlow] annotation.
*/
@CordaInternal
@DeleteForDJVM
fun createForRPC(flowClass: Class<out FlowLogic<*>>, vararg args: Any?): FlowLogicRef
/**
* Converts a [FlowLogicRef] object that was obtained from the calls above into a [FlowLogic], after doing some
* validation to ensure it points to a legitimate flow class.
*/
@DeleteForDJVM
fun toFlowLogic(ref: FlowLogicRef): FlowLogic<*>
}
@ -59,4 +65,5 @@ class IllegalFlowLogicException(val type: String, msg: String) :
// TODO: align this with the existing [FlowRef] in the bank-side API (probably replace some of the API classes)
@CordaSerializable
@DoNotImplement
@KeepForDJVM
interface FlowLogicRef
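For context, a minimal SchedulableState sketch showing how the factory and ref interfaces above are used from contract code, which is why they now need to be visible to the DJVM-facing API; ReminderState and com.example.flows.SendReminderFlow are hypothetical names, not part of this change:
import net.corda.core.contracts.SchedulableState
import net.corda.core.contracts.ScheduledActivity
import net.corda.core.contracts.StateRef
import net.corda.core.flows.FlowLogicRefFactory
import net.corda.core.identity.AbstractParty
import java.time.Instant
// Hypothetical state that schedules a follow-up flow one hour after creation.
data class ReminderState(val createdAt: Instant, override val participants: List<AbstractParty>) : SchedulableState {
    override fun nextScheduledActivity(thisStateRef: StateRef, flowLogicRefFactory: FlowLogicRefFactory): ScheduledActivity? {
        // The string-based create overload avoids loading the flow class in this module.
        val logicRef = flowLogicRefFactory.create("com.example.flows.SendReminderFlow", thisStateRef)
        return ScheduledActivity(logicRef, createdAt.plusSeconds(3600))
    }
}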

View File

@ -5,6 +5,7 @@ import net.corda.core.DeleteForDJVM
import net.corda.core.internal.notary.NotaryService
import net.corda.core.node.ServiceHub
import net.corda.core.node.StatesToRecord
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import java.util.concurrent.ExecutorService
// TODO: This should really be called ServiceHubInternal but that name is already taken by net.corda.node.services.api.ServiceHubInternal.
@ -21,6 +22,8 @@ interface ServiceHubCoreInternal : ServiceHub {
val notaryService: NotaryService?
fun createTransactionsResolver(flow: ResolveTransactionsFlow): TransactionsResolver
val attachmentsClassLoaderCache: AttachmentsClassLoaderCache
}
interface TransactionsResolver {

View File

@ -27,6 +27,12 @@ fun <V, W, X> CordaFuture<out V>.thenMatch(success: (V) -> W, failure: (Throwabl
/** When this future is done and the outcome is failure, log the throwable. */
fun CordaFuture<*>.andForget(log: Logger) = thenMatch({}, { log.error("Background task failed:", it) })
/**
* Returns a future that will also apply the passed closure when it completes.
*
* @param accept A function to execute when completing the original future.
* @return A future returning the same result as the original future that this function was executed on.
*/
fun <RESULT> CordaFuture<out RESULT>.doOnComplete(accept: (RESULT) -> Unit): CordaFuture<RESULT> {
return CordaFutureImpl<RESULT>().also { result ->
thenMatch({
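A minimal usage sketch for the doOnComplete helper introduced above (the hunk is truncated here); the import paths are assumed to match the surrounding internal concurrency utilities:
import net.corda.core.internal.concurrent.doOnComplete
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.utilities.getOrThrow
fun demoDoOnComplete() {
    val original = openFuture<Int>()
    // The chained future completes with the same value once the original does.
    val chained = original.doOnComplete { value -> println("original completed with $value") }
    original.set(42)
    println(chained.getOrThrow())   // prints 42
}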

View File

@ -47,7 +47,7 @@ data class CordappImpl(
}
companion object {
fun jarName(url: URL): String = url.toPath().fileName.toString().removeSuffix(".jar")
fun jarName(url: URL): String = (url.toPath().fileName ?: "").toString().removeSuffix(".jar")
/** CorDapp manifest entries */
const val CORDAPP_CONTRACT_NAME = "Cordapp-Contract-Name"
@ -81,7 +81,7 @@ data class CordappImpl(
serializationCustomSerializers = emptyList(),
customSchemas = emptySet(),
jarPath = Paths.get("").toUri().toURL(),
info = CordappImpl.UNKNOWN_INFO,
info = UNKNOWN_INFO,
allFlows = emptyList(),
jarHash = SecureHash.allOnesHash,
minimumPlatformVersion = 1,

View File

@ -302,7 +302,12 @@ interface CordaRPCOps : RPCOps {
/** Checks whether an attachment with the given hash is stored on the node. */
fun attachmentExists(id: SecureHash): Boolean
/** Download an attachment JAR by ID. */
/**
* Download an attachment JAR by ID.
* @param id the id of the attachment to open
* @return the stream of the JAR
* @throws RPCException if the attachment doesn't exist
* */
fun openAttachment(id: SecureHash): InputStream
/** Uploads a jar to the node, returns its hash. */
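A brief usage sketch for the newly documented openAttachment call; the address, credentials, and port below are placeholders:
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.crypto.SecureHash
import net.corda.core.utilities.NetworkHostAndPort
fun downloadAttachment(id: SecureHash) {
    CordaRPCClient(NetworkHostAndPort("localhost", 10006)).start("user1", "test").use { connection ->
        val rpc = connection.proxy
        if (rpc.attachmentExists(id)) {
            // The caller owns the returned stream and should close it when done.
            rpc.openAttachment(id).use { jar -> println("attachment $id is ${jar.readBytes().size} bytes") }
        }
    }
}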

View File

@ -1,5 +1,8 @@
package net.corda.core.serialization.internal
import com.github.benmanes.caffeine.cache.Cache
import com.github.benmanes.caffeine.cache.Caffeine
import net.corda.core.DeleteForDJVM
import net.corda.core.contracts.Attachment
import net.corda.core.contracts.ContractAttachment
import net.corda.core.contracts.TransactionVerificationException
@ -21,6 +24,7 @@ import java.lang.ref.WeakReference
import java.net.*
import java.security.Permission
import java.util.*
import java.util.function.Function
/**
* A custom ClassLoader that knows how to load classes from a set of attachments. The attachments themselves only
@ -289,31 +293,27 @@ class AttachmentsClassLoader(attachments: List<Attachment>,
*/
@VisibleForTesting
object AttachmentsClassLoaderBuilder {
private const val CACHE_SIZE = 1000
const val CACHE_SIZE = 16
// We use a set here because the ordering of attachments doesn't affect code execution, due to the no
// overlap rule, and attachments don't have any particular ordering enforced by the builders. So we
// can just do unordered comparisons here. But the same attachments run with different network parameters
// may behave differently, so that has to be a part of the cache key.
private data class Key(val hashes: Set<SecureHash>, val params: NetworkParameters)
// This runs in the DJVM so it can't use caffeine.
private val cache: MutableMap<Key, SerializationContext> = createSimpleCache<Key, SerializationContext>(CACHE_SIZE).toSynchronised()
private val fallBackCache: AttachmentsClassLoaderCache = AttachmentsClassLoaderSimpleCacheImpl(CACHE_SIZE)
/**
* Runs the given block with serialization execution context set up with a (possibly cached) attachments classloader.
*
* @param txId The transaction ID that triggered this request; it's unused except for error messages and exceptions that can occur during setup.
*/
@Suppress("LongParameterList")
fun <T> withAttachmentsClassloaderContext(attachments: List<Attachment>,
params: NetworkParameters,
txId: SecureHash,
isAttachmentTrusted: (Attachment) -> Boolean,
parent: ClassLoader = ClassLoader.getSystemClassLoader(),
attachmentsClassLoaderCache: AttachmentsClassLoaderCache?,
block: (ClassLoader) -> T): T {
val attachmentIds = attachments.map(Attachment::id).toSet()
val serializationContext = cache.computeIfAbsent(Key(attachmentIds, params)) {
val cache = attachmentsClassLoaderCache ?: fallBackCache
val serializationContext = cache.computeIfAbsent(AttachmentsClassLoaderKey(attachmentIds, params), Function {
// Create classloader and load serializers, whitelisted classes
val transactionClassLoader = AttachmentsClassLoader(attachments, params, txId, isAttachmentTrusted, parent)
val serializers = try {
@ -336,7 +336,7 @@ object AttachmentsClassLoaderBuilder {
.withWhitelist(whitelistedClasses)
.withCustomSerializers(serializers)
.withoutCarpenter()
}
})
// Deserialize all relevant classes in the transaction classloader.
return SerializationFactory.defaultFactory.withCurrentContext(serializationContext) {
@ -420,6 +420,36 @@ private class AttachmentsHolderImpl : AttachmentsHolder {
}
}
interface AttachmentsClassLoaderCache {
fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext
}
@DeleteForDJVM
class AttachmentsClassLoaderCacheImpl(cacheFactory: NamedCacheFactory) : SingletonSerializeAsToken(), AttachmentsClassLoaderCache {
private val cache: Cache<AttachmentsClassLoaderKey, SerializationContext> = cacheFactory.buildNamed(Caffeine.newBuilder(), "AttachmentsClassLoader_cache")
override fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext {
return cache.get(key, mappingFunction) ?: throw NullPointerException("null returned from cache mapping function")
}
}
class AttachmentsClassLoaderSimpleCacheImpl(cacheSize: Int) : AttachmentsClassLoaderCache {
private val cache: MutableMap<AttachmentsClassLoaderKey, SerializationContext>
= createSimpleCache<AttachmentsClassLoaderKey, SerializationContext>(cacheSize).toSynchronised()
override fun computeIfAbsent(key: AttachmentsClassLoaderKey, mappingFunction: Function<in AttachmentsClassLoaderKey, out SerializationContext>): SerializationContext {
return cache.computeIfAbsent(key, mappingFunction)
}
}
// We use a set here because the ordering of attachments doesn't affect code execution, due to the no
// overlap rule, and attachments don't have any particular ordering enforced by the builders. So we
// can just do unordered comparisons here. But the same attachments run with different network parameters
// may behave differently, so that has to be a part of the cache key.
data class AttachmentsClassLoaderKey(val hashes: Set<SecureHash>, val params: NetworkParameters)
private class AttachmentURLConnection(url: URL, private val attachment: Attachment) : URLConnection(url) {
override fun getContentLengthLong(): Long = attachment.size.toLong()
override fun getInputStream(): InputStream = attachment.open()
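To make the new cache contract concrete, a small sketch of resolving a SerializationContext through it; buildContextFor is a hypothetical stand-in for the classloader and serializer setup performed by withAttachmentsClassloaderContext:
import java.util.function.Function
import net.corda.core.crypto.SecureHash
import net.corda.core.node.NetworkParameters
import net.corda.core.serialization.SerializationContext
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.internal.AttachmentsClassLoaderKey
import net.corda.core.serialization.internal.AttachmentsClassLoaderSimpleCacheImpl
fun resolveContext(
        attachmentIds: Set<SecureHash>,
        params: NetworkParameters,
        buildContextFor: (AttachmentsClassLoaderKey) -> SerializationContext
): SerializationContext {
    val cache: AttachmentsClassLoaderCache = AttachmentsClassLoaderSimpleCacheImpl(16)
    // Identical attachment sets under identical network parameters share one cached context.
    return cache.computeIfAbsent(AttachmentsClassLoaderKey(attachmentIds, params), Function { buildContextFor(it) })
}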

View File

@ -153,7 +153,8 @@ data class ContractUpgradeWireTransaction(
listOf(legacyAttachment, upgradedAttachment),
params,
id,
{ (services as ServiceHubCoreInternal).attachmentTrustCalculator.calculate(it) }) { transactionClassLoader ->
{ (services as ServiceHubCoreInternal).attachmentTrustCalculator.calculate(it) },
attachmentsClassLoaderCache = (services as ServiceHubCoreInternal).attachmentsClassLoaderCache) { transactionClassLoader ->
val resolvedInput = binaryInput.deserialize()
val upgradedContract = upgradedContract(upgradedContractClassName, transactionClassLoader)
val outputState = calculateUpgradedState(resolvedInput, upgradedContract, upgradedAttachment)

View File

@ -26,6 +26,7 @@ import net.corda.core.internal.deserialiseComponentGroup
import net.corda.core.internal.isUploaderTrusted
import net.corda.core.internal.uncheckedCast
import net.corda.core.node.NetworkParameters
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.internal.AttachmentsClassLoaderBuilder
import net.corda.core.utilities.contextLogger
import java.util.Collections.unmodifiableList
@ -87,7 +88,8 @@ private constructor(
private val serializedInputs: List<SerializedStateAndRef>?,
private val serializedReferences: List<SerializedStateAndRef>?,
private val isAttachmentTrusted: (Attachment) -> Boolean,
private val verifierFactory: (LedgerTransaction, ClassLoader) -> Verifier
private val verifierFactory: (LedgerTransaction, ClassLoader) -> Verifier,
private val attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
) : FullTransaction() {
init {
@ -124,7 +126,8 @@ private constructor(
componentGroups: List<ComponentGroup>? = null,
serializedInputs: List<SerializedStateAndRef>? = null,
serializedReferences: List<SerializedStateAndRef>? = null,
isAttachmentTrusted: (Attachment) -> Boolean
isAttachmentTrusted: (Attachment) -> Boolean,
attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
): LedgerTransaction {
return LedgerTransaction(
inputs = inputs,
@ -141,7 +144,8 @@ private constructor(
serializedInputs = protect(serializedInputs),
serializedReferences = protect(serializedReferences),
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = ::BasicVerifier
verifierFactory = ::BasicVerifier,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
}
@ -176,7 +180,8 @@ private constructor(
serializedInputs = null,
serializedReferences = null,
isAttachmentTrusted = { true },
verifierFactory = ::BasicVerifier
verifierFactory = ::BasicVerifier,
attachmentsClassLoaderCache = null
)
}
}
@ -218,7 +223,8 @@ private constructor(
txAttachments,
getParamsWithGoo(),
id,
isAttachmentTrusted = isAttachmentTrusted) { transactionClassLoader ->
isAttachmentTrusted = isAttachmentTrusted,
attachmentsClassLoaderCache = attachmentsClassLoaderCache) { transactionClassLoader ->
// Create a copy of the outer LedgerTransaction which deserializes all fields inside the [transactionClassLoader].
// Only the copy will be used for verification, and the outer shell will be discarded.
// This artifice is required to preserve backwards compatibility.
@ -254,7 +260,8 @@ private constructor(
serializedInputs = serializedInputs,
serializedReferences = serializedReferences,
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = alternateVerifier
verifierFactory = alternateVerifier,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
// Read network parameters with backwards compatibility goo.
@ -320,7 +327,8 @@ private constructor(
serializedInputs = serializedInputs,
serializedReferences = serializedReferences,
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = verifierFactory
verifierFactory = verifierFactory,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
} else {
// This branch is only present for backwards compatibility.
@ -704,7 +712,8 @@ private constructor(
serializedInputs = null,
serializedReferences = null,
isAttachmentTrusted = { it.isUploaderTrusted() },
verifierFactory = ::BasicVerifier
verifierFactory = ::BasicVerifier,
attachmentsClassLoaderCache = null
)
@Deprecated("LedgerTransaction should not be created directly, use WireTransaction.toLedgerTransaction instead.")
@ -733,7 +742,8 @@ private constructor(
serializedInputs = null,
serializedReferences = null,
isAttachmentTrusted = { it.isUploaderTrusted() },
verifierFactory = ::BasicVerifier
verifierFactory = ::BasicVerifier,
attachmentsClassLoaderCache = null
)
@Deprecated("LedgerTransactions should not be created directly, use WireTransaction.toLedgerTransaction instead.")
@ -761,7 +771,8 @@ private constructor(
serializedInputs = serializedInputs,
serializedReferences = serializedReferences,
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = verifierFactory
verifierFactory = verifierFactory,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
}
@ -791,7 +802,8 @@ private constructor(
serializedInputs = serializedInputs,
serializedReferences = serializedReferences,
isAttachmentTrusted = isAttachmentTrusted,
verifierFactory = verifierFactory
verifierFactory = verifierFactory,
attachmentsClassLoaderCache = attachmentsClassLoaderCache
)
}
}

View File

@ -15,6 +15,7 @@ import net.corda.core.node.ServicesForResolution
import net.corda.core.node.services.AttachmentId
import net.corda.core.serialization.CordaSerializable
import net.corda.core.serialization.SerializedBytes
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.serialize
import net.corda.core.utilities.OpaqueBytes
import java.security.PublicKey
@ -109,7 +110,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
services.networkParametersService.lookup(hashToResolve)
},
// `as?` is used due to [MockServices] not implementing [ServiceHubCoreInternal]
isAttachmentTrusted = { (services as? ServiceHubCoreInternal)?.attachmentTrustCalculator?.calculate(it) ?: true }
isAttachmentTrusted = { (services as? ServiceHubCoreInternal)?.attachmentTrustCalculator?.calculate(it) ?: true },
attachmentsClassLoaderCache = (services as? ServiceHubCoreInternal)?.attachmentsClassLoaderCache
)
)
}
@ -145,7 +147,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
resolveAttachment,
{ stateRef -> resolveStateRef(stateRef)?.serialize() },
{ null },
{ it.isUploaderTrusted() }
{ it.isUploaderTrusted() },
null
)
}
@ -161,16 +164,19 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
resolveAttachment,
{ stateRef -> resolveStateRef(stateRef)?.serialize() },
resolveParameters,
{ true } // Any attachment loaded through the DJVM should be trusted
{ true }, // Any attachment loaded through the DJVM should be trusted
null
)
}
@Suppress("LongParameterList", "ThrowsCount")
private fun toLedgerTransactionInternal(
resolveIdentity: (PublicKey) -> Party?,
resolveAttachment: (SecureHash) -> Attachment?,
resolveStateRefAsSerialized: (StateRef) -> SerializedBytes<TransactionState<ContractState>>?,
resolveParameters: (SecureHash?) -> NetworkParameters?,
isAttachmentTrusted: (Attachment) -> Boolean
isAttachmentTrusted: (Attachment) -> Boolean,
attachmentsClassLoaderCache: AttachmentsClassLoaderCache?
): LedgerTransaction {
// Look up public keys to authenticated identities.
val authenticatedCommands = commands.lazyMapped { cmd, _ ->
@ -206,7 +212,8 @@ class WireTransaction(componentGroups: List<ComponentGroup>, val privacySalt: Pr
componentGroups,
serializedResolvedInputs,
serializedResolvedReferences,
isAttachmentTrusted
isAttachmentTrusted,
attachmentsClassLoaderCache
)
checkTransactionSize(ltx, resolvedNetworkParameters.maxTransactionSize, serializedResolvedInputs, serializedResolvedReferences)

View File

@ -4,6 +4,7 @@ import net.corda.core.contracts.*
import net.corda.core.crypto.SecureHash
import net.corda.core.identity.Party
import net.corda.core.node.NetworkParameters
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.transactions.ComponentGroup
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.transactions.WireTransaction
@ -17,6 +18,7 @@ fun WireTransaction.accessGroupHashes() = this.groupHashes
fun WireTransaction.accessGroupMerkleRoots() = this.groupsMerkleRoots
fun WireTransaction.accessAvailableComponentHashes() = this.availableComponentHashes
@Suppress("LongParameterList")
fun createLedgerTransaction(
inputs: List<StateAndRef<ContractState>>,
outputs: List<TransactionState<ContractState>>,
@ -31,8 +33,9 @@ fun createLedgerTransaction(
componentGroups: List<ComponentGroup>? = null,
serializedInputs: List<SerializedStateAndRef>? = null,
serializedReferences: List<SerializedStateAndRef>? = null,
isAttachmentTrusted: (Attachment) -> Boolean
): LedgerTransaction = LedgerTransaction.create(inputs, outputs, commands, attachments, id, notary, timeWindow, privacySalt, networkParameters, references, componentGroups, serializedInputs, serializedReferences, isAttachmentTrusted)
isAttachmentTrusted: (Attachment) -> Boolean,
attachmentsClassLoaderCache: AttachmentsClassLoaderCache
): LedgerTransaction = LedgerTransaction.create(inputs, outputs, commands, attachments, id, notary, timeWindow, privacySalt, networkParameters, references, componentGroups, serializedInputs, serializedReferences, isAttachmentTrusted, attachmentsClassLoaderCache)
fun createContractCreationError(txId: SecureHash, contractClass: String, cause: Throwable) = TransactionVerificationException.ContractCreationError(txId, contractClass, cause)
fun createContractRejection(txId: SecureHash, contract: Contract, cause: Throwable) = TransactionVerificationException.ContractRejection(txId, contract, cause)

View File

@ -1435,7 +1435,7 @@
<ID>ThrowsCount:JarScanningCordappLoader.kt$JarScanningCordappLoader$private fun parseVersion(versionStr: String?, attributeName: String): Int</ID>
<ID>ThrowsCount:LedgerDSLInterpreter.kt$Verifies$ fun failsWith(expectedMessage: String?): EnforceVerifyOrFail</ID>
<ID>ThrowsCount:MockServices.kt$ fun &lt;T : SerializeAsToken&gt; createMockCordaService(serviceHub: MockServices, serviceConstructor: (AppServiceHub) -&gt; T): T</ID>
<ID>ThrowsCount:NetworkRegistrationHelper.kt$NetworkRegistrationHelper$private fun validateCertificates(registeringPublicKey: PublicKey, certificates: List&lt;X509Certificate&gt;)</ID>
<ID>ThrowsCount:NetworkRegistrationHelper.kt$NetworkRegistrationHelper$private fun validateCertificates( registeringPublicKey: PublicKey, registeringLegalName: CordaX500Name, expectedCertRole: CertRole, certificates: List&lt;X509Certificate&gt; )</ID>
<ID>ThrowsCount:NodeInfoFilesCopier.kt$NodeInfoFilesCopier$private fun atomicCopy(source: Path, destination: Path)</ID>
<ID>ThrowsCount:NodeVaultService.kt$NodeVaultService$@Throws(VaultQueryException::class) private fun &lt;T : ContractState&gt; _queryBy(criteria: QueryCriteria, paging_: PageSpecification, sorting: Sort, contractStateType: Class&lt;out T&gt;, skipPagingChecks: Boolean): Vault.Page&lt;T&gt;</ID>
<ID>ThrowsCount:NodeVaultService.kt$NodeVaultService$private fun makeUpdates(batch: Iterable&lt;CoreTransaction&gt;, statesToRecord: StatesToRecord, previouslySeen: Boolean): List&lt;Vault.Update&lt;ContractState&gt;&gt;</ID>
@ -1598,6 +1598,7 @@
<ID>TooGenericExceptionCaught:ScheduledFlowIntegrationTests.kt$ScheduledFlowIntegrationTests$ex: Exception</ID>
<ID>TooGenericExceptionCaught:SerializationOutputTests.kt$SerializationOutputTests$t: Throwable</ID>
<ID>TooGenericExceptionCaught:ShutdownManager.kt$ShutdownManager$t: Throwable</ID>
<ID>TooGenericExceptionCaught:SimpleAMQPClient.kt$SimpleAMQPClient$e: Exception</ID>
<ID>TooGenericExceptionCaught:SimpleMQClient.kt$SimpleMQClient$e: Exception</ID>
<ID>TooGenericExceptionCaught:SingleThreadedStateMachineManager.kt$SingleThreadedStateMachineManager$e: Exception</ID>
<ID>TooGenericExceptionCaught:SingleThreadedStateMachineManager.kt$SingleThreadedStateMachineManager$ex: Exception</ID>
@ -1617,6 +1618,7 @@
<ID>TooGenericExceptionCaught:TransformTypes.kt$TransformTypes.Companion$e: IndexOutOfBoundsException</ID>
<ID>TooGenericExceptionCaught:TransitionExecutorImpl.kt$TransitionExecutorImpl$exception: Exception</ID>
<ID>TooGenericExceptionCaught:Try.kt$Try.Companion$t: Throwable</ID>
<ID>TooGenericExceptionCaught:UserValidationPlugin.kt$UserValidationPlugin$e: Throwable</ID>
<ID>TooGenericExceptionCaught:Utils.kt$e: Exception</ID>
<ID>TooGenericExceptionCaught:V1NodeConfigurationSpec.kt$V1NodeConfigurationSpec$e: Exception</ID>
<ID>TooGenericExceptionCaught:ValidatingNotaryFlow.kt$ValidatingNotaryFlow$e: Exception</ID>

View File

@ -11,12 +11,12 @@ evaluationDependsOn(':jdk8u-deterministic')
def jdk8uDeterministic = project(':jdk8u-deterministic')
ext {
jdkTask = jdk8uDeterministic.assemble
jdkTask = jdk8uDeterministic.tasks.named('assemble')
deterministic_jdk_home = jdk8uDeterministic.jdk_home
deterministic_rt_jar = jdk8uDeterministic.rt_jar
}
tasks.withType(AbstractCompile) {
tasks.withType(AbstractCompile).configureEach {
dependsOn jdkTask
// This is a bit ugly, but Gradle isn't recognising the KotlinCompile task
@ -29,7 +29,7 @@ tasks.withType(AbstractCompile) {
}
}
tasks.withType(JavaCompile) {
tasks.withType(JavaCompile).configureEach {
options.compilerArgs << '-bootclasspath' << deterministic_rt_jar
sourceCompatibility = VERSION_1_8
targetCompatibility = VERSION_1_8

View File

@ -7,6 +7,7 @@ RUN apt-get update && \
rm -rf /var/lib/apt/lists/* && \
mkdir -p /opt/corda/cordapps && \
mkdir -p /opt/corda/persistence && \
mkdir -p /opt/corda/artemis && \
mkdir -p /opt/corda/certificates && \
mkdir -p /opt/corda/drivers && \
mkdir -p /opt/corda/logs && \
@ -20,6 +21,7 @@ RUN apt-get update && \
ENV CORDAPPS_FOLDER="/opt/corda/cordapps" \
PERSISTENCE_FOLDER="/opt/corda/persistence" \
ARTEMIS_FOLDER="/opt/corda/artemis" \
CERTIFICATES_FOLDER="/opt/corda/certificates" \
DRIVERS_FOLDER="/opt/corda/drivers" \
CONFIG_FOLDER="/etc/corda" \
@ -34,6 +36,8 @@ ENV CORDAPPS_FOLDER="/opt/corda/cordapps" \
VOLUME ["/opt/corda/cordapps"]
##PERSISTENCE FOLDER
VOLUME ["/opt/corda/persistence"]
##ARTEMIS FOLDER
VOLUME ["/opt/corda/artemis"]
##CERTS FOLDER
VOLUME ["/opt/corda/certificates"]
##OPTIONAL JDBC DRIVERS FOLDER

View File

@ -19,6 +19,7 @@ RUN apt-get update && \
rm -rf /var/lib/apt/lists/* && \
mkdir -p /opt/corda/cordapps && \
mkdir -p /opt/corda/persistence && \
mkdir -p /opt/corda/artemis && \
mkdir -p /opt/corda/certificates && \
mkdir -p /opt/corda/drivers && \
mkdir -p /opt/corda/logs && \
@ -36,6 +37,7 @@ RUN apt-get update && \
ENV CORDAPPS_FOLDER="/opt/corda/cordapps" \
PERSISTENCE_FOLDER="/opt/corda/persistence" \
ARTEMIS_FOLDER="/opt/corda/artemis" \
CERTIFICATES_FOLDER="/opt/corda/certificates" \
DRIVERS_FOLDER="/opt/corda/drivers" \
CONFIG_FOLDER="/etc/corda" \
@ -50,6 +52,8 @@ ENV CORDAPPS_FOLDER="/opt/corda/cordapps" \
VOLUME ["/opt/corda/cordapps"]
##PERSISTENCE FOLDER
VOLUME ["/opt/corda/persistence"]
##ARTEMIS FOLDER
VOLUME ["/opt/corda/artemis"]
##CERTS FOLDER
VOLUME ["/opt/corda/certificates"]
##OPTIONAL JDBC DRIVERS FOLDER

View File

@ -10,6 +10,7 @@ RUN amazon-linux-extras enable corretto8 && \
rm -rf /var/cache/yum && \
mkdir -p /opt/corda/cordapps && \
mkdir -p /opt/corda/persistence && \
mkdir -p /opt/corda/artemis && \
mkdir -p /opt/corda/certificates && \
mkdir -p /opt/corda/drivers && \
mkdir -p /opt/corda/logs && \
@ -23,6 +24,7 @@ RUN amazon-linux-extras enable corretto8 && \
ENV CORDAPPS_FOLDER="/opt/corda/cordapps" \
PERSISTENCE_FOLDER="/opt/corda/persistence" \
ARTEMIS_FOLDER="/opt/corda/artemis" \
CERTIFICATES_FOLDER="/opt/corda/certificates" \
DRIVERS_FOLDER="/opt/corda/drivers" \
CONFIG_FOLDER="/etc/corda" \
@ -37,6 +39,8 @@ ENV CORDAPPS_FOLDER="/opt/corda/cordapps" \
VOLUME ["/opt/corda/cordapps"]
##PERSISTENCE FOLDER
VOLUME ["/opt/corda/persistence"]
##ARTEMIS FOLDER
VOLUME ["/opt/corda/artemis"]
##CERTS FOLDER
VOLUME ["/opt/corda/certificates"]
##OPTIONAL JDBC DRIVERS FOLDER

View File

@ -51,7 +51,7 @@ class ConfigExporter {
}
fun Config.parseAsNodeConfigWithFallback(): Validated<NodeConfiguration, Configuration.Validation.Error> {
val referenceConfig = ConfigFactory.parseResources("reference.conf")
val referenceConfig = ConfigFactory.parseResources("corda-reference.conf")
val nodeConfig = this
.withValue("baseDirectory", ConfigValueFactory.fromAnyRef("/opt/corda"))
.withFallback(referenceConfig)
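The fallback chain above can be exercised on its own with the Typesafe Config API; a minimal sketch, assuming corda-reference.conf is on the classpath:
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory
fun demoFallback() {
    val reference = ConfigFactory.parseResources("corda-reference.conf")
    val overrides = ConfigFactory.empty()
            .withValue("baseDirectory", ConfigValueFactory.fromAnyRef("/opt/corda"))
    // Keys missing from the overrides fall through to the packaged reference config.
    val resolved = overrides.withFallback(reference).resolve()
    println(resolved.getString("baseDirectory"))
}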

docs/build.gradle (new file, 122 lines)
View File

@ -0,0 +1,122 @@
import org.apache.tools.ant.taskdefs.condition.Os
apply plugin: 'org.jetbrains.dokka'
apply plugin: 'net.corda.plugins.publish-utils'
apply plugin: 'maven-publish'
apply plugin: 'com.jfrog.artifactory'
def internalPackagePrefixes(sourceDirs) {
def prefixes = []
// Kotlin allows packages to deviate from the directory structure, but let's assume they don't:
sourceDirs.collect { sourceDir ->
sourceDir.traverse(type: groovy.io.FileType.DIRECTORIES) {
if (it.name == 'internal') {
prefixes.add sourceDir.toPath().relativize(it.toPath()).toString().replace(File.separator, '.')
}
}
}
prefixes
}
ext {
// TODO: Add '../client/jfx/src/main/kotlin' and '../client/mock/src/main/kotlin' if we decide to make them into public API
dokkaSourceDirs = files('../core/src/main/kotlin', '../client/rpc/src/main/kotlin', '../finance/workflows/src/main/kotlin', '../finance/contracts/src/main/kotlin', '../client/jackson/src/main/kotlin',
'../testing/test-utils/src/main/kotlin', '../testing/node-driver/src/main/kotlin')
internalPackagePrefixes = internalPackagePrefixes(dokkaSourceDirs)
archivedApiDocsBaseFilename = 'api-docs'
}
dokka {
outputDirectory = file("${rootProject.rootDir}/docs/build/html/api/kotlin")
}
task dokkaJavadoc(type: org.jetbrains.dokka.gradle.DokkaTask) {
outputFormat = "javadoc"
outputDirectory = file("${rootProject.rootDir}/docs/build/html/api/javadoc")
}
[dokka, dokkaJavadoc].collect {
it.configure {
moduleName = 'corda'
processConfigurations = ['compile']
sourceDirs = dokkaSourceDirs
includes = ['packages.md']
jdkVersion = 8
externalDocumentationLink {
url = new URL("http://fasterxml.github.io/jackson-core/javadoc/2.9/")
}
externalDocumentationLink {
url = new URL("https://docs.oracle.com/javafx/2/api/")
}
externalDocumentationLink {
url = new URL("http://www.bouncycastle.org/docs/docs1.5on/")
}
internalPackagePrefixes.collect { packagePrefix ->
packageOptions {
prefix = packagePrefix
suppress = true
}
}
}
}
task apidocs(dependsOn: ['dokka', 'dokkaJavadoc']) {
group "Documentation"
description "Build API documentation"
}
task makeHTMLDocs(type: Exec){
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
commandLine "docker", "run", "--rm", "-v", "${project.projectDir}:/opt/docs_builder", "-v", "${project.projectDir}/..:/opt", "corda/docs-builder:latest", "bash", "-c", "make-docsite-html.sh"
} else {
commandLine "bash", "-c", "docker run --rm --user \$(id -u):\$(id -g) -v ${project.projectDir}:/opt/docs_builder -v ${project.projectDir}/..:/opt corda/docs-builder:latest bash -c make-docsite-html.sh"
}
}
task makePDFDocs(type: Exec){
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
commandLine "docker", "run", "--rm", "-v", "${project.projectDir}:/opt/docs_builder", "-v", "${project.projectDir}/..:/opt", "corda/docs-builder:latest", "bash", "-c", "make-docsite-pdf.sh"
} else {
commandLine "bash", "-c", "docker run --rm --user \$(id -u):\$(id -g) -v ${project.projectDir}:/opt/docs_builder -v ${project.projectDir}/..:/opt corda/docs-builder:latest bash -c make-docsite-pdf.sh"
}
}
task makeDocs(dependsOn: ['makeHTMLDocs', 'makePDFDocs'])
apidocs.shouldRunAfter makeDocs
task archiveApiDocs(type: Tar) {
dependsOn apidocs
from buildDir
include 'html/**'
extension 'tgz'
compression Compression.GZIP
}
publishing {
publications {
if (System.getProperty('publishApiDocs') != null) {
archivedApiDocs(MavenPublication) {
artifact archiveApiDocs {
artifactId archivedApiDocsBaseFilename
}
}
}
}
}
artifactoryPublish {
publications('archivedApiDocs')
version = version.replaceAll('-SNAPSHOT', '')
publishPom = false
}
artifactory {
publish {
contextUrl = artifactory_contextUrl
repository {
repoKey = 'corda-dependencies-dev'
username = System.getenv('CORDA_ARTIFACTORY_USERNAME')
password = System.getenv('CORDA_ARTIFACTORY_PASSWORD')
}
}
}

View File

@ -22,4 +22,7 @@ jar.enabled = false
shadowJar {
baseName = "avalanche"
}
assemble.dependsOn shadowJar
artifacts {
archives shadowJar
}

View File

@ -1,6 +1,5 @@
#Wed Aug 21 10:48:19 BST 2019
distributionUrl=https\://gradleproxy:gradleproxy@software.r3.com/artifactory/gradle-proxy/gradle-5.4.1-all.zip
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStorePath=wrapper/dists
distributionUrl=https\://gradleproxy:gradleproxy@software.r3.com/artifactory/gradle-proxy/gradle-5.6.4-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

gradlew (vendored, 6 lines changed)
View File

@ -7,7 +7,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@ -125,8 +125,8 @@ if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
# For Cygwin or MSYS, switch paths to Windows format before running java
if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`

gradlew.bat (vendored, 2 lines changed)
View File

@ -5,7 +5,7 @@
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,

View File

@ -6,7 +6,7 @@ import static org.gradle.api.JavaVersion.VERSION_1_8
*/
apply plugin: 'kotlin'
tasks.withType(AbstractCompile) {
tasks.withType(AbstractCompile).configureEach {
// This is a bit ugly, but Gradle isn't recognising the KotlinCompile task
// as it does the built-in JavaCompile task.
if (it.class.name.startsWith('org.jetbrains.kotlin.gradle.tasks.KotlinCompile')) {
@ -16,7 +16,7 @@ tasks.withType(AbstractCompile) {
}
}
tasks.withType(JavaCompile) {
tasks.withType(JavaCompile).configureEach {
sourceCompatibility = VERSION_1_8
targetCompatibility = VERSION_1_8
}

View File

@ -37,7 +37,9 @@ def copyJdk = tasks.register('copyJdk', Copy) {
}
}
assemble.dependsOn copyJdk
tasks.named('assemble') {
dependsOn copyJdk
}
tasks.named('jar', Jar) {
enabled = false
}

View File

@ -7,6 +7,8 @@ import net.corda.core.internal.concurrent.openFuture
import net.corda.core.node.services.CordaServiceCriticalFailureException
import net.corda.core.utilities.Try
import net.corda.core.utilities.contextLogger
import net.corda.nodeapi.internal.persistence.contextDatabase
import net.corda.nodeapi.internal.persistence.contextDatabaseOrNull
import java.io.Closeable
import java.util.Collections.singleton
import java.util.LinkedList
@ -93,7 +95,14 @@ class NodeLifecycleEventsDistributor : Closeable {
log.warn("Not distributing $event as executor been already shutdown. Double close() case?")
result.set(null)
} else {
val passTheDbToTheThread = contextDatabaseOrNull
executor.execute {
if (passTheDbToTheThread != null)
contextDatabase = passTheDbToTheThread
val orderedSnapshot = if (event.reversedPriority) snapshot.reversed() else snapshot
orderedSnapshot.forEach {
log.debug("Distributing event $event to: $it")

View File

@ -6,6 +6,7 @@ import org.hibernate.Session
import org.hibernate.Transaction
import rx.subjects.PublishSubject
import java.sql.Connection
import java.sql.SQLException
import java.util.UUID
import javax.persistence.EntityManager
@ -87,6 +88,7 @@ class DatabaseTransaction(
committed = true
}
@Throws(SQLException::class)
fun rollback() {
if (sessionDelegate.isInitialized() && session.isOpen) {
session.clear()
@ -97,16 +99,20 @@ class DatabaseTransaction(
clearException()
}
@Throws(SQLException::class)
fun close() {
try {
if (sessionDelegate.isInitialized() && session.isOpen) {
session.close()
}
if (database.closeConnection) {
connection.close()
}
} finally {
clearException()
contextTransactionOrNull = outerTransaction
}
if (outerTransaction == null) {
synchronized(this) {
closed = true

View File

@ -10,6 +10,7 @@ import io.netty.handler.proxy.ProxyConnectionEvent
import io.netty.handler.ssl.SniCompletionEvent
import io.netty.handler.ssl.SslHandler
import io.netty.handler.ssl.SslHandshakeCompletionEvent
import io.netty.handler.ssl.SslHandshakeTimeoutException
import io.netty.util.ReferenceCountUtil
import net.corda.core.identity.CordaX500Name
import net.corda.core.utilities.contextLogger
@ -295,8 +296,8 @@ internal class AMQPChannelHandler(private val serverMode: Boolean,
// This happens when the peer node is closed during SSL establishment.
when {
cause is ClosedChannelException -> logWarnWithMDC("SSL Handshake closed early.")
cause is SslHandshakeTimeoutException -> logWarnWithMDC("SSL Handshake timed out")
// Sadly the exception thrown by Netty wrapper requires that we check the message.
cause is SSLException && cause.message == "handshake timed out" -> logWarnWithMDC("SSL Handshake timed out")
cause is SSLException && (cause.message?.contains("close_notify") == true)
-> logWarnWithMDC("Received close_notify during handshake")
// io.netty.handler.ssl.SslHandler.setHandshakeFailureTransportFailure()

View File

@ -20,6 +20,11 @@ ext {
jolokia_version = constants.getProperty('jolokiaAgentVersion')
}
evaluationDependsOn(':core-deterministic')
evaluationDependsOn(':serialization-deterministic')
evaluationDependsOn(':serialization-djvm:deserializers')
evaluationDependsOn(':node:djvm')
//noinspection GroovyAssignabilityCheck
configurations {
integrationTestCompile.extendsFrom testCompile
@ -191,6 +196,7 @@ dependencies {
// Integration test helpers
integrationTestCompile "junit:junit:$junit_version"
integrationTestCompile "org.assertj:assertj-core:${assertj_version}"
integrationTestCompile "org.apache.qpid:qpid-jms-client:${protonj_version}"
// BFT-Smart dependencies
compile 'com.github.bft-smart:library:master-v1.1-beta-g6215ec8-87'
@ -242,12 +248,12 @@ dependencies {
testCompile project(':testing:cordapps:dbfailure:dbfworkflows')
}
tasks.withType(JavaCompile) {
tasks.withType(JavaCompile).configureEach {
// Resolves a Gradle warning about not scanning for pre-processors.
options.compilerArgs << '-proc:none'
}
tasks.withType(Test) {
tasks.withType(Test).configureEach {
if (JavaVersion.current() == JavaVersion.VERSION_11) {
jvmArgs '-Djdk.attach.allowAttachSelf=true'
}
@ -255,13 +261,13 @@ tasks.withType(Test) {
systemProperty 'deterministic-sources.path', configurations.deterministic.asPath
}
task integrationTest(type: Test) {
tasks.register('integrationTest', Test) {
testClassesDirs = sourceSets.integrationTest.output.classesDirs
classpath = sourceSets.integrationTest.runtimeClasspath
maxParallelForks = (System.env.CORDA_NODE_INT_TESTING_FORKS == null) ? 1 : "$System.env.CORDA_NODE_INT_TESTING_FORKS".toInteger()
}
task slowIntegrationTest(type: Test) {
tasks.register('slowIntegrationTest', Test) {
testClassesDirs = sourceSets.slowIntegrationTest.output.classesDirs
classpath = sourceSets.slowIntegrationTest.runtimeClasspath
maxParallelForks = 1
@ -319,7 +325,7 @@ publish {
name jar.baseName
}
test {
tasks.named('test', Test) {
maxHeapSize = "3g"
maxParallelForks = (System.env.CORDA_NODE_TESTING_FORKS == null) ? 1 : "$System.env.CORDA_NODE_TESTING_FORKS".toInteger()
}
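The lazy task-configuration idiom applied throughout this build file can be written the same way in Gradle's Kotlin DSL; a sketch assuming the java plugin and an integrationTest source set are configured (shown in Kotlin for consistency with the other examples; the Groovy form above is equivalent):
// build.gradle.kts sketch: configureEach and register defer work until a task is actually required.
tasks.withType<Test>().configureEach {
    maxHeapSize = "3g"
    maxParallelForks = (System.getenv("CORDA_NODE_TESTING_FORKS") ?: "1").toInt()
}
tasks.register<Test>("integrationTest") {
    testClassesDirs = sourceSets["integrationTest"].output.classesDirs
    classpath = sourceSets["integrationTest"].runtimeClasspath
}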

View File

@ -39,9 +39,9 @@ capsule {
def nodeProject = project(':node')
task buildCordaJAR(type: FatCapsule, dependsOn: [
nodeProject.tasks.jar,
project(':core-deterministic').tasks.assemble,
project(':serialization-deterministic').tasks.assemble
nodeProject.tasks.named('jar'),
project(':core-deterministic').tasks.named('assemble'),
project(':serialization-deterministic').tasks.named('assemble')
]) {
applicationClass 'net.corda.node.Corda'
archiveBaseName = 'corda'
@ -51,7 +51,7 @@ task buildCordaJAR(type: FatCapsule, dependsOn: [
applicationSource = files(
nodeProject.configurations.runtimeClasspath,
nodeProject.tasks.jar,
nodeProject.buildDir.toString() + '/resources/main/reference.conf',
nodeProject.buildDir.toString() + '/resources/main/corda-reference.conf',
"$rootDir/config/dev/log4j2.xml",
'NOTICE' // Copy CDDL notice
)
@ -119,9 +119,8 @@ task buildCordaJAR(type: FatCapsule, dependsOn: [
}
}
assemble.dependsOn buildCordaJAR
artifacts {
archives buildCordaJAR
runtimeArtifacts buildCordaJAR
publish buildCordaJAR {
classifier ''

View File

@ -37,7 +37,7 @@ public class CordaCaplet extends Capsule {
File configFile = (config == null) ? new File(baseDir, "node.conf") : new File(config);
try {
ConfigParseOptions parseOptions = ConfigParseOptions.defaults().setAllowMissing(false);
Config defaultConfig = ConfigFactory.parseResources("reference.conf", parseOptions);
Config defaultConfig = ConfigFactory.parseResources("corda-reference.conf", parseOptions);
Config baseDirectoryConfig = ConfigFactory.parseMap(Collections.singletonMap("baseDirectory", baseDir));
Config nodeConfig = ConfigFactory.parseFile(configFile, parseOptions);
return baseDirectoryConfig.withFallback(nodeConfig).withFallback(defaultConfig).resolve();

View File

@ -1,6 +1,7 @@
package net.corda.node.flows
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.CordaException
import net.corda.core.flows.*
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
@ -16,7 +17,6 @@ import net.corda.testing.driver.DriverDSL
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.internal.ListenProcessDeathException
import net.corda.testing.node.internal.assertUncompletedCheckpoints
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions.assertThat
@ -78,7 +78,7 @@ class FlowCheckpointVersionNodeStartupCheckTest {
private fun DriverDSL.assertBobFailsToStartWithLogMessage(logMessage: String) {
assertUncompletedCheckpoints(BOB_NAME, 1)
assertFailsWith(ListenProcessDeathException::class) {
assertFailsWith(CordaException::class) {
startNode(NodeParameters(
providedName = BOB_NAME,
customOverrides = mapOf("devMode" to false)

View File

@ -29,6 +29,8 @@ import org.junit.Ignore
import org.junit.Test
import rx.Observable
import java.util.*
import kotlin.test.assertEquals
import kotlin.test.assertTrue
class DistributedServiceTests {
private lateinit var alice: NodeHandle
@ -157,9 +159,9 @@ class DistributedServiceTests {
// The distribution of requests should be very close to something like 16/17/17 as by default Artemis does round robin
println("Notarisation distribution: $notarisationsPerNotary")
require(notarisationsPerNotary.size == 3)
assertEquals(3, notarisationsPerNotary.size)
// We allow some leeway for artemis as it doesn't always produce perfect distribution
require(notarisationsPerNotary.values.all { it > 10 })
assertTrue { notarisationsPerNotary.values.all { it > 10 } }
}
private fun issueCash(amount: Amount<Currency>) {

View File

@ -1,355 +0,0 @@
package net.corda.node.services.rpc
import net.corda.client.rpc.CordaRPCClient
import net.corda.client.rpc.CordaRPCClientConfiguration
import net.corda.client.rpc.GracefulReconnect
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps
import net.corda.client.rpc.notUsed
import net.corda.core.contracts.Amount
import net.corda.core.flows.StateMachineRunId
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.StateMachineUpdate
import net.corda.core.node.services.Vault
import net.corda.core.node.services.vault.PageSpecification
import net.corda.core.node.services.vault.QueryCriteria
import net.corda.core.node.services.vault.builder
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.contextLogger
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.finance.contracts.asset.Cash
import net.corda.finance.flows.CashIssueAndPaymentFlow
import net.corda.finance.schemas.CashSchemaV1
import net.corda.node.services.Permissions
import net.corda.node.services.rpc.RpcReconnectTests.Companion.NUMBER_OF_FLOWS_TO_RUN
import net.corda.testing.core.DUMMY_BANK_A_NAME
import net.corda.testing.core.DUMMY_BANK_B_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeHandle
import net.corda.testing.driver.OutOfProcess
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.OutOfProcessImpl
import net.corda.testing.driver.internal.incrementalPortAllocation
import net.corda.testing.node.User
import net.corda.testing.node.internal.FINANCE_CORDAPPS
import org.assertj.core.api.Assertions.assertThat
import org.junit.Test
import java.util.*
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import kotlin.concurrent.thread
import kotlin.math.absoluteValue
import kotlin.math.max
import kotlin.test.assertEquals
import kotlin.test.assertTrue
import kotlin.test.currentStackTrace
/**
* This is a stress test for the rpc reconnection logic, which triggers failures in a probabilistic way.
*
* You can adjust the variable [NUMBER_OF_FLOWS_TO_RUN] to adjust the number of flows to run and the duration of the test.
*/
class RpcReconnectTests {
companion object {
// this many flows take ~5 minutes
const val NUMBER_OF_FLOWS_TO_RUN = 100
private val log = contextLogger()
}
private val portAllocator = incrementalPortAllocation()
private lateinit var proxy: RandomFailingProxy
private lateinit var node: NodeHandle
private lateinit var currentAddressPair: AddressPair
/**
* This test showcases and stress tests the demo [ReconnectingCordaRPCOps].
*
* Note that during node failure events can be lost and starting flows can become unreliable.
* The only available way to retry failed flows is to attempt a "logical retry" which is also showcased.
*
* This test runs flows in a loop and in the background kills the node or restarts it.
* Also the RPC connection is made through a proxy that introduces random latencies and is also periodically killed.
*/
@Suppress("ComplexMethod")
@Test(timeout=420_000)
fun `test that the RPC client is able to reconnect and proceed after node failure, restart, or connection reset`() {
val nodeRunningTime = { Random().nextInt(12000) + 8000 }
val demoUser = User("demo", "demo", setOf(Permissions.all()))
// When this reaches 0 - the test will end.
val flowsCountdownLatch = CountDownLatch(NUMBER_OF_FLOWS_TO_RUN)
// These are the expected progress steps for the CashIssueAndPayFlow.
val expectedProgress = listOf(
"Starting",
"Issuing cash",
"Generating transaction",
"Signing transaction",
"Finalising transaction",
"Broadcasting transaction to participants",
"Paying recipient",
"Generating anonymous identities",
"Generating transaction",
"Signing transaction",
"Finalising transaction",
"Requesting signature by notary service",
"Requesting signature by Notary service",
"Validating response from Notary service",
"Broadcasting transaction to participants",
"Done"
)
driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS, startNodesInProcess = false, inMemoryDB = false)) {
fun startBankA(address: NetworkHostAndPort) = startNode(providedName = DUMMY_BANK_A_NAME, rpcUsers = listOf(demoUser), customOverrides = mapOf("rpcSettings.address" to address.toString()))
fun startProxy(addressPair: AddressPair) = RandomFailingProxy(serverPort = addressPair.proxyAddress.port, remotePort = addressPair.nodeAddress.port).start()
val addresses = (1..2).map { getRandomAddressPair() }
currentAddressPair = addresses[0]
proxy = startProxy(currentAddressPair)
val (bankA, bankB) = listOf(
startBankA(currentAddressPair.nodeAddress),
startNode(providedName = DUMMY_BANK_B_NAME, rpcUsers = listOf(demoUser))
).transpose().getOrThrow()
node = bankA
val notary = defaultNotaryIdentity
val baseAmount = Amount.parseCurrency("0 USD")
val issuerRef = OpaqueBytes.of(0x01)
var numDisconnects = 0
var numReconnects = 0
val maxStackOccurrences = AtomicInteger()
val addressesForRpc = addresses.map { it.proxyAddress }
// DOCSTART rpcReconnectingRPC
val onReconnect = {
numReconnects++
// We only expect to see a single reconnectOnError in the stack trace. Otherwise we're in danger of stack overflow recursion
maxStackOccurrences.set(max(maxStackOccurrences.get(), currentStackTrace().count { it.methodName == "reconnectOnError" }))
Unit
}
val reconnect = GracefulReconnect(onDisconnect = { numDisconnects++ }, onReconnect = onReconnect)
val config = CordaRPCClientConfiguration.DEFAULT.copy(
connectionRetryInterval = 1.seconds,
connectionRetryIntervalMultiplier = 1.0
)
val client = CordaRPCClient(addressesForRpc, configuration = config)
val bankAReconnectingRPCConnection = client.start(demoUser.username, demoUser.password, gracefulReconnect = reconnect)
val bankAReconnectingRpc = bankAReconnectingRPCConnection.proxy as ReconnectingCordaRPCOps
// DOCEND rpcReconnectingRPC
// Observe the vault and collect the observations.
val vaultEvents = Collections.synchronizedList(mutableListOf<Vault.Update<Cash.State>>())
// DOCSTART rpcReconnectingRPCVaultTracking
val vaultFeed = bankAReconnectingRpc.vaultTrackByWithPagingSpec(
Cash.State::class.java,
QueryCriteria.VaultQueryCriteria(),
PageSpecification(1, 1))
val vaultSubscription = vaultFeed.updates.subscribe { update: Vault.Update<Cash.State> ->
log.info("vault update produced ${update.produced.map { it.state.data.amount }} consumed ${update.consumed.map { it.ref }}")
vaultEvents.add(update)
}
// DOCEND rpcReconnectingRPCVaultTracking
// Observe the stateMachine and collect the observations.
val stateMachineEvents = Collections.synchronizedList(mutableListOf<StateMachineUpdate>())
val stateMachineSubscription = bankAReconnectingRpc.stateMachinesFeed().updates.subscribe { update ->
log.info(update.toString())
stateMachineEvents.add(update)
}
// While the flows are running, randomly apply a different failure scenario.
val nrRestarts = AtomicInteger()
thread(name = "Node killer") {
while (true) {
if (flowsCountdownLatch.count == 0L) break
// Let the node run for a random time interval.
nodeRunningTime().also { ms ->
log.info("Running node for ${ms / 1000} s.")
Thread.sleep(ms.toLong())
}
if (flowsCountdownLatch.count == 0L) break
when (Random().nextInt().rem(7).absoluteValue) {
0 -> {
log.info("Forcefully killing node and proxy.")
(node as OutOfProcessImpl).onStopCallback()
(node as OutOfProcess).process.destroyForcibly()
proxy.stop()
node = startBankA(currentAddressPair.nodeAddress).get()
proxy.start()
}
1 -> {
log.info("Forcefully killing node.")
(node as OutOfProcessImpl).onStopCallback()
(node as OutOfProcess).process.destroyForcibly()
node = startBankA(currentAddressPair.nodeAddress).get()
}
2 -> {
log.info("Shutting down node.")
node.stop()
proxy.stop()
node = startBankA(currentAddressPair.nodeAddress).get()
proxy.start()
}
3, 4 -> {
log.info("Killing proxy.")
proxy.stop()
Thread.sleep(Random().nextInt(5000).toLong())
proxy.start()
}
5 -> {
log.info("Dropping connection.")
proxy.failConnection()
}
6 -> {
log.info("Performing failover to a different node")
node.stop()
proxy.stop()
currentAddressPair = (addresses - currentAddressPair).first()
node = startBankA(currentAddressPair.nodeAddress).get()
proxy = startProxy(currentAddressPair)
}
}
nrRestarts.incrementAndGet()
}
}
// Start nrOfFlowsToRun and provide a logical retry function that checks the vault.
val flowProgressEvents = mutableMapOf<StateMachineRunId, MutableList<String>>()
for (amount in (1..NUMBER_OF_FLOWS_TO_RUN)) {
// DOCSTART rpcReconnectingRPCFlowStarting
bankAReconnectingRpc.runFlowWithLogicalRetry(
runFlow = { rpc ->
log.info("Starting CashIssueAndPaymentFlow for $amount")
val flowHandle = rpc.startTrackedFlowDynamic(
CashIssueAndPaymentFlow::class.java,
baseAmount.plus(Amount.parseCurrency("$amount USD")),
issuerRef,
bankB.nodeInfo.legalIdentities.first(),
false,
notary
)
val flowId = flowHandle.id
log.info("Started flow $amount with flowId: $flowId")
flowProgressEvents.addEvent(flowId, null)
flowHandle.stepsTreeFeed?.updates?.notUsed()
flowHandle.stepsTreeIndexFeed?.updates?.notUsed()
// No reconnecting possible.
flowHandle.progress.subscribe(
{ prog ->
flowProgressEvents.addEvent(flowId, prog)
log.info("Progress $flowId : $prog")
},
{ error ->
log.error("Error thrown in the flow progress observer", error)
})
flowHandle.id
},
hasFlowStarted = { rpc ->
// Query for a state that is the result of this flow.
val criteria = QueryCriteria.VaultCustomQueryCriteria(builder { CashSchemaV1.PersistentCashState::pennies.equal(amount.toLong() * 100) }, status = Vault.StateStatus.ALL)
val results = rpc.vaultQueryByCriteria(criteria, Cash.State::class.java)
log.info("$amount - Found states ${results.states}")
// The flow has completed if a state is found
results.states.isNotEmpty()
},
onFlowConfirmed = {
flowsCountdownLatch.countDown()
log.info("Flow started for $amount. Remaining flows: ${flowsCountdownLatch.count}")
}
)
// DOCEND rpcReconnectingRPCFlowStarting
Thread.sleep(Random().nextInt(250).toLong())
}
log.info("Started all flows")
// Wait until all flows have been started.
val flowsConfirmed = flowsCountdownLatch.await(10, TimeUnit.MINUTES)
if (flowsConfirmed) {
log.info("Confirmed all flows have started.")
} else {
log.info("Timed out waiting for confirmation that all flows have started. Remaining flows: ${flowsCountdownLatch.count}")
}
// Wait for all events to come in and flows to finish.
Thread.sleep(4000)
val nrFailures = nrRestarts.get()
log.info("Checking results after $nrFailures restarts.")
// We should get one disconnect and one reconnect for each failure
assertThat(numDisconnects).isEqualTo(numReconnects)
assertThat(numReconnects).isLessThanOrEqualTo(nrFailures)
assertThat(maxStackOccurrences.get()).isLessThan(2)
// Query the vault and check that states were created for all flows.
fun readCashStates() = bankAReconnectingRpc
.vaultQueryByWithPagingSpec(Cash.State::class.java, QueryCriteria.VaultQueryCriteria(status = Vault.StateStatus.CONSUMED), PageSpecification(1, 10000))
.states
var allCashStates = readCashStates()
var nrRetries = 0
// It might be necessary to wait more for all events to arrive when the node is slow.
while (allCashStates.size < NUMBER_OF_FLOWS_TO_RUN && nrRetries++ < 50) {
Thread.sleep(2000)
allCashStates = readCashStates()
}
val allCash = allCashStates.map { it.state.data.amount.quantity }.toSet()
val missingCash = (1..NUMBER_OF_FLOWS_TO_RUN).filterNot { allCash.contains(it.toLong() * 100) }
log.info("Missing cash states: $missingCash")
assertEquals(NUMBER_OF_FLOWS_TO_RUN, allCashStates.size, "Not all flows were executed successfully")
// The progress status for each flow can only miss the last events, because the node might have been killed.
val missingProgressEvents = flowProgressEvents.filterValues { expectedProgress.subList(0, it.size) != it }
assertTrue(missingProgressEvents.isEmpty(), "The flow progress tracker is missing events: $missingProgressEvents")
// DOCSTART missingVaultEvents
// Check that enough vault events were received.
// This check is fuzzy because events can go missing during node restarts.
// Ideally there should be nrOfFlowsToRun events received but some might get lost for each restart.
assertThat(vaultEvents!!.size + nrFailures * 3).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN)
// DOCEND missingVaultEvents
// Check that no flow was triggered twice.
val duplicates = allCashStates.groupBy { it.state.data.amount }.filterValues { it.size > 1 }
assertTrue(duplicates.isEmpty(), "${duplicates.size} flows were retried illegally.")
log.info("State machine events seen: ${stateMachineEvents!!.size}")
// State machine events are very likely to get lost more often because they seem to be sent with a delay.
assertThat(stateMachineEvents.count { it is StateMachineUpdate.Added }).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN / 3)
assertThat(stateMachineEvents.count { it is StateMachineUpdate.Removed }).isGreaterThanOrEqualTo(NUMBER_OF_FLOWS_TO_RUN / 3)
// Stop the observers.
vaultSubscription.unsubscribe()
stateMachineSubscription.unsubscribe()
bankAReconnectingRPCConnection.close()
}
proxy.close()
}
@Synchronized
fun MutableMap<StateMachineRunId, MutableList<String>>.addEvent(id: StateMachineRunId, progress: String?): Boolean {
return getOrPut(id) { mutableListOf() }.let { if (progress != null) it.add(progress) else false }
}
private fun getRandomAddressPair() = AddressPair(getRandomAddress(), getRandomAddress())
private fun getRandomAddress() = NetworkHostAndPort("localhost", portAllocator.nextPort())
data class AddressPair(val proxyAddress: NetworkHostAndPort, val nodeAddress: NetworkHostAndPort)
}

View File

@ -0,0 +1,288 @@
package net.corda.node.services.statemachine
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.HospitalizeFlowException
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.internal.list
import net.corda.core.internal.readAllLines
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.messaging.startFlow
import net.corda.core.node.AppServiceHub
import net.corda.core.node.services.CordaService
import net.corda.core.serialization.CordaSerializable
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.core.utilities.unwrap
import net.corda.node.services.Permissions
import net.corda.testing.core.DUMMY_NOTARY_NAME
import net.corda.testing.driver.DriverDSL
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeHandle
import net.corda.testing.driver.NodeParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.OutOfProcessImpl
import net.corda.testing.node.NotarySpec
import net.corda.testing.node.TestCordapp
import net.corda.testing.node.User
import net.corda.testing.node.internal.InternalDriverDSL
import org.jboss.byteman.agent.submit.ScriptText
import org.jboss.byteman.agent.submit.Submit
import org.junit.Before
import java.time.Duration
import java.util.concurrent.TimeUnit
import kotlin.test.assertEquals
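/**
 * Base class for the state machine error handling tests. Provides driver setup, helpers for starting nodes with a
 * Byteman agent attached, and assertions over flow hospital counters and checkpoint counts, together with the shared
 * test flows and the [HospitalCounter] service they rely on.
 */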
abstract class StateMachineErrorHandlingTest {
val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))
var counter = 0
@Before
fun setup() {
counter = 0
}
internal fun startDriver(notarySpec: NotarySpec = NotarySpec(DUMMY_NOTARY_NAME), dsl: DriverDSL.() -> Unit) {
driver(
DriverParameters(
notarySpecs = listOf(notarySpec),
startNodesInProcess = false,
inMemoryDB = false,
systemProperties = mapOf("co.paralleluniverse.fibers.verifyInstrumentation" to "true")
)
) {
dsl()
}
}
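// Starts a node with a Byteman agent listening on a freshly allocated port and returns the node handle together with that port.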
internal fun DriverDSL.createBytemanNode(
providedName: CordaX500Name,
additionalCordapps: Collection<TestCordapp> = emptyList()
): Pair<NodeHandle, Int> {
val port = nextPort()
val nodeHandle = (this as InternalDriverDSL).startNode(
NodeParameters(
providedName = providedName,
rpcUsers = listOf(rpcUser),
additionalCordapps = additionalCordapps
),
bytemanPort = port
).getOrThrow()
return nodeHandle to port
}
internal fun DriverDSL.createNode(providedName: CordaX500Name, additionalCordapps: Collection<TestCordapp> = emptyList()): NodeHandle {
return startNode(
NodeParameters(
providedName = providedName,
rpcUsers = listOf(rpcUser),
additionalCordapps = additionalCordapps
)
).getOrThrow()
}
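// Submits the given Byteman rule script to the agent listening on the node's Byteman port.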
internal fun submitBytemanRules(rules: String, port: Int) {
val submit = Submit("localhost", port)
submit.addScripts(listOf(ScriptText("Test script", rules)))
}
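// Returns the contents of the node's stdout log, which includes any traceln output produced by the Byteman rules.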
internal fun getBytemanOutput(nodeHandle: NodeHandle): List<String> {
return nodeHandle.baseDirectory
.list()
.first { it.toString().contains("net.corda.node.Corda") && it.toString().contains("stdout.log") }
.readAllLines()
}
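// Kills an out-of-process node, waiting up to the given timeout for the process to exit, then runs the driver's stop callback.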
internal fun OutOfProcessImpl.stop(timeout: Duration): Boolean {
return process.run {
destroy()
waitFor(timeout.seconds, TimeUnit.SECONDS)
}.also { onStopCallback() }
}
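// Runs [GetHospitalCountersFlow] on the node and asserts the flow hospital counters; unspecified counters default to zero.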
@Suppress("LongParameterList")
internal fun CordaRPCOps.assertHospitalCounts(
discharged: Int = 0,
observation: Int = 0,
propagated: Int = 0,
dischargedRetry: Int = 0,
observationRetry: Int = 0,
propagatedRetry: Int = 0
) {
val counts = startFlow(StateMachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.getOrThrow(20.seconds)
assertEquals(discharged, counts.discharged)
assertEquals(observation, counts.observation)
assertEquals(propagated, counts.propagated)
assertEquals(dischargedRetry, counts.dischargeRetry)
assertEquals(observationRetry, counts.observationRetry)
assertEquals(propagatedRetry, counts.propagatedRetry)
}
internal fun CordaRPCOps.assertHospitalCountsAllZero() = assertHospitalCounts()
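// Runs [GetNumberOfCheckpointsFlow] on the node and asserts the number of checkpoints in each status; the querying flow's own checkpoint is excluded.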
internal fun CordaRPCOps.assertNumberOfCheckpoints(
runnable: Int = 0,
failed: Int = 0,
completed: Int = 0,
hospitalized: Int = 0
) {
val counts = startFlow(StateMachineErrorHandlingTest::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds)
assertEquals(runnable, counts.runnable, "There should be $runnable runnable checkpoints")
assertEquals(failed, counts.failed, "There should be $failed failed checkpoints")
assertEquals(completed, counts.completed, "There should be $completed completed checkpoints")
assertEquals(hospitalized, counts.hospitalized, "There should be $hospitalized hospitalized checkpoints")
}
internal fun CordaRPCOps.assertNumberOfCheckpointsAllZero() = assertNumberOfCheckpoints()
@StartableByRPC
@InitiatingFlow
class SendAMessageFlow(private val party: Party) : FlowLogic<String>() {
@Suspendable
override fun call(): String {
val session = initiateFlow(party)
session.send("hello there")
logger.info("Finished my flow")
return "Finished executing test flow - ${this.runId}"
}
}
@InitiatedBy(SendAMessageFlow::class)
class SendAMessageResponder(private val session: FlowSession) : FlowLogic<Unit>() {
@Suspendable
override fun call() {
session.receive<String>().unwrap { it }
logger.info("Finished my flow")
}
}
@StartableByRPC
class ThrowAnErrorFlow : FlowLogic<String>() {
@Suspendable
override fun call(): String {
throwException()
return "cant get here"
}
private fun throwException() {
logger.info("Throwing exception in flow")
throw IllegalStateException("throwing exception in flow")
}
}
@StartableByRPC
class ThrowAHospitalizeErrorFlow : FlowLogic<String>() {
@Suspendable
override fun call(): String {
throwException()
return "cant get here"
}
private fun throwException() {
logger.info("Throwing exception in flow")
throw HospitalizeFlowException("throwing exception in flow")
}
}
@StartableByRPC
class GetNumberOfCheckpointsFlow : FlowLogic<NumberOfCheckpoints>() {
override fun call() = NumberOfCheckpoints(
runnable = getNumberOfCheckpointsWithStatus(Checkpoint.FlowStatus.RUNNABLE),
failed = getNumberOfCheckpointsWithStatus(Checkpoint.FlowStatus.FAILED),
completed = getNumberOfCheckpointsWithStatus(Checkpoint.FlowStatus.COMPLETED),
hospitalized = getNumberOfCheckpointsWithStatus(Checkpoint.FlowStatus.HOSPITALIZED)
)
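// Counts checkpoints with the given status directly via JDBC, excluding the checkpoint of this query flow itself.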
private fun getNumberOfCheckpointsWithStatus(status: Checkpoint.FlowStatus): Int {
return serviceHub.jdbcSession()
.prepareStatement("select count(*) from node_checkpoints where status = ? and flow_id != ?")
.apply {
setInt(1, status.ordinal)
setString(2, runId.uuid.toString())
}
.use { ps ->
ps.executeQuery().use { rs ->
rs.next()
rs.getLong(1)
}
}.toInt()
}
}
@CordaSerializable
data class NumberOfCheckpoints(
val runnable: Int = 0,
val failed: Int = 0,
val completed: Int = 0,
val hospitalized: Int = 0
)
// Internal use for testing only!!
@StartableByRPC
class GetHospitalCountersFlow : FlowLogic<HospitalCounts>() {
override fun call(): HospitalCounts =
HospitalCounts(
serviceHub.cordaService(HospitalCounter::class.java).dischargedCounter,
serviceHub.cordaService(HospitalCounter::class.java).observationCounter,
serviceHub.cordaService(HospitalCounter::class.java).propagatedCounter,
serviceHub.cordaService(HospitalCounter::class.java).dischargeRetryCounter,
serviceHub.cordaService(HospitalCounter::class.java).observationRetryCounter,
serviceHub.cordaService(HospitalCounter::class.java).propagatedRetryCounter
)
}
@CordaSerializable
data class HospitalCounts(
val discharged: Int,
val observation: Int,
val propagated: Int,
val dischargeRetry: Int,
val observationRetry: Int,
val propagatedRetry: Int
)
@Suppress("UNUSED_PARAMETER")
@CordaService
class HospitalCounter(services: AppServiceHub) : SingletonSerializeAsToken() {
var dischargedCounter: Int = 0
var observationCounter: Int = 0
var propagatedCounter: Int = 0
var dischargeRetryCounter: Int = 0
var observationRetryCounter: Int = 0
var propagatedRetryCounter: Int = 0
init {
StaffedFlowHospital.onFlowDischarged.add { _, _ ->
dischargedCounter++
}
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
observationCounter++
}
StaffedFlowHospital.onFlowErrorPropagated.add { _, _ ->
propagatedCounter++
}
StaffedFlowHospital.onFlowResuscitated.add { _, _, outcome ->
when (outcome) {
StaffedFlowHospital.Outcome.DISCHARGE -> dischargeRetryCounter++
StaffedFlowHospital.Outcome.OVERNIGHT_OBSERVATION -> observationRetryCounter++
StaffedFlowHospital.Outcome.UNTREATABLE -> propagatedRetryCounter++
}
}
}
}
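// Fully qualified names of node-internal classes, resolved lazily by name for use as Byteman rule targets.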
internal val actionExecutorClassName: String by lazy {
Class.forName("net.corda.node.services.statemachine.ActionExecutorImpl").name
}
internal val stateMachineManagerClassName: String by lazy {
Class.forName("net.corda.node.services.statemachine.SingleThreadedStateMachineManager").name
}
}

View File

@ -1,6 +1,5 @@
package net.corda.node.services.statemachine
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.flows.ReceiveFinalityFlow
import net.corda.core.internal.ResolveTransactionsFlow
import net.corda.core.messaging.startFlow
@ -22,7 +21,7 @@ import kotlin.test.assertEquals
import kotlin.test.assertFailsWith
@Suppress("MaxLineLength") // Byteman rules cannot be easily wrapped
class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
class StateMachineFinalityErrorHandlingTest : StateMachineErrorHandlingTest() {
/**
* Throws an exception when recording a transaction inside of [ReceiveFinalityFlow] on the responding
@ -36,7 +35,7 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error recording a transaction inside of ReceiveFinalityFlow will keep the flow in for observation`() {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val charlie = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
// could not get rule for FinalityDoctor + observation counter to work
@ -67,14 +66,9 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
submitBytemanRules(rules, port)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val charlieClient =
CordaRPCClient(charlie.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
aliceClient.startFlow(
alice.rpc.startFlow(
::CashIssueAndPaymentFlow,
500.DOLLARS,
OpaqueBytes.of(0x01),
@ -83,15 +77,11 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
defaultNotaryIdentity
).returnValue.getOrThrow(30.seconds)
val (discharge, observation) = charlieClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(0, discharge)
assertEquals(1, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
assertEquals(1, charlieClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
// 1 ReceiveFinalityFlow and 1 for GetNumberOfCheckpointsFlow
assertEquals(2, charlieClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
alice.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertNumberOfCheckpoints(hospitalized = 1)
charlie.rpc.assertHospitalCounts(observation = 1)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
assertEquals(1, charlie.rpc.stateMachinesSnapshot().size)
}
}
@ -107,7 +97,7 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error resolving a transaction's dependencies inside of ReceiveFinalityFlow will keep the flow in for observation`() {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val charlie = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
// could not get rule for FinalityDoctor + observation counter to work
@ -138,14 +128,9 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
submitBytemanRules(rules, port)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val charlieClient =
CordaRPCClient(charlie.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
aliceClient.startFlow(
alice.rpc.startFlow(
::CashIssueAndPaymentFlow,
500.DOLLARS,
OpaqueBytes.of(0x01),
@ -154,15 +139,11 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
defaultNotaryIdentity
).returnValue.getOrThrow(30.seconds)
val (discharge, observation) = charlieClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(0, discharge)
assertEquals(1, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
assertEquals(1, charlieClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
// 1 for ReceiveFinalityFlow and 1 for GetNumberOfCheckpointsFlow
assertEquals(2, charlieClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
alice.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertNumberOfCheckpoints(hospitalized = 1)
charlie.rpc.assertHospitalCounts(observation = 1)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
assertEquals(1, charlie.rpc.stateMachinesSnapshot().size)
}
}
@ -170,7 +151,7 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
* Throws an exception when executing [Action.CommitTransaction] as part of receiving a transaction to record inside of [ReceiveFinalityFlow] on the responding
* flow's node.
*
* The exception is thrown 5 times.
* The exception is thrown 3 times.
*
* The responding flow is retried 3 times and then completes successfully.
*
@ -180,12 +161,12 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action while receiving a transaction inside of ReceiveFinalityFlow will be retried and complete successfully`() {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val charlie = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
val rules = """
RULE Create Counter
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
@ -201,38 +182,17 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("finality_flag") && readCounter("counter") < 5
IF flagged("finality_flag") && readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
submitBytemanRules(rules, port)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val charlieClient =
CordaRPCClient(charlie.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
aliceClient.startFlow(
alice.rpc.startFlow(
::CashIssueAndPaymentFlow,
500.DOLLARS,
OpaqueBytes.of(0x01),
@ -241,20 +201,14 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
defaultNotaryIdentity
).returnValue.getOrThrow(30.seconds)
val output = getBytemanOutput(charlie)
// This sleep is a bit suspect...
Thread.sleep(1000)
// Check the stdout for the lines generated by byteman
assertEquals(3, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(0, output.filter { it.contains("Byteman test - overnight observation") }.size)
val (discharge, observation) = charlieClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(3, discharge)
assertEquals(0, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
assertEquals(0, charlieClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, charlieClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
alice.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
assertEquals(0, charlie.rpc.stateMachinesSnapshot().size)
}
}
@ -262,7 +216,7 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
* Throws an exception when executing [Action.CommitTransaction] as part of receiving a transaction to record inside of [ReceiveFinalityFlow] on the responding
* flow's node.
*
* The exception is thrown 7 times.
* The exception is thrown 4 times.
*
* The responding flow is retried 3 times and is then kept in for observation.
*
@ -275,12 +229,12 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action while receiving a transaction inside of ReceiveFinalityFlow will be retried and be kept for observation if error persists`() {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val charlie = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val (charlie, port) = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)
val rules = """
RULE Create Counter
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
@ -296,39 +250,18 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("finality_flag") && readCounter("counter") < 7
IF flagged("finality_flag") && readCounter("counter") < 4
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val charlieClient =
CordaRPCClient(charlie.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
submitBytemanRules(rules, port)
assertFailsWith<TimeoutException> {
aliceClient.startFlow(
alice.rpc.startFlow(
::CashIssueAndPaymentFlow,
500.DOLLARS,
OpaqueBytes.of(0x01),
@ -338,20 +271,14 @@ class StatemachineFinalityErrorHandlingTest : StatemachineErrorHandlingTest() {
).returnValue.getOrThrow(30.seconds)
}
val output = getBytemanOutput(charlie)
// Check the stdout for the lines generated by byteman
assertEquals(3, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(1, output.filter { it.contains("Byteman test - overnight observation") }.size)
val (discharge, observation) = charlieClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(3, discharge)
assertEquals(1, observation)
assertEquals(1, aliceClient.stateMachinesSnapshot().size)
assertEquals(1, charlieClient.stateMachinesSnapshot().size)
// 1 for CashIssueAndPaymentFlow and 1 for GetNumberOfCheckpointsFlow
assertEquals(2, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
// 1 for ReceiveFinalityFlow and 1 for GetNumberOfCheckpointsFlow
assertEquals(2, charlieClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
alice.rpc.assertNumberOfCheckpoints(runnable = 1)
charlie.rpc.assertNumberOfCheckpoints(hospitalized = 1)
charlie.rpc.assertHospitalCounts(
discharged = 3,
observation = 1
)
assertEquals(1, alice.rpc.stateMachinesSnapshot().size)
assertEquals(1, charlie.rpc.stateMachinesSnapshot().size)
}
}
}

View File

@ -0,0 +1,581 @@
package net.corda.node.services.statemachine
import net.corda.core.CordaRuntimeException
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.node.services.api.CheckpointStorage
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.CHARLIE_NAME
import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.internal.OutOfProcessImpl
import org.junit.Test
import java.sql.Connection
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.TimeoutException
import kotlin.test.assertEquals
import kotlin.test.assertFailsWith
import kotlin.test.assertTrue
@Suppress("MaxLineLength") // Byteman rules cannot be easily wrapped
class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
private companion object {
val executor: ExecutorService = Executors.newSingleThreadExecutor()
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has initialised and saved its first checkpoint
* (remains in an unstarted state).
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
*
* Each time the flow retries, it starts from the beginning of the flow (due to being in an unstarted state).
*
*/
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when calling [FlowStateMachineImpl.processEvent].
*
* This is not an expected place for an exception to occur, but allows us to test what happens when a random exception is propagated
* up to [FlowStateMachineImpl.run] during flow initialisation.
*
* A "Transaction context is missing" exception is thrown due to where the exception is thrown (no transaction is created so this is
* thrown when leaving [FlowStateMachineImpl.processEventsUntilFlowIsResumed] due to the finally block).
*/
@Test(timeout = 300_000)
fun `unexpected error during flow initialisation throws exception to client`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS ${FlowStateMachineImpl::class.java.name}
METHOD processEvent
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception
CLASS ${FlowStateMachineImpl::class.java.name}
METHOD processEvent
AT ENTRY
IF readCounter("counter") < 1
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
assertFailsWith<CordaRuntimeException> {
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(30.seconds)
}
alice.rpc.assertNumberOfCheckpoints(failed = 1)
alice.rpc.assertHospitalCounts(propagated = 1)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has initialised and saved its first checkpoint
* (remains in an unstarted state).
*
* A [SQLException] is then thrown when trying to rollback the flow's database transaction.
*
* The [SQLException] should be suppressed and the flow should continue to retry and complete successfully.
*/
@Test(timeout = 300_000)
fun `error during initialisation when trying to rollback the flow's database transaction the flow is able to retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") == 0
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Throw exception when rolling back transaction in transition executor
INTERFACE ${Connection::class.java.name}
METHOD rollback
AT ENTRY
IF readCounter("counter") == 1
DO incrementCounter("counter"); traceln("Throwing exception in transition executor"); throw new java.sql.SQLException("could not reach db", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(30.seconds)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 1)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has initialised and saved its first checkpoint
* (remains in an unstarted state).
*
* A [SQLException] is then thrown when trying to close the flow's database transaction.
*
* The [SQLException] should be suppressed and the flow should continue to retry and complete successfully.
*/
@Test(timeout = 300_000)
fun `error during initialisation when trying to close the flow's database transaction the flow is able to retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") == 0
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Throw exception when closing transaction in transition executor
INTERFACE ${Connection::class.java.name}
METHOD close
AT ENTRY
IF readCounter("counter") == 1
DO incrementCounter("counter"); traceln("Throwing exception in transition executor"); throw new java.sql.SQLException("could not reach db", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(30.seconds)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 1)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has initialised and saved its first checkpoint
* (remains in an unstarted state).
*
* The exception is thrown 4 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times) and then be kept in for observation.
*
* Each time the flow retries, it starts from the beginning of the flow (due to being in an unstarted state).
*/
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") < 4
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
executor.execute {
alice.rpc.startFlow(StateMachineErrorHandlingTest::SendAMessageFlow, charlie.nodeInfo.singleIdentity())
}
// The flow is never signalled as started, so calls to [getOrThrow] would hang; sleep instead
Thread.sleep(30.seconds.toMillis())
alice.rpc.assertNumberOfCheckpoints(hospitalized = 1)
alice.rpc.assertHospitalCounts(
discharged = 3,
observation = 1
)
assertEquals(1, alice.rpc.stateMachinesSnapshot().size)
val terminated = (alice as OutOfProcessImpl).stop(60.seconds)
assertTrue(terminated, "The node must be shutdown before it can be restarted")
val (alice2, _) = createBytemanNode(ALICE_NAME)
Thread.sleep(20.seconds.toMillis())
alice2.rpc.assertNumberOfCheckpointsAllZero()
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has initialised and saved its first checkpoint
* (remains in an unstarted state).
*
* An exception is thrown when committing a database transaction during a transition to trigger the retry of the flow. Another
* exception is then thrown during the retry itself.
*
* The retry is itself retried, after which the flow completes successfully.
*/
@Test(timeout = 300_000)
fun `error during retrying a flow that failed when committing its original checkpoint will retry the flow again and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Throw exception on executeCommitTransaction action after first suspend + commit
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF !flagged("commit_exception_flag")
DO flag("commit_exception_flag"); traceln("Throwing exception"); throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
RULE Throw exception on retry
CLASS $stateMachineManagerClassName
METHOD onExternalStartFlow
AT ENTRY
IF flagged("commit_exception_flag") && !flagged("retry_exception_flag")
DO flag("retry_exception_flag"); traceln("Throwing retry exception"); throw new java.lang.RuntimeException("Here we go again")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(
discharged = 1,
dischargedRetry = 1
)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event on a responding node before the flow has initialised and
* saved its first checkpoint (remains in an unstarted state).
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
*
* Each time the flow retries, it starts from the beginning of the flow (due to being in an unstarted state).
*/
@Test(timeout = 300_000)
fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and complete successfully`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertHospitalCounts(discharged = 3)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event on a responding node before the flow has initialised and
* saved its first checkpoint (remains in an unstarted state).
*
* The exception is thrown 4 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times) and then be kept in for observation.
*
* Each time the flow retries, it starts from the beginning of the flow (due to being in an unstarted state).
*/
@Test(timeout = 300_000)
fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") < 4
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
executor.execute {
alice.rpc.startFlow(StateMachineErrorHandlingTest::SendAMessageFlow, charlie.nodeInfo.singleIdentity())
}
// The flow is never signalled as started, so calls to [getOrThrow] would hang; sleep instead
Thread.sleep(30.seconds.toMillis())
alice.rpc.assertNumberOfCheckpoints(runnable = 1)
charlie.rpc.assertNumberOfCheckpoints(hospitalized = 1)
charlie.rpc.assertHospitalCounts(
discharged = 3,
observation = 1
)
assertEquals(1, alice.rpc.stateMachinesSnapshot().size)
assertEquals(1, charlie.rpc.stateMachinesSnapshot().size)
val terminated = (charlie as OutOfProcessImpl).stop(60.seconds)
assertTrue(terminated, "The node must be shutdown before it can be restarted")
val (charlie2, _) = createBytemanNode(CHARLIE_NAME)
Thread.sleep(10.seconds.toMillis())
alice.rpc.assertNumberOfCheckpointsAllZero()
charlie2.rpc.assertNumberOfCheckpointsAllZero()
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has suspended (remains in an unstarted
* state) on a responding node.
*
* The exception is thrown 3 times.
*
* An exception is also thrown from [CheckpointStorage.getCheckpoint].
*
* This test is to prevent a regression, where a transient database connection error can be thrown retrieving a flow's checkpoint when
* retrying the flow after it failed to commit its original checkpoint.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
*/
@Test(timeout = 300_000)
fun `responding flow - session init can be retried when there is a transient connection error to the database`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Throw exception on getCheckpoint
INTERFACE ${CheckpointStorage::class.java.name}
METHOD getCheckpoint
AT ENTRY
IF true
DO traceln("Throwing exception getting checkpoint"); throw new java.sql.SQLTransientConnectionException("Connection is not available")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertHospitalCounts(
discharged = 3,
observation = 0
)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
assertEquals(0, charlie.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has suspended (remains in an unstarted
* state) on a responding node.
*
* The exception is thrown 4 times.
*
* An exception is also thrown from [CheckpointStorage.getCheckpoint].
*
* This test is to prevent a regression, where a transient database connection error can be thrown retrieving a flow's checkpoint when
* retrying the flow after it failed to commit its original checkpoint.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* fails and the flow is kept in for observation.
*/
@Test(timeout = 300_000)
fun `responding flow - session init can be retried when there is a transient connection error to the database goes to observation if error persists`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") < 4
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Throw exception on getCheckpoint
INTERFACE ${CheckpointStorage::class.java.name}
METHOD getCheckpoint
AT ENTRY
IF true
DO traceln("Throwing exception getting checkpoint"); throw new java.sql.SQLTransientConnectionException("Connection is not available")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
assertFailsWith<TimeoutException> {
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
}
charlie.rpc.assertNumberOfCheckpoints(hospitalized = 1)
charlie.rpc.assertHospitalCounts(
discharged = 3,
observation = 1
)
assertEquals(1, alice.rpc.stateMachinesSnapshot().size)
assertEquals(1, charlie.rpc.stateMachinesSnapshot().size)
}
}
}

View File

@ -0,0 +1,661 @@
package net.corda.node.services.statemachine
import net.corda.core.CordaRuntimeException
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.node.services.api.CheckpointStorage
import net.corda.node.services.messaging.DeduplicationHandler
import net.corda.node.services.statemachine.transitions.TopLevelTransition
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.CHARLIE_NAME
import net.corda.testing.core.singleIdentity
import org.junit.Test
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.TimeoutException
import kotlin.test.assertEquals
import kotlin.test.assertFailsWith
@Suppress("MaxLineLength") // Byteman rules cannot be easily wrapped
class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
private companion object {
val executor: ExecutorService = Executors.newSingleThreadExecutor()
}
/**
* Throws an exception when performing an [Action.SendInitial] action.
*
* The exception is thrown 4 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times) and is then kept in
* the hospital for observation.
*/
@Test(timeout = 300_000)
fun `error during transition with SendInitial action is retried 3 times and kept for observation if error persists`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeSendMultiple
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeSendMultiple action
CLASS $actionExecutorClassName
METHOD executeSendMultiple
AT ENTRY
IF readCounter("counter") < 4
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
assertFailsWith<TimeoutException> {
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
}
alice.rpc.assertNumberOfCheckpoints(hospitalized = 1)
alice.rpc.assertHospitalCounts(
discharged = 3,
observation = 1
)
assertEquals(1, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.SendInitial] event.
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
*/
@Test(timeout = 300_000)
fun `error during transition with SendInitial action that does not persist will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeSendMultiple
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeSendMultiple action
CLASS $actionExecutorClassName
METHOD executeSendMultiple
AT ENTRY
IF readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when executing [DeduplicationHandler.afterDatabaseTransaction] from inside an [Action.AcknowledgeMessages] action.
*
* The exception is thrown every time [DeduplicationHandler.afterDatabaseTransaction] is executed inside of
* [ActionExecutorImpl.executeAcknowledgeMessages].
*
* The exceptions should be swallowed. Therefore there should be no trips to the hospital and no retries.
* The flow should complete successfully as the error is swallowed.
*/
@Test(timeout = 300_000)
fun `error during transition with AcknowledgeMessages action is swallowed and flow completes successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Set flag when inside executeAcknowledgeMessages
CLASS $actionExecutorClassName
METHOD executeAcknowledgeMessages
AT INVOKE ${DeduplicationHandler::class.java.name}.afterDatabaseTransaction()
IF !flagged("exception_flag")
DO flag("exception_flag"); traceln("Setting flag to true")
ENDRULE
RULE Throw exception when executing ${DeduplicationHandler::class.java.name}.afterDatabaseTransaction when inside executeAcknowledgeMessages
INTERFACE ${DeduplicationHandler::class.java.name}
METHOD afterDatabaseTransaction
AT ENTRY
IF flagged("exception_flag")
DO traceln("Throwing exception"); clear("exception_flag"); traceln("SETTING FLAG TO FALSE"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCountsAllZero()
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event when trying to propagate an error (processing an
* [Event.StartErrorPropagation] event)
*
* The exception is thrown 3 times.
*
* This causes the flow to retry the [Event.StartErrorPropagation] event until it succeeds. In this scenario it is retried 3 times,
* on the final retry the flow successfully propagates the error and completes exceptionally.
*/
@Test(timeout = 300_000)
fun `error during error propagation the flow is able to retry and recover`() {
startDriver {
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS ${ThrowAnErrorFlow::class.java.name}
METHOD throwException
AT ENTRY
IF !flagged("my_flag")
DO traceln("SETTING FLAG TO TRUE"); flag("my_flag")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("my_flag") && readCounter("counter") < 3
DO traceln("Throwing exception"); incrementCounter("counter"); throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
assertFailsWith<CordaRuntimeException> {
alice.rpc.startFlow(StateMachineErrorHandlingTest::ThrowAnErrorFlow).returnValue.getOrThrow(60.seconds)
}
alice.rpc.assertNumberOfCheckpoints(failed = 1)
alice.rpc.assertHospitalCounts(
propagated = 1,
propagatedRetry = 3
)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when replaying a flow that has already successfully created its initial checkpoint.
*
* An exception is thrown when committing a database transaction during a transition to trigger the retry of the flow. Another
* exception is then thrown during the retry itself.
*
* The flow is discharged and replayed from the hospital. An exception is then thrown during the retry that causes the flow to be
* retried again.
*/
@Test(timeout = 300_000)
fun `error during flow retry when executing retryFlowFromSafePoint the flow is able to retry and recover`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Set flag when executing first suspend
CLASS ${TopLevelTransition::class.java.name}
METHOD suspendTransition
AT ENTRY
IF !flagged("suspend_flag")
DO flag("suspend_flag"); traceln("Setting suspend flag to true")
ENDRULE
RULE Throw exception on executeCommitTransaction action after first suspend + commit
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("suspend_flag") && flagged("commit_flag") && !flagged("commit_exception_flag")
DO flag("commit_exception_flag"); traceln("Throwing exception"); throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
RULE Set flag when executing first commit
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("suspend_flag") && !flagged("commit_flag")
DO flag("commit_flag"); traceln("Setting commit flag to true")
ENDRULE
RULE Throw exception on retry
CLASS $stateMachineManagerClassName
METHOD addAndStartFlow
AT ENTRY
IF flagged("suspend_flag") && flagged("commit_flag") && !flagged("retry_exception_flag")
DO flag("retry_exception_flag"); traceln("Throwing retry exception"); throw new java.lang.RuntimeException("Here we go again")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(40.seconds)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(
discharged = 1,
dischargedRetry = 1
)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event after the flow has suspended (has moved to a started state).
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
*
* Each time the flow retries, it begins from the previous checkpoint where it suspended before failing.
*/
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action that occurs after the first suspend will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
// seems to be restarting the flow from the beginning every time
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Set flag when executing first suspend
CLASS ${TopLevelTransition::class.java.name}
METHOD suspendTransition
AT ENTRY
IF !flagged("suspend_flag")
DO flag("suspend_flag"); traceln("Setting suspend flag to true")
ENDRULE
RULE Throw exception on executeCommitTransaction action after first suspend + commit
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("suspend_flag") && flagged("commit_flag") && readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Set flag when executing first commit
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("suspend_flag") && !flagged("commit_flag")
DO flag("commit_flag"); traceln("Setting commit flag to true")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event when the flow is finishing.
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
*
* Each time the flow retries, it begins from the previous checkpoint where it suspended before failing.
*/
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
// seems to be restarting the flow from the beginning every time
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Set flag when adding action to remove checkpoint
CLASS ${TopLevelTransition::class.java.name}
METHOD flowFinishTransition
AT ENTRY
IF !flagged("remove_checkpoint_flag")
DO flag("remove_checkpoint_flag"); traceln("Setting remove checkpoint flag to true")
ENDRULE
RULE Throw exception on executeCommitTransaction when removing checkpoint
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("remove_checkpoint_flag") && readCounter("counter") < 3
DO incrementCounter("counter"); clear("remove_checkpoint_flag"); traceln("Throwing exception"); throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws a [ConstraintViolationException] when performing an [Action.CommitTransaction] event when the flow is finishing.
*
* The exception is thrown 4 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times) and then be kept in for observation.
*
* Each time the flow retries, it begins from the previous checkpoint where it suspended before failing.
*/
@Test(timeout = 300_000)
fun `error during transition with CommitTransaction action and ConstraintViolationException that occurs when completing a flow will retry and be kept for observation if error persists`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Set flag when adding action to remove checkpoint
CLASS ${TopLevelTransition::class.java.name}
METHOD flowFinishTransition
AT ENTRY
IF !flagged("remove_checkpoint_flag")
DO flag("remove_checkpoint_flag"); traceln("Setting remove checkpoint flag to true")
ENDRULE
RULE Throw exception on executeCommitTransaction when removing checkpoint
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("remove_checkpoint_flag") && readCounter("counter") < 4
DO incrementCounter("counter");
clear("remove_checkpoint_flag");
traceln("Throwing exception");
throw new org.hibernate.exception.ConstraintViolationException("This flow has a terminal condition", new java.sql.SQLException(), "made up constraint")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
assertFailsWith<TimeoutException> {
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
}
alice.rpc.assertNumberOfCheckpoints(hospitalized = 1)
alice.rpc.assertHospitalCounts(
discharged = 3,
observation = 1
)
assertEquals(1, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has suspended (remains in an unstarted
* state).
*
* The exception is thrown 3 times.
*
* An exception is also thrown from [CheckpointStorage.getCheckpoint].
*
* This test is to prevent a regression where a transient database connection error can be thrown while retrieving a flow's checkpoint when
* retrying the flow after it failed to commit its original checkpoint.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
*/
@Test(timeout = 300_000)
fun `flow can be retried when there is a transient connection error to the database`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Throw exception on getCheckpoint
INTERFACE ${CheckpointStorage::class.java.name}
METHOD getCheckpoint
AT ENTRY
IF true
DO traceln("Throwing exception getting checkpoint"); throw new java.sql.SQLTransientConnectionException("Connection is not available")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(
discharged = 3,
observation = 0
)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event before the flow has suspended (remains in an unstarted
* state).
*
* The exception is thrown 4 times.
*
* An exception is also thrown from [CheckpointStorage.getCheckpoint].
*
* This test is to prevent a regression where a transient database connection error can be thrown while retrieving a flow's checkpoint when
* retrying the flow after it failed to commit its original checkpoint.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* fails and is kept in for observation.
*/
@Test(timeout = 300_000)
fun `flow can be retried when there is a transient connection error to the database goes to observation if error persists`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeCommitTransaction action
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF readCounter("counter") < 4
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Throw exception on getCheckpoint
INTERFACE ${CheckpointStorage::class.java.name}
METHOD getCheckpoint
AT ENTRY
IF true
DO traceln("Throwing exception getting checkpoint"); throw new java.sql.SQLTransientConnectionException("Connection is not available")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
executor.execute {
alice.rpc.startFlow(StateMachineErrorHandlingTest::SendAMessageFlow, charlie.nodeInfo.singleIdentity())
}
// The flow is not signaled as started, so calls to [getOrThrow] will hang; sleeping instead
Thread.sleep(30.seconds.toMillis())
alice.rpc.assertNumberOfCheckpoints(hospitalized = 1)
alice.rpc.assertHospitalCounts(
discharged = 3,
observation = 1
)
assertEquals(1, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Throws an exception when performing an [Action.CommitTransaction] event when the flow is finishing on a responding node.
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
*/
@Test(timeout = 300_000)
fun `responding flow - error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
startDriver {
val (charlie, port) = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Set flag when adding action to remove checkpoint
CLASS ${TopLevelTransition::class.java.name}
METHOD flowFinishTransition
AT ENTRY
IF !flagged("remove_checkpoint_flag")
DO flag("remove_checkpoint_flag"); traceln("Setting remove checkpoint flag to true")
ENDRULE
RULE Throw exception on executeCommitTransaction when removing checkpoint
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("remove_checkpoint_flag") && readCounter("counter") < 3
DO incrementCounter("counter");
clear("remove_checkpoint_flag");
traceln("Throwing exception");
throw new java.sql.SQLException("die dammit die", "1")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
alice.rpc.startFlow(
StateMachineErrorHandlingTest::SendAMessageFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
alice.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertNumberOfCheckpointsAllZero()
charlie.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
assertEquals(0, charlie.rpc.stateMachinesSnapshot().size)
}
}
}
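Note on the Byteman rules used throughout the tests above: they all follow the same counter-gated pattern built from Byteman's standard helper functions (`createCounter`, `readCounter`, `incrementCounter`, plus `flag`/`flagged`/`clear` for one-shot conditions). The sketch below isolates that pattern; the class and method names are placeholders and are not part of this change.
    // Hedged sketch of the counter-gated Byteman pattern used by the tests above.
    // "com.example.SomeClass" and "someMethod" are placeholder targets; the real rules
    // point at the action executor's executeCommitTransaction method.
    val sketchRules = """
        RULE Create Counter
        CLASS com.example.SomeClass
        METHOD someMethod
        AT ENTRY
        IF createCounter("counter", 0)
        DO traceln("Counter created")
        ENDRULE

        RULE Throw until the injected failure limit is reached
        CLASS com.example.SomeClass
        METHOD someMethod
        AT ENTRY
        IF readCounter("counter") < 3
        DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
        ENDRULE
    """.trimIndent()
    // submitBytemanRules(sketchRules, port) installs them on the node's Byteman agent, as in the tests above.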

View File

@ -0,0 +1,181 @@
package net.corda.node.services.statemachine
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.KilledFlowException
import net.corda.core.flows.StartableByRPC
import net.corda.core.messaging.startFlow
import net.corda.core.messaging.startTrackedFlow
import net.corda.core.utilities.ProgressTracker
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.CHARLIE_NAME
import net.corda.testing.core.singleIdentity
import org.junit.Test
import java.time.Duration
import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeoutException
import kotlin.test.assertEquals
import kotlin.test.assertFailsWith
import kotlin.test.assertTrue
@Suppress("MaxLineLength") // Byteman rules cannot be easily wrapped
class StateMachineKillFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
/**
* Triggers `killFlow` while the flow is suspended, causing an [InterruptedException] to be thrown and passed through the hospital.
*
* The flow terminates and is not retried.
*
* No pass through the hospital is recorded, as the flow is marked as `isRemoved`.
*/
@Test(timeout = 300_000)
fun `error during transition due to killing a flow will terminate the flow`() {
startDriver {
val alice = createNode(ALICE_NAME)
val flow = alice.rpc.startTrackedFlow(StateMachineKillFlowErrorHandlingTest::SleepFlow)
var flowKilled = false
flow.progress.subscribe {
if (it == SleepFlow.STARTED.label) {
Thread.sleep(5000)
flowKilled = alice.rpc.killFlow(flow.id)
}
}
assertFailsWith<KilledFlowException> { flow.returnValue.getOrThrow(20.seconds) }
assertTrue(flowKilled)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCountsAllZero()
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Triggers `killFlow` during user application code.
*
* The user application code is mimicked by a [Thread.sleep] which, importantly, is not placed inside the [Suspendable]
* call function. Placing it inside a [Suspendable] function causes Quasar to behave unexpectedly.
*
* Although the call to kill the flow is made during user application code, the flow will not be removed / stop processing
* until the next suspension point is reached within the flow.
*
* The flow terminates and is not retried.
*
* No pass through the hospital is recorded, as the flow is marked as `isRemoved`.
*/
@Test(timeout = 300_000)
fun `flow killed during user code execution stops and removes the flow correctly`() {
startDriver {
val alice = createNode(ALICE_NAME)
val flow = alice.rpc.startTrackedFlow(StateMachineKillFlowErrorHandlingTest::ThreadSleepFlow)
var flowKilled = false
flow.progress.subscribe {
if (it == ThreadSleepFlow.STARTED.label) {
Thread.sleep(5000)
flowKilled = alice.rpc.killFlow(flow.id)
}
}
assertFailsWith<KilledFlowException> { flow.returnValue.getOrThrow(30.seconds) }
assertTrue(flowKilled)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCountsAllZero()
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
/**
* Triggers `killFlow` after the flow has already been sent to observation. The flow is not running at this point and
* all that remains is its checkpoint in the database.
*
* The flow terminates and is not retried.
*
* Killing the flow does not lead to any passes through the hospital. All the recorded passes through the hospital are
* from the original flow that was put in for observation.
*/
@Test(timeout = 300_000)
fun `flow killed when it is in the flow hospital for observation is removed correctly`() {
startDriver {
val (alice, port) = createBytemanNode(ALICE_NAME)
val charlie = createNode(CHARLIE_NAME)
val rules = """
RULE Create Counter
CLASS $actionExecutorClassName
METHOD executeSendMultiple
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeSendMultiple action
CLASS $actionExecutorClassName
METHOD executeSendMultiple
AT ENTRY
IF readCounter("counter") < 4
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
""".trimIndent()
submitBytemanRules(rules, port)
val flow = alice.rpc.startFlow(StateMachineErrorHandlingTest::SendAMessageFlow, charlie.nodeInfo.singleIdentity())
assertFailsWith<TimeoutException> { flow.returnValue.getOrThrow(20.seconds) }
alice.rpc.killFlow(flow.id)
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(
discharged = 3,
observation = 1
)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
@StartableByRPC
class SleepFlow : FlowLogic<Unit>() {
object STARTED : ProgressTracker.Step("I am ready to die")
override val progressTracker = ProgressTracker(STARTED)
@Suspendable
override fun call() {
sleep(Duration.of(1, ChronoUnit.SECONDS))
progressTracker.currentStep = STARTED
sleep(Duration.of(2, ChronoUnit.MINUTES))
}
}
@StartableByRPC
class ThreadSleepFlow : FlowLogic<Unit>() {
object STARTED : ProgressTracker.Step("I am ready to die")
override val progressTracker = ProgressTracker(STARTED)
@Suspendable
override fun call() {
sleep(Duration.of(1, ChronoUnit.SECONDS))
progressTracker.currentStep = STARTED
logger.info("Starting ${ThreadSleepFlow::class.qualifiedName} application sleep")
sleep()
logger.info("Finished ${ThreadSleepFlow::class.qualifiedName} application sleep")
sleep(Duration.of(2, ChronoUnit.MINUTES))
}
// Sleep is moved outside of the `@Suspendable` function to prevent issues with Quasar
private fun sleep() {
Thread.sleep(20000)
}
}
}
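The `assertHospitalCounts` and `assertNumberOfCheckpointsAllZero` helpers used above live in the shared `StateMachineErrorHandlingTest` base class, which is not part of this excerpt. For orientation, one way such discharge/observation counters can be collected is by hooking the flow hospital's event lists, as the `HospitalCounter` service in the removed base-class file further down this diff does; a condensed sketch of that service:
    // Condensed from the HospitalCounter CordaService in the removed StatemachineErrorHandlingTest
    // base class shown later in this diff; the new base class is assumed to expose something similar.
    @CordaService
    class HospitalCounterSketch(@Suppress("UNUSED_PARAMETER") services: AppServiceHub) : SingletonSerializeAsToken() {
        var dischargeCounter: Int = 0
        var observationCounter: Int = 0

        init {
            // Each hospital decision bumps the matching counter, which tests can read back over RPC.
            StaffedFlowHospital.onFlowDischarged.add { _, _ -> ++dischargeCounter }
            StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ -> ++observationCounter }
        }
    }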

View File

@ -1,7 +1,6 @@
package net.corda.node.services.statemachine
import co.paralleluniverse.fibers.Suspendable
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.InitiatedBy
@ -20,13 +19,14 @@ import org.junit.Test
import kotlin.test.assertEquals
@Suppress("MaxLineLength") // Byteman rules cannot be easily wrapped
class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
class StateMachineSubFlowErrorHandlingTest : StateMachineErrorHandlingTest() {
/**
* This test checks that flow calling an initiating subflow will recover correctly.
*
* Throws an exception when performing an [Action.CommitTransaction] event during the subflow's first send to a counterparty.
* The exception is thrown 5 times.
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
@ -41,11 +41,11 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
fun `initiating subflow - error during transition with CommitTransaction action that occurs during the first send will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val alice = createBytemanNode(ALICE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
@ -69,66 +69,34 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
ENDRULE
RULE Throw exception on executeCommitTransaction action after first suspend + commit
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("subflow_flag") && flagged("suspend_flag") && flagged("commit_flag") && readCounter("counter") < 5
IF flagged("subflow_flag") && flagged("suspend_flag") && flagged("commit_flag") && readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Set flag when executing first commit
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("subflow_flag") && flagged("suspend_flag") && !flagged("commit_flag")
DO flag("commit_flag"); traceln("Setting commit flag to true")
ENDRULE
RULE Entering internal error staff member
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT ENTRY
IF true
DO traceln("Reached internal transition error staff member")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
submitBytemanRules(rules, port)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
aliceClient.startFlow(StatemachineSubflowErrorHandlingTest::SendAMessageInAnInitiatingSubflowFlow, charlie.nodeInfo.singleIdentity()).returnValue.getOrThrow(
alice.rpc.startFlow(
StateMachineSubFlowErrorHandlingTest::SendAMessageInAnInitiatingSubflowFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
val output = getBytemanOutput(alice)
// Check the stdout for the lines generated by byteman
assertEquals(3, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(0, output.filter { it.contains("Byteman test - overnight observation") }.size)
val (discharge, observation) = aliceClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(3, discharge)
assertEquals(0, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
@ -136,7 +104,8 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
* This test checks that flow calling an initiating subflow will recover correctly.
*
* Throws an exception when performing an [Action.CommitTransaction] event during the subflow's first receive from a counterparty.
* The exception is thrown 5 times.
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
@ -151,11 +120,11 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
fun `initiating subflow - error during transition with CommitTransaction action that occurs after the first receive will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val alice = createBytemanNode(ALICE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
@ -179,58 +148,26 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
ENDRULE
RULE Throw exception on executeCommitTransaction action after first suspend + commit
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("subflow_flag") && flagged("suspend_flag") && readCounter("counter") < 5
IF flagged("subflow_flag") && flagged("suspend_flag") && readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Entering internal error staff member
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT ENTRY
IF true
DO traceln("Reached internal transition error staff member")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
submitBytemanRules(rules, port)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
aliceClient.startFlow(StatemachineSubflowErrorHandlingTest::SendAMessageInAnInitiatingSubflowFlow, charlie.nodeInfo.singleIdentity()).returnValue.getOrThrow(
alice.rpc.startFlow(
StateMachineSubFlowErrorHandlingTest::SendAMessageInAnInitiatingSubflowFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
val output = getBytemanOutput(alice)
// Check the stdout for the lines generated by byteman
assertEquals(3, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(0, output.filter { it.contains("Byteman test - overnight observation") }.size)
val (discharge, observation) = aliceClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(3, discharge)
assertEquals(0, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
@ -238,7 +175,8 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
* This test checks that flow calling an inline subflow will recover correctly.
*
* Throws an exception when performing an [Action.CommitTransaction] event during the subflow's first send to a counterparty.
* The exception is thrown 5 times.
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
@ -253,11 +191,11 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
fun `inline subflow - error during transition with CommitTransaction action that occurs during the first send will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val alice = createBytemanNode(ALICE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
@ -273,58 +211,26 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
ENDRULE
RULE Throw exception on executeCommitTransaction action after first suspend + commit
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("subflow_flag") && readCounter("counter") < 5
IF flagged("subflow_flag") && readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Entering internal error staff member
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT ENTRY
IF true
DO traceln("Reached internal transition error staff member")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
submitBytemanRules(rules, port)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
aliceClient.startFlow(StatemachineSubflowErrorHandlingTest::SendAMessageInAnInlineSubflowFlow, charlie.nodeInfo.singleIdentity()).returnValue.getOrThrow(
alice.rpc.startFlow(
StateMachineSubFlowErrorHandlingTest::SendAMessageInAnInlineSubflowFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
val output = getBytemanOutput(alice)
// Check the stdout for the lines generated by byteman
assertEquals(3, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(0, output.filter { it.contains("Byteman test - overnight observation") }.size)
val (discharge, observation) = aliceClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(3, discharge)
assertEquals(0, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}
@ -332,7 +238,8 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
* This test checks that flow calling an inline subflow will recover correctly.
*
* Throws an exception when performing an [Action.CommitTransaction] event during the subflow's first receive from a counterparty.
* The exception is thrown 5 times.
*
* The exception is thrown 3 times.
*
* This causes the transition to be discharged from the hospital 3 times (retries 3 times). On the final retry the transition
* succeeds and the flow finishes.
@ -347,11 +254,11 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
fun `inline subflow - error during transition with CommitTransaction action that occurs during the first receive will retry and complete successfully`() {
startDriver {
val charlie = createNode(CHARLIE_NAME)
val alice = createBytemanNode(ALICE_NAME)
val (alice, port) = createBytemanNode(ALICE_NAME)
val rules = """
RULE Create Counter
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF createCounter("counter", $counter)
@ -367,66 +274,34 @@ class StatemachineSubflowErrorHandlingTest : StatemachineErrorHandlingTest() {
ENDRULE
RULE Throw exception on executeCommitTransaction action after first suspend + commit
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("subflow_flag") && flagged("commit_flag") && readCounter("counter") < 5
IF flagged("subflow_flag") && flagged("commit_flag") && readCounter("counter") < 3
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Set flag when executing first commit
CLASS ${ActionExecutorImpl::class.java.name}
CLASS $actionExecutorClassName
METHOD executeCommitTransaction
AT ENTRY
IF flagged("subflow_flag") && !flagged("commit_flag")
DO flag("commit_flag"); traceln("Setting commit flag to true")
ENDRULE
RULE Entering internal error staff member
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT ENTRY
IF true
DO traceln("Reached internal transition error staff member")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
submitBytemanRules(rules, port)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
aliceClient.startFlow(StatemachineSubflowErrorHandlingTest::SendAMessageInAnInlineSubflowFlow, charlie.nodeInfo.singleIdentity()).returnValue.getOrThrow(
alice.rpc.startFlow(
StateMachineSubFlowErrorHandlingTest::SendAMessageInAnInlineSubflowFlow,
charlie.nodeInfo.singleIdentity()
).returnValue.getOrThrow(
30.seconds
)
val output = getBytemanOutput(alice)
// Check the stdout for the lines generated by byteman
assertEquals(3, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(0, output.filter { it.contains("Byteman test - overnight observation") }.size)
val (discharge, observation) = aliceClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(3, discharge)
assertEquals(0, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
alice.rpc.assertNumberOfCheckpointsAllZero()
alice.rpc.assertHospitalCounts(discharged = 3)
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
}
}

View File

@ -1,166 +0,0 @@
package net.corda.node.services.statemachine
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.internal.list
import net.corda.core.internal.readAllLines
import net.corda.core.node.AppServiceHub
import net.corda.core.node.services.CordaService
import net.corda.core.serialization.CordaSerializable
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
import net.corda.node.services.Permissions
import net.corda.testing.core.DUMMY_NOTARY_NAME
import net.corda.testing.driver.DriverDSL
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeHandle
import net.corda.testing.driver.NodeParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.NotarySpec
import net.corda.testing.node.TestCordapp
import net.corda.testing.node.User
import net.corda.testing.node.internal.InternalDriverDSL
import org.jboss.byteman.agent.submit.ScriptText
import org.jboss.byteman.agent.submit.Submit
import org.junit.Before
abstract class StatemachineErrorHandlingTest {
val rpcUser = User("user1", "test", permissions = setOf(Permissions.all()))
var counter = 0
@Before
fun setup() {
counter = 0
}
internal fun startDriver(notarySpec: NotarySpec = NotarySpec(DUMMY_NOTARY_NAME), dsl: DriverDSL.() -> Unit) {
driver(
DriverParameters(
notarySpecs = listOf(notarySpec),
startNodesInProcess = false,
inMemoryDB = false,
systemProperties = mapOf("co.paralleluniverse.fibers.verifyInstrumentation" to "true")
)
) {
dsl()
}
}
internal fun DriverDSL.createBytemanNode(
providedName: CordaX500Name,
additionalCordapps: Collection<TestCordapp> = emptyList()
): NodeHandle {
return (this as InternalDriverDSL).startNode(
NodeParameters(
providedName = providedName,
rpcUsers = listOf(rpcUser),
additionalCordapps = additionalCordapps
),
bytemanPort = 12000
).getOrThrow()
}
internal fun DriverDSL.createNode(providedName: CordaX500Name, additionalCordapps: Collection<TestCordapp> = emptyList()): NodeHandle {
return startNode(
NodeParameters(
providedName = providedName,
rpcUsers = listOf(rpcUser),
additionalCordapps = additionalCordapps
)
).getOrThrow()
}
internal fun submitBytemanRules(rules: String) {
val submit = Submit("localhost", 12000)
submit.addScripts(listOf(ScriptText("Test script", rules)))
}
internal fun getBytemanOutput(nodeHandle: NodeHandle): List<String> {
return nodeHandle.baseDirectory
.list()
.filter { it.toString().contains("net.corda.node.Corda") && it.toString().contains("stdout.log") }
.flatMap { it.readAllLines() }
}
@StartableByRPC
@InitiatingFlow
class SendAMessageFlow(private val party: Party) : FlowLogic<String>() {
@Suspendable
override fun call(): String {
val session = initiateFlow(party)
session.send("hello there")
return "Finished executing test flow - ${this.runId}"
}
}
@InitiatedBy(SendAMessageFlow::class)
class SendAMessageResponder(private val session: FlowSession) : FlowLogic<Unit>() {
@Suspendable
override fun call() {
session.receive<String>().unwrap { it }
}
}
@StartableByRPC
class GetNumberOfUncompletedCheckpointsFlow : FlowLogic<Long>() {
override fun call(): Long {
val sqlStatement = "select count(*) from node_checkpoints where status not in (${Checkpoint.FlowStatus.COMPLETED.ordinal})"
return serviceHub.jdbcSession().prepareStatement(sqlStatement).use { ps ->
ps.executeQuery().use { rs ->
rs.next()
rs.getLong(1)
}
}
}
}
@StartableByRPC
class GetNumberOfHospitalizedCheckpointsFlow : FlowLogic<Long>() {
override fun call(): Long {
val sqlStatement = "select count(*) from node_checkpoints where status in (${Checkpoint.FlowStatus.HOSPITALIZED.ordinal})"
return serviceHub.jdbcSession().prepareStatement(sqlStatement).use { ps ->
ps.executeQuery().use { rs ->
rs.next()
rs.getLong(1)
}
}
}
}
// Internal use for testing only!!
@StartableByRPC
class GetHospitalCountersFlow : FlowLogic<HospitalCounts>() {
override fun call(): HospitalCounts =
HospitalCounts(
serviceHub.cordaService(HospitalCounter::class.java).dischargeCounter,
serviceHub.cordaService(HospitalCounter::class.java).observationCounter
)
}
@CordaSerializable
data class HospitalCounts(val discharge: Int, val observation: Int)
@Suppress("UNUSED_PARAMETER")
@CordaService
class HospitalCounter(services: AppServiceHub) : SingletonSerializeAsToken() {
var observationCounter: Int = 0
var dischargeCounter: Int = 0
init {
StaffedFlowHospital.onFlowDischarged.add { _, _ ->
++dischargeCounter
}
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
++observationCounter
}
}
}
}

View File

@ -1,321 +0,0 @@
package net.corda.node.services.statemachine
import co.paralleluniverse.fibers.Suspendable
import net.corda.client.rpc.CordaRPCClient
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.KilledFlowException
import net.corda.core.flows.StartableByRPC
import net.corda.core.messaging.startFlow
import net.corda.core.messaging.startTrackedFlow
import net.corda.core.utilities.ProgressTracker
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.seconds
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.CHARLIE_NAME
import net.corda.testing.core.singleIdentity
import org.junit.Test
import java.time.Duration
import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeoutException
import kotlin.test.assertEquals
import kotlin.test.assertFailsWith
import kotlin.test.assertTrue
@Suppress("MaxLineLength") // Byteman rules cannot be easily wrapped
class StatemachineKillFlowErrorHandlingTest : StatemachineErrorHandlingTest() {
/**
* Triggers `killFlow` while the flow is suspended causing a [InterruptedException] to be thrown and passed through the hospital.
*
* The flow terminates and is not retried.
*
* No pass through the hospital is recorded. As the flow is marked as `isRemoved`.
*/
@Test(timeout=300_000)
fun `error during transition due to killing a flow will terminate the flow`() {
startDriver {
val alice = createBytemanNode(ALICE_NAME)
val rules = """
RULE Entering internal error staff member
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT ENTRY
IF true
DO traceln("Reached internal transition error staff member")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
RULE Increment terminal counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ TERMINAL
IF true
DO traceln("Byteman test - terminal")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val flow = aliceClient.startTrackedFlow(StatemachineKillFlowErrorHandlingTest::SleepFlow)
var flowKilled = false
flow.progress.subscribe {
if (it == SleepFlow.STARTED.label) {
Thread.sleep(5000)
flowKilled = aliceClient.killFlow(flow.id)
}
}
assertFailsWith<KilledFlowException> { flow.returnValue.getOrThrow(20.seconds) }
val output = getBytemanOutput(alice)
assertTrue(flowKilled)
// Check the stdout for the lines generated by byteman
assertEquals(0, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(0, output.filter { it.contains("Byteman test - overnight observation") }.size)
val (discharge, observation) = aliceClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(0, discharge)
assertEquals(0, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
}
}
/**
* Triggers `killFlow` during user application code.
*
* The user application code is mimicked by a [Thread.sleep] which is importantly not placed inside the [Suspendable]
* call function. Placing it inside a [Suspendable] function causes quasar to behave unexpectedly.
*
* Although the call to kill the flow is made during user application code. It will not be removed / stop processing
* until the next suspension point is reached within the flow.
*
* The flow terminates and is not retried.
*
* No pass through the hospital is recorded. As the flow is marked as `isRemoved`.
*/
@Test(timeout=300_000)
fun `flow killed during user code execution stops and removes the flow correctly`() {
startDriver {
val alice = createBytemanNode(ALICE_NAME)
val rules = """
RULE Entering internal error staff member
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT ENTRY
IF true
DO traceln("Reached internal transition error staff member")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
RULE Increment terminal counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ TERMINAL
IF true
DO traceln("Byteman test - terminal")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val flow = aliceClient.startTrackedFlow(StatemachineKillFlowErrorHandlingTest::ThreadSleepFlow)
var flowKilled = false
flow.progress.subscribe {
if (it == ThreadSleepFlow.STARTED.label) {
Thread.sleep(5000)
flowKilled = aliceClient.killFlow(flow.id)
}
}
assertFailsWith<KilledFlowException> { flow.returnValue.getOrThrow(30.seconds) }
val output = getBytemanOutput(alice)
assertTrue(flowKilled)
// Check the stdout for the lines generated by byteman
assertEquals(0, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(0, output.filter { it.contains("Byteman test - overnight observation") }.size)
val numberOfTerminalDiagnoses = output.filter { it.contains("Byteman test - terminal") }.size
println(numberOfTerminalDiagnoses)
assertEquals(0, numberOfTerminalDiagnoses)
val (discharge, observation) = aliceClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(0, discharge)
assertEquals(0, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
}
}
/**
* Triggers `killFlow` after the flow has already been sent to observation. The flow is not running at this point and
* all that remains is its checkpoint in the database.
*
* The flow terminates and is not retried.
*
* Killing the flow does not lead to any passes through the hospital. All the recorded passes through the hospital are
* from the original flow that was put in for observation.
*/
@Test(timeout=300_000)
fun `flow killed when it is in the flow hospital for observation is removed correctly`() {
startDriver {
val alice = createBytemanNode(ALICE_NAME)
val charlie = createNode(CHARLIE_NAME)
val rules = """
RULE Create Counter
CLASS ${ActionExecutorImpl::class.java.name}
METHOD executeSendMultiple
AT ENTRY
IF createCounter("counter", $counter)
DO traceln("Counter created")
ENDRULE
RULE Throw exception on executeSendMultiple action
CLASS ${ActionExecutorImpl::class.java.name}
METHOD executeSendMultiple
AT ENTRY
IF readCounter("counter") < 4
DO incrementCounter("counter"); traceln("Throwing exception"); throw new java.lang.RuntimeException("die dammit die")
ENDRULE
RULE Entering internal error staff member
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT ENTRY
IF true
DO traceln("Reached internal transition error staff member")
ENDRULE
RULE Increment discharge counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ DISCHARGE
IF true
DO traceln("Byteman test - discharging")
ENDRULE
RULE Increment observation counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ OVERNIGHT_OBSERVATION
IF true
DO traceln("Byteman test - overnight observation")
ENDRULE
RULE Increment terminal counter
CLASS ${StaffedFlowHospital.TransitionErrorGeneralPractitioner::class.java.name}
METHOD consult
AT READ TERMINAL
IF true
DO traceln("Byteman test - terminal")
ENDRULE
""".trimIndent()
submitBytemanRules(rules)
val aliceClient =
CordaRPCClient(alice.rpcAddress).start(rpcUser.username, rpcUser.password).proxy
val flow = aliceClient.startFlow(StatemachineErrorHandlingTest::SendAMessageFlow, charlie.nodeInfo.singleIdentity())
assertFailsWith<TimeoutException> { flow.returnValue.getOrThrow(20.seconds) }
aliceClient.killFlow(flow.id)
val output = getBytemanOutput(alice)
// Check the stdout for the lines generated by byteman
assertEquals(3, output.filter { it.contains("Byteman test - discharging") }.size)
assertEquals(1, output.filter { it.contains("Byteman test - overnight observation") }.size)
val numberOfTerminalDiagnoses = output.filter { it.contains("Byteman test - terminal") }.size
assertEquals(0, numberOfTerminalDiagnoses)
val (discharge, observation) = aliceClient.startFlow(StatemachineErrorHandlingTest::GetHospitalCountersFlow).returnValue.get()
assertEquals(3, discharge)
assertEquals(1, observation)
assertEquals(0, aliceClient.stateMachinesSnapshot().size)
// 1 for GetNumberOfCheckpointsFlow
assertEquals(1, aliceClient.startFlow(StatemachineErrorHandlingTest::GetNumberOfUncompletedCheckpointsFlow).returnValue.get())
}
}
@StartableByRPC
class SleepFlow : FlowLogic<Unit>() {
object STARTED : ProgressTracker.Step("I am ready to die")
override val progressTracker = ProgressTracker(STARTED)
@Suspendable
override fun call() {
sleep(Duration.of(1, ChronoUnit.SECONDS))
progressTracker.currentStep = STARTED
sleep(Duration.of(2, ChronoUnit.MINUTES))
}
}
@StartableByRPC
class ThreadSleepFlow : FlowLogic<Unit>() {
object STARTED : ProgressTracker.Step("I am ready to die")
override val progressTracker = ProgressTracker(STARTED)
@Suspendable
override fun call() {
sleep(Duration.of(1, ChronoUnit.SECONDS))
progressTracker.currentStep = STARTED
logger.info("Starting ${ThreadSleepFlow::class.qualifiedName} application sleep")
sleep()
logger.info("Finished ${ThreadSleepFlow::class.qualifiedName} application sleep")
sleep(Duration.of(2, ChronoUnit.MINUTES))
}
// Sleep is moved outside of `@Suspendable` function to prevent issues with Quasar
private fun sleep() {
Thread.sleep(20000)
}
}
}

View File

@ -0,0 +1,14 @@
package net.corda.contracts.serialization.generics
import net.corda.core.serialization.CordaSerializable
@CordaSerializable
data class DataObject(val value: Long) : Comparable<DataObject> {
override fun toString(): String {
return "$value data points"
}
override fun compareTo(other: DataObject): Int {
return value.compareTo(other.value)
}
}

View File

@ -0,0 +1,47 @@
package net.corda.contracts.serialization.generics
import net.corda.core.contracts.CommandData
import net.corda.core.contracts.Contract
import net.corda.core.contracts.ContractState
import net.corda.core.contracts.requireThat
import net.corda.core.identity.AbstractParty
import net.corda.core.transactions.LedgerTransaction
import java.util.Optional
@Suppress("unused")
class GenericTypeContract : Contract {
override fun verify(tx: LedgerTransaction) {
val states = tx.outputsOfType<State>()
requireThat {
"Requires at least one data state" using states.isNotEmpty()
}
val purchases = tx.commandsOfType<Purchase>()
requireThat {
"Requires at least one purchase" using purchases.isNotEmpty()
}
for (purchase in purchases) {
requireThat {
"Purchase has a price" using purchase.value.price.isPresent
}
}
}
@Suppress("CanBeParameter", "MemberVisibilityCanBePrivate")
class State(val owner: AbstractParty, val data: DataObject?) : ContractState {
override val participants: List<AbstractParty> = listOf(owner)
@Override
override fun toString(): String {
return data.toString()
}
}
/**
* The [price] field is the important feature of the [Purchase]
* class because its type is [Optional] with a CorDapp-specific
* generic type parameter. It does not matter that the [price]
* is not used; it only matters that the [Purchase] command
* must be serialized as part of building a new transaction.
*/
class Purchase(val price: Optional<DataObject>) : CommandData
}
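The `GenericTypeFlow` in the next file is what forces this serialization: it wraps the (possibly absent) `DataObject` in `Optional.ofNullable` and attaches it as a `Purchase` command while building the transaction. Condensed from that flow (with `notary`, `purchase`, `ourIdentity` and `serviceHub` assumed to be in scope):
    // Condensed sketch mirroring GenericTypeFlow below; identifiers are assumed to be in scope.
    val stx = serviceHub.signInitialTransaction(
        TransactionBuilder(notary)
            .addOutputState(State(ourIdentity, purchase))
            // The Purchase command carries Optional<DataObject>, so it must serialize cleanly.
            .addCommand(Command(Purchase(Optional.ofNullable(purchase)), ourIdentity.owningKey))
    )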

View File

@ -0,0 +1,27 @@
package net.corda.flows.serialization.generics
import co.paralleluniverse.fibers.Suspendable
import net.corda.contracts.serialization.generics.DataObject
import net.corda.contracts.serialization.generics.GenericTypeContract.Purchase
import net.corda.contracts.serialization.generics.GenericTypeContract.State
import net.corda.core.contracts.Command
import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByRPC
import net.corda.core.transactions.TransactionBuilder
import java.util.Optional
@StartableByRPC
class GenericTypeFlow(private val purchase: DataObject?) : FlowLogic<SecureHash>() {
@Suspendable
override fun call(): SecureHash {
val notary = serviceHub.networkMapCache.notaryIdentities[0]
val stx = serviceHub.signInitialTransaction(
TransactionBuilder(notary)
.addOutputState(State(ourIdentity, purchase))
.addCommand(Command(Purchase(Optional.ofNullable(purchase)), ourIdentity.owningKey))
)
stx.verify(serviceHub, checkSufficientSignatures = false)
return stx.id
}
}

View File

@ -15,6 +15,7 @@ import net.corda.node.services.Permissions
import net.corda.node.services.config.PasswordEncryption
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.node.internal.NodeBasedTest
import net.corda.testing.node.internal.cordappForClasses
import org.apache.activemq.artemis.api.core.ActiveMQSecurityException
import org.apache.shiro.authc.credential.DefaultPasswordService
import org.junit.After
@ -32,7 +33,7 @@ import kotlin.test.assertFailsWith
* check authentication/authorization of RPC connections.
*/
@RunWith(Parameterized::class)
class AuthDBTests : NodeBasedTest() {
class AuthDBTests : NodeBasedTest(cordappPackages = CORDAPPS) {
private lateinit var node: NodeWithInfo
private lateinit var client: CordaRPCClient
private lateinit var db: UsersDB
@ -43,6 +44,9 @@ class AuthDBTests : NodeBasedTest() {
@JvmStatic
@Parameterized.Parameters(name = "password encryption format = {0}")
fun encFormats() = arrayOf(PasswordEncryption.NONE, PasswordEncryption.SHIRO_1_CRYPT)
@Suppress("SpreadOperator")
private val CORDAPPS = setOf(cordappForClasses(*AuthDBTests::class.nestedClasses.map { it.java }.toTypedArray()))
}
@Suppress("MemberVisibilityCanBePrivate")

View File

@ -0,0 +1,78 @@
package net.corda.node
import net.corda.client.rpc.CordaRPCClient
import net.corda.contracts.serialization.generics.DataObject
import net.corda.core.contracts.TransactionVerificationException.ContractRejection
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.loggerFor
import net.corda.flows.serialization.generics.GenericTypeFlow
import net.corda.node.services.Permissions
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.DUMMY_NOTARY_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.incrementalPortAllocation
import net.corda.testing.node.NotarySpec
import net.corda.testing.node.User
import net.corda.testing.node.internal.cordappWithPackages
import org.assertj.core.api.Assertions.assertThat
import org.junit.Test
import org.junit.jupiter.api.assertThrows
@Suppress("FunctionName")
class ContractWithGenericTypeTest {
companion object {
const val DATA_VALUE = 5000L
@JvmField
val logger = loggerFor<ContractWithGenericTypeTest>()
@JvmField
val user = User("u", "p", setOf(Permissions.all()))
fun parameters(): DriverParameters {
return DriverParameters(
portAllocation = incrementalPortAllocation(),
startNodesInProcess = false,
notarySpecs = listOf(NotarySpec(DUMMY_NOTARY_NAME, validating = true)),
cordappsForAllNodes = listOf(
cordappWithPackages("net.corda.flows.serialization.generics").signed(),
cordappWithPackages("net.corda.contracts.serialization.generics").signed()
)
)
}
}
@Test(timeout = 300_000)
fun `flow with value of generic type`() {
driver(parameters()) {
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val txID = CordaRPCClient(hostAndPort = alice.rpcAddress)
.start(user.username, user.password)
.use { client ->
client.proxy.startFlow(::GenericTypeFlow, DataObject(DATA_VALUE))
.returnValue
.getOrThrow()
}
logger.info("TX-ID=$txID")
}
}
@Test(timeout = 300_000)
fun `flow without value of generic type`() {
driver(parameters()) {
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val ex = assertThrows<ContractRejection> {
CordaRPCClient(hostAndPort = alice.rpcAddress)
.start(user.username, user.password)
.use { client ->
client.proxy.startFlow(::GenericTypeFlow, null)
.returnValue
.getOrThrow()
}
}
assertThat(ex).hasMessageContaining("Contract verification failed: Failed requirement: Purchase has a price,")
}
}
}

View File

@ -1,114 +0,0 @@
package net.corda.node
import net.corda.core.utilities.getOrThrow
import net.corda.node.logging.logFile
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.NodeParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.incrementalPortAllocation
import org.assertj.core.api.Assertions.assertThatThrownBy
import org.junit.Test
import org.junit.Assert.assertTrue
class NodeConfigParsingTests {
@Test(timeout=300_000)
fun `config is overriden by underscore variable`() {
val portAllocator = incrementalPortAllocation()
val sshPort = portAllocator.nextPort()
driver(DriverParameters(
environmentVariables = mapOf("corda_sshd_port" to sshPort.toString()),
startNodesInProcess = false,
portAllocation = portAllocator,
cordappsForAllNodes = emptyList())) {
val hasSsh = startNode().get()
.logFile()
.readLines()
.filter { it.contains("SSH server listening on port") }
.any { it.contains(sshPort.toString()) }
assertTrue(hasSsh)
}
}
@Test(timeout=300_000)
fun `config is overriden by case insensitive underscore variable`() {
val portAllocator = incrementalPortAllocation()
val sshPort = portAllocator.nextPort()
driver(DriverParameters(
environmentVariables = mapOf("CORDA_sshd_port" to sshPort.toString()),
startNodesInProcess = false,
portAllocation = portAllocator,
cordappsForAllNodes = emptyList())) {
val hasSsh = startNode().get()
.logFile()
.readLines()
.filter { it.contains("SSH server listening on port") }
.any { it.contains(sshPort.toString()) }
assertTrue(hasSsh)
}
}
@Test(timeout=300_000)
fun `config is overriden by case insensitive dot variable`() {
val portAllocator = incrementalPortAllocation()
val sshPort = portAllocator.nextPort()
driver(DriverParameters(
environmentVariables = mapOf("CORDA.sshd.port" to sshPort.toString(),
"corda.devMode" to true.toString()),
startNodesInProcess = false,
portAllocation = portAllocator,
cordappsForAllNodes = emptyList())) {
val hasSsh = startNode(NodeParameters()).get()
.logFile()
.readLines()
.filter { it.contains("SSH server listening on port") }
.any { it.contains(sshPort.toString()) }
assertTrue(hasSsh)
}
}
@Test(timeout=300_000)
fun `shadowing is forbidden`() {
val portAllocator = incrementalPortAllocation()
val sshPort = portAllocator.nextPort()
driver(DriverParameters(
environmentVariables = mapOf(
"CORDA_sshd_port" to sshPort.toString(),
"corda.sshd.port" to sshPort.toString()),
startNodesInProcess = false,
portAllocation = portAllocator,
notarySpecs = emptyList())) {
assertThatThrownBy {
startNode().getOrThrow()
}
}
}
@Test(timeout=300_000)
fun `bad keys are ignored and warned for`() {
val portAllocator = incrementalPortAllocation()
driver(DriverParameters(
environmentVariables = mapOf(
"corda_bad_key" to "2077"),
startNodesInProcess = false,
portAllocation = portAllocator,
notarySpecs = emptyList(),
cordappsForAllNodes = emptyList())) {
val hasWarning = startNode()
.getOrThrow()
.logFile()
.readLines()
.any {
it.contains("(property or environment variable) cannot be mapped to an existing Corda")
}
assertTrue(hasWarning)
}
}
}

View File

@ -11,7 +11,7 @@ import kotlin.test.assertEquals
import kotlin.test.assertTrue
class NodeRPCTests {
private val CORDA_VERSION_REGEX = "\\d+(\\.\\d+)?(-\\w+)?".toRegex()
private val CORDA_VERSION_REGEX = "\\d+(\\.\\d+)?(\\.\\d+)?(-\\w+)?".toRegex()
private val CORDA_VENDOR = "Corda Open Source"
private val CORDAPPS = listOf(FINANCE_CONTRACTS_CORDAPP, FINANCE_WORKFLOWS_CORDAPP)
private val CORDAPP_TYPES = setOf("Contract CorDapp", "Workflow CorDapp")

View File

@ -0,0 +1,85 @@
package net.corda.node.services
import net.corda.client.rpc.CordaRPCClient
import net.corda.contracts.serialization.generics.DataObject
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.loggerFor
import net.corda.flows.serialization.generics.GenericTypeFlow
import net.corda.node.DeterministicSourcesRule
import net.corda.node.internal.djvm.DeterministicVerificationException
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.DUMMY_NOTARY_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.incrementalPortAllocation
import net.corda.testing.node.NotarySpec
import net.corda.testing.node.User
import net.corda.testing.node.internal.cordappWithPackages
import org.assertj.core.api.Assertions.assertThat
import org.junit.ClassRule
import org.junit.Test
import org.junit.jupiter.api.assertThrows
@Suppress("FunctionName")
class DeterministicContractWithGenericTypeTest {
companion object {
const val DATA_VALUE = 5000L
@JvmField
val logger = loggerFor<DeterministicContractWithGenericTypeTest>()
@JvmField
val user = User("u", "p", setOf(Permissions.all()))
@ClassRule
@JvmField
val djvmSources = DeterministicSourcesRule()
fun parameters(): DriverParameters {
return DriverParameters(
portAllocation = incrementalPortAllocation(),
startNodesInProcess = false,
notarySpecs = listOf(NotarySpec(DUMMY_NOTARY_NAME, validating = true)),
cordappsForAllNodes = listOf(
cordappWithPackages("net.corda.flows.serialization.generics").signed(),
cordappWithPackages("net.corda.contracts.serialization.generics").signed()
),
djvmBootstrapSource = djvmSources.bootstrap,
djvmCordaSource = djvmSources.corda
)
}
}
@Test(timeout = 300_000)
fun `test DJVM can deserialise command with generic type`() {
driver(parameters()) {
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val txID = CordaRPCClient(hostAndPort = alice.rpcAddress)
.start(user.username, user.password)
.use { client ->
client.proxy.startFlow(::GenericTypeFlow, DataObject(DATA_VALUE))
.returnValue
.getOrThrow()
}
logger.info("TX-ID=$txID")
}
}
@Test(timeout = 300_000)
fun `test DJVM can deserialise command without value of generic type`() {
driver(parameters()) {
val alice = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()
val ex = assertThrows<DeterministicVerificationException> {
CordaRPCClient(hostAndPort = alice.rpcAddress)
.start(user.username, user.password)
.use { client ->
client.proxy.startFlow(::GenericTypeFlow, null)
.returnValue
.getOrThrow()
}
}
assertThat(ex).hasMessageContaining("Contract verification failed: Failed requirement: Purchase has a price,")
}
}
}

View File

@ -0,0 +1,32 @@
package net.corda.node.services.config
import net.corda.core.utilities.getOrThrow
import net.corda.node.logging.logFile
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.driver.internal.incrementalPortAllocation
import org.junit.Assert.assertTrue
import org.junit.Test
class NodeConfigParsingTests {
@Test(timeout = 300_000)
fun `bad keys are ignored and warned for`() {
val portAllocator = incrementalPortAllocation()
driver(DriverParameters(
environmentVariables = mapOf(
"corda_bad_key" to "2077"),
startNodesInProcess = false,
portAllocation = portAllocator,
notarySpecs = emptyList())) {
val hasWarning = startNode()
.getOrThrow()
.logFile()
.readLines()
.any {
it.contains("(property or environment variable) cannot be mapped to an existing Corda")
}
assertTrue(hasWarning)
}
}
}

View File

@ -0,0 +1,72 @@
package net.corda.node.services.messaging
import co.paralleluniverse.fibers.Suspendable
import net.corda.core.flows.Destination
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.FlowSession
import net.corda.core.flows.InitiatedBy
import net.corda.core.flows.InitiatingFlow
import net.corda.core.flows.StartableByRPC
import net.corda.core.internal.concurrent.transpose
import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.unwrap
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import org.junit.Test
import kotlin.test.assertEquals
class MessagingSendAllTest {
@Test(timeout=300_000)
fun `flow can exchange messages with multiple sessions to the same party in parallel`() {
driver(DriverParameters(startNodesInProcess = true)) {
val (alice, bob) = listOf(
startNode(providedName = ALICE_NAME),
startNode(providedName = BOB_NAME)
).transpose().getOrThrow()
val bobIdentity = bob.nodeInfo.singleIdentity()
val messages = listOf(
bobIdentity to "hey bob 1",
bobIdentity to "hey bob 2"
)
alice.rpc.startFlow(::SenderFlow, messages).returnValue.getOrThrow()
}
}
@StartableByRPC
@InitiatingFlow
class SenderFlow(private val parties: List<Pair<Destination, String>>): FlowLogic<String>() {
@Suspendable
override fun call(): String {
val messagesPerSession = parties.toList().map { (party, messageType) ->
val session = initiateFlow(party)
Pair(session, messageType)
}.toMap()
sendAllMap(messagesPerSession)
val messages = receiveAll(String::class.java, messagesPerSession.keys.toList())
messages.map { it.unwrap { payload -> assertEquals("pong", payload) } }
return "ok"
}
}
@InitiatedBy(SenderFlow::class)
class RecipientFlow(private val otherPartySession: FlowSession): FlowLogic<String>() {
@Suspendable
override fun call(): String {
otherPartySession.receive<String>().unwrap { it }
otherPartySession.send("pong")
return "ok"
}
}
}

View File

@ -1,6 +1,7 @@
package net.corda.services.messaging
import net.corda.core.crypto.Crypto
import net.corda.core.crypto.toStringShort
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.createDirectories
import net.corda.core.internal.exists
@ -14,6 +15,9 @@ import net.corda.nodeapi.internal.crypto.CertificateType
import net.corda.nodeapi.internal.crypto.X509Utilities
import net.corda.nodeapi.internal.loadDevCaTrustStore
import net.corda.coretesting.internal.stubs.CertificateStoreStubs
import net.corda.nodeapi.internal.ArtemisMessagingComponent
import net.corda.services.messaging.SimpleAMQPClient.Companion.sendAndVerify
import net.corda.testing.core.singleIdentity
import org.apache.activemq.artemis.api.config.ActiveMQDefaultConfiguration
import org.apache.activemq.artemis.api.core.ActiveMQClusterSecurityException
import org.apache.activemq.artemis.api.core.ActiveMQNotConnectedException
@ -24,6 +28,8 @@ import org.bouncycastle.asn1.x509.GeneralSubtree
import org.bouncycastle.asn1.x509.NameConstraints
import org.junit.Test
import java.nio.file.Files
import javax.jms.JMSSecurityException
import kotlin.test.assertEquals
/**
* Runs the security tests with the attacker pretending to be a node on the network.
@ -39,7 +45,7 @@ class MQSecurityAsNodeTest : P2PMQSecurityTest() {
@Test(timeout=300_000)
fun `send message to RPC requests address`() {
assertSendAttackFails(RPCApi.RPC_SERVER_QUEUE_NAME)
assertProducerQueueCreationAttackFails(RPCApi.RPC_SERVER_QUEUE_NAME)
}
@Test(timeout=300_000)
@ -117,4 +123,53 @@ class MQSecurityAsNodeTest : P2PMQSecurityTest() {
attacker.start(PEER_USER, PEER_USER)
}
}
override fun `send message to notifications address`() {
assertProducerQueueCreationAttackFails(ArtemisMessagingComponent.NOTIFICATIONS_ADDRESS)
}
@Test(timeout=300_000)
fun `send message on core protocol`() {
val attacker = clientTo(alice.node.configuration.p2pAddress)
attacker.start(PEER_USER, PEER_USER)
val message = attacker.createMessage()
assertEquals(true, attacker.producer.isBlockOnNonDurableSend)
assertThatExceptionOfType(ActiveMQSecurityException::class.java).isThrownBy {
attacker.producer.send("${ArtemisMessagingComponent.P2P_PREFIX}${alice.info.singleIdentity().owningKey.toStringShort()}", message)
}.withMessageContaining("CoreMessage").withMessageContaining("AMQPMessage")
}
@Test(timeout = 300_000)
fun `send AMQP message with correct validated user in header`() {
val attacker = amqpClientTo(alice.node.configuration.p2pAddress)
val session = attacker.start(PEER_USER, PEER_USER)
val message = session.createMessage()
message.setStringProperty("_AMQ_VALIDATED_USER", "O=MegaCorp, L=London, C=GB")
val queue = session.createQueue("${ArtemisMessagingComponent.P2P_PREFIX}${alice.info.singleIdentity().owningKey.toStringShort()}")
val producer = session.createProducer(queue)
producer.sendAndVerify(message)
}
@Test(timeout = 300_000)
fun `send AMQP message with incorrect validated user in header`() {
val attacker = amqpClientTo(alice.node.configuration.p2pAddress)
val session = attacker.start(PEER_USER, PEER_USER)
val message = session.createMessage()
message.setStringProperty("_AMQ_VALIDATED_USER", "O=Bob, L=New York, C=US")
val queue = session.createQueue("${ArtemisMessagingComponent.P2P_PREFIX}${alice.info.singleIdentity().owningKey.toStringShort()}")
val producer = session.createProducer(queue)
assertThatExceptionOfType(JMSSecurityException::class.java).isThrownBy {
producer.sendAndVerify(message)
}.withMessageContaining("_AMQ_VALIDATED_USER mismatch")
}
@Test(timeout = 300_000)
fun `send AMQP message without header`() {
val attacker = amqpClientTo(alice.node.configuration.p2pAddress)
val session = attacker.start(PEER_USER, PEER_USER)
val message = session.createMessage()
val queue = session.createQueue("${ArtemisMessagingComponent.P2P_PREFIX}${alice.info.singleIdentity().owningKey.toStringShort()}")
val producer = session.createProducer(queue)
producer.sendAndVerify(message)
}
}

View File

@ -45,7 +45,7 @@ abstract class MQSecurityTest : NodeBasedTest() {
private val rpcUser = User("user1", "pass", permissions = emptySet())
lateinit var alice: NodeWithInfo
lateinit var attacker: SimpleMQClient
private val clients = ArrayList<SimpleMQClient>()
private val runOnStop = ArrayList<() -> Any?>()
@Before
override fun setUp() {
@ -62,8 +62,8 @@ abstract class MQSecurityTest : NodeBasedTest() {
abstract fun startAttacker(attacker: SimpleMQClient)
@After
fun stopClients() {
clients.forEach { it.stop() }
fun tearDown() {
runOnStop.forEach { it() }
}
@Test(timeout=300_000)
@ -79,7 +79,7 @@ abstract class MQSecurityTest : NodeBasedTest() {
}
@Test(timeout=300_000)
fun `send message to notifications address`() {
open fun `send message to notifications address`() {
assertSendAttackFails(NOTIFICATIONS_ADDRESS)
}
@ -97,18 +97,21 @@ abstract class MQSecurityTest : NodeBasedTest() {
fun clientTo(target: NetworkHostAndPort, sslConfiguration: MutualSslConfiguration? = configureTestSSL(CordaX500Name("MegaCorp", "London", "GB"))): SimpleMQClient {
val client = SimpleMQClient(target, sslConfiguration)
clients += client
runOnStop += client::stop
return client
}
fun amqpClientTo(target: NetworkHostAndPort,
sslConfiguration: MutualSslConfiguration = configureTestSSL(CordaX500Name("MegaCorp", "London", "GB"))
): SimpleAMQPClient {
val client = SimpleAMQPClient(target, sslConfiguration)
runOnStop += client::stop
return client
}
private val rpcConnections = mutableListOf<CordaRPCConnection>()
private fun loginToRPC(target: NetworkHostAndPort, rpcUser: User): CordaRPCOps {
return CordaRPCClient(target).start(rpcUser.username, rpcUser.password).also { rpcConnections.add(it) }.proxy
}
@After
fun closeRPCConnections() {
rpcConnections.forEach { it.forceClose() }
return CordaRPCClient(target).start(rpcUser.username, rpcUser.password).also { runOnStop += it::forceClose }.proxy
}
fun loginToRPCAndGetClientQueue(): String {
@ -152,7 +155,7 @@ abstract class MQSecurityTest : NodeBasedTest() {
}
}
fun assertSendAttackFails(address: String) {
open fun assertSendAttackFails(address: String) {
val message = attacker.createMessage()
assertEquals(true, attacker.producer.isBlockOnNonDurableSend)
assertAttackFails(address, "SEND") {

View File

@ -3,17 +3,43 @@ package net.corda.services.messaging
import net.corda.core.crypto.generateKeyPair
import net.corda.core.crypto.toStringShort
import net.corda.nodeapi.RPCApi
import net.corda.nodeapi.internal.ArtemisMessagingComponent
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.P2P_PREFIX
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEERS_PREFIX
import net.corda.services.messaging.SimpleAMQPClient.Companion.sendAndVerify
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.singleIdentity
import org.assertj.core.api.Assertions.assertThatExceptionOfType
import org.junit.Test
import javax.jms.JMSException
/**
* Runs a series of MQ-related attacks against a node. Subclasses need to call [startAttacker] to connect
* the attacker to [alice].
*/
abstract class P2PMQSecurityTest : MQSecurityTest() {
override fun assertSendAttackFails(address: String) {
val attacker = amqpClientTo(alice.node.configuration.p2pAddress)
val session = attacker.start(ArtemisMessagingComponent.PEER_USER, ArtemisMessagingComponent.PEER_USER)
val message = session.createMessage()
message.setStringProperty("_AMQ_VALIDATED_USER", "O=MegaCorp, L=London, C=GB")
val queue = session.createQueue(address)
assertThatExceptionOfType(JMSException::class.java).isThrownBy {
session.createProducer(queue).sendAndVerify(message)
}.withMessageContaining(address).withMessageContaining("SEND")
}
fun assertProducerQueueCreationAttackFails(address: String) {
val attacker = amqpClientTo(alice.node.configuration.p2pAddress)
val session = attacker.start(ArtemisMessagingComponent.PEER_USER, ArtemisMessagingComponent.PEER_USER)
val message = session.createMessage()
message.setStringProperty("_AMQ_VALIDATED_USER", "O=MegaCorp, L=London, C=GB")
val queue = session.createQueue(address)
assertThatExceptionOfType(JMSException::class.java).isThrownBy {
session.createProducer(queue)
}.withMessageContaining(address).withMessageContaining("CREATE_DURABLE_QUEUE")
}
@Test(timeout=300_000)
fun `consume message from P2P queue`() {
assertConsumeAttackFails("$P2P_PREFIX${alice.info.singleIdentity().owningKey.toStringShort()}")

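For orientation, a minimal sketch of how a concrete subclass of P2PMQSecurityTest wires up the attacker, assuming startAttacker is the only abstract member left to implement (the class name is illustrative; MQSecurityAsNodeTest above does essentially the same thing):

import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEER_USER

class ExampleP2PAttackerTest : P2PMQSecurityTest() {
    // Authenticate the attacker as an ordinary peer on the P2P port, exactly as a rogue node would.
    override fun startAttacker(attacker: SimpleMQClient) {
        attacker.start(PEER_USER, PEER_USER)
    }
}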
View File

@ -0,0 +1,141 @@
package net.corda.services.messaging
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.nodeapi.internal.config.MutualSslConfiguration
import org.apache.qpid.jms.JmsConnectionFactory
import org.apache.qpid.jms.meta.JmsConnectionInfo
import org.apache.qpid.jms.provider.Provider
import org.apache.qpid.jms.provider.ProviderFuture
import org.apache.qpid.jms.provider.amqp.AmqpProvider
import org.apache.qpid.jms.provider.amqp.AmqpSaslAuthenticator
import org.apache.qpid.jms.sasl.PlainMechanism
import org.apache.qpid.jms.transports.TransportOptions
import org.apache.qpid.jms.transports.netty.NettyTcpTransport
import org.apache.qpid.proton.engine.Sasl
import org.apache.qpid.proton.engine.SaslListener
import org.apache.qpid.proton.engine.Transport
import java.net.URI
import java.security.SecureRandom
import java.util.concurrent.ExecutionException
import java.util.concurrent.TimeUnit
import javax.jms.CompletionListener
import javax.jms.Connection
import javax.jms.Message
import javax.jms.MessageProducer
import javax.jms.Session
import javax.net.ssl.KeyManagerFactory
import javax.net.ssl.SSLContext
import javax.net.ssl.TrustManagerFactory
/**
* Simple AMQP client that connects to the broker over JMS.
*/
class SimpleAMQPClient(private val target: NetworkHostAndPort, private val config: MutualSslConfiguration) {
companion object {
/**
* Send message and wait for completion.
* @throws Exception on failure
*/
fun MessageProducer.sendAndVerify(message: Message) {
val request = openFuture<Unit>()
send(message, object : CompletionListener {
override fun onException(message: Message, exception: Exception) {
request.setException(exception)
}
override fun onCompletion(message: Message) {
request.set(Unit)
}
})
try {
request.get(10, TimeUnit.SECONDS)
} catch (e: ExecutionException) {
throw e.cause!!
}
}
}
private lateinit var connection: Connection
private fun sslContext(): SSLContext {
val keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()).apply {
init(config.keyStore.get().value.internal, config.keyStore.entryPassword.toCharArray())
}
val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()).apply {
init(config.trustStore.get().value.internal)
}
val sslContext = SSLContext.getInstance("TLS")
val keyManagers = keyManagerFactory.keyManagers
val trustManagers = trustManagerFactory.trustManagers
sslContext.init(keyManagers, trustManagers, SecureRandom())
return sslContext
}
fun start(username: String, password: String): Session {
val connectionFactory = TestJmsConnectionFactory("amqps://${target.host}:${target.port}", username, password)
connectionFactory.setSslContext(sslContext())
connection = connectionFactory.createConnection()
connection.start()
return connection.createSession(false, Session.AUTO_ACKNOWLEDGE)
}
fun stop() {
try {
connection.close()
} catch (e: Exception) {
// The connection might not have been initialised yet.
}
}
private class TestJmsConnectionFactory(uri: String, private val user: String, private val pwd: String) : JmsConnectionFactory(uri) {
override fun createProvider(remoteURI: URI): Provider {
val transportOptions = TransportOptions().apply {
// Disable SNI check for server certificate
isVerifyHost = false
}
val transport = NettyTcpTransport(remoteURI, transportOptions, true)
// Manually override SASL negotiation to accept a failure in SASL-OUTCOME, which is produced by the node's Artemis server
return object : AmqpProvider(remoteURI, transport) {
override fun connect(connectionInfo: JmsConnectionInfo?) {
super.connect(connectionInfo)
val sasl = protonTransport.sasl()
sasl.client()
sasl.setRemoteHostname(remoteURI.host)
val authenticator = AmqpSaslAuthenticator {
PlainMechanism().apply {
username = user
password = pwd
}
}
val saslRequest = ProviderFuture()
sasl.setListener(object : SaslListener {
override fun onSaslMechanisms(sasl: Sasl, transport: Transport) {
authenticator.handleSaslMechanisms(sasl, transport)
}
override fun onSaslChallenge(sasl: Sasl, transport: Transport) {
authenticator.handleSaslChallenge(sasl, transport)
}
override fun onSaslOutcome(sasl: Sasl, transport: Transport) {
authenticator.handleSaslOutcome(sasl, transport)
saslRequest.onSuccess()
}
override fun onSaslInit(sasl: Sasl, transport: Transport) {
}
override fun onSaslResponse(sasl: Sasl, transport: Transport) {
}
})
pumpToProtonTransport()
saslRequest.sync()
}
}.apply {
isSaslLayer = false
}
}
}
}
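
A minimal usage sketch of SimpleAMQPClient, assuming the caller already has the node's P2P address and a suitable MutualSslConfiguration; the credentials and the "placeholder-key" queue suffix below are placeholders rather than real broker settings:

import net.corda.core.utilities.NetworkHostAndPort
import net.corda.nodeapi.internal.ArtemisMessagingComponent
import net.corda.nodeapi.internal.config.MutualSslConfiguration
import net.corda.services.messaging.SimpleAMQPClient
import net.corda.services.messaging.SimpleAMQPClient.Companion.sendAndVerify

fun sendOneAmqpMessage(p2pAddress: NetworkHostAndPort, ssl: MutualSslConfiguration) {
    val client = SimpleAMQPClient(p2pAddress, ssl)
    try {
        val session = client.start("peer-user", "peer-password")                        // placeholder credentials
        val queue = session.createQueue("${ArtemisMessagingComponent.P2P_PREFIX}placeholder-key") // placeholder queue name
        val producer = session.createProducer(queue)
        // sendAndVerify blocks until the broker acknowledges the send, or rethrows the broker's rejection.
        producer.sendAndVerify(session.createMessage())
    } finally {
        client.stop()
    }
}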

View File

@ -34,7 +34,7 @@ open class SharedNodeCmdLineOptions {
description = ["The path to the config file. By default this is node.conf in the base directory."]
)
private var _configFile: Path? = null
val configFile: Path get() = _configFile ?: (baseDirectory / "node.conf")
val configFile: Path get() = if (_configFile != null) baseDirectory.resolve(_configFile) else (baseDirectory / "node.conf")
@Option(
names = ["--on-unknown-config-keys"],

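The change above makes a relative config file path resolve against the node's base directory instead of the process working directory, while absolute paths and the node.conf default keep their existing behaviour. A standalone sketch of that resolution rule (the helper name is illustrative):

import java.nio.file.Path
import java.nio.file.Paths

// Illustrative helper mirroring the getter above: relative paths resolve against baseDirectory,
// absolute paths are returned as given, and the default stays <baseDirectory>/node.conf.
fun resolveConfigFile(baseDirectory: Path, configFile: Path?): Path =
    configFile?.let { baseDirectory.resolve(it) } ?: baseDirectory.resolve("node.conf")

fun main() {
    val base = Paths.get("/opt/corda")
    println(resolveConfigFile(base, Paths.get("my-node.conf")))         // /opt/corda/my-node.conf
    println(resolveConfigFile(base, Paths.get("/etc/corda/node.conf"))) // /etc/corda/node.conf
    println(resolveConfigFile(base, null))                              // /opt/corda/node.conf
}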
View File

@ -59,6 +59,8 @@ import net.corda.core.schemas.MappedSchema
import net.corda.core.serialization.SerializationWhitelist
import net.corda.core.serialization.SerializeAsToken
import net.corda.core.serialization.SingletonSerializeAsToken
import net.corda.core.serialization.internal.AttachmentsClassLoaderCache
import net.corda.core.serialization.internal.AttachmentsClassLoaderCacheImpl
import net.corda.core.toFuture
import net.corda.core.transactions.LedgerTransaction
import net.corda.core.utilities.NetworkHostAndPort
@ -358,6 +360,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
} else {
BasicVerifierFactoryService()
}
private val attachmentsClassLoaderCache: AttachmentsClassLoaderCache = AttachmentsClassLoaderCacheImpl(cacheFactory).tokenize()
val contractUpgradeService = ContractUpgradeServiceImpl(cacheFactory).tokenize()
val auditService = DummyAuditService().tokenize()
@Suppress("LeakingThis")
@ -699,11 +702,22 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
val myNotaryIdentity = configuration.notary?.let {
if (it.serviceLegalName != null) {
val (notaryIdentity, notaryIdentityKeyPair) = loadNotaryClusterIdentity(it.serviceLegalName)
val (notaryIdentity, notaryIdentityKeyPair) = loadNotaryServiceIdentity(it.serviceLegalName)
keyPairs += notaryIdentityKeyPair
notaryIdentity
} else {
// In case of a single notary service myNotaryIdentity will be the node's single identity.
// The only case where myNotaryIdentity will be the node's legal identity is for existing single notary services running
// an older version. Current single notary services (V4.6+) sign requests using a separate notary service identity, so the
// notary identity will be different from the node's legal identity.
// This check ensures that a user does not accidentally or intentionally remove the serviceLegalName configuration
// parameter after a notary has been registered. If that were possible, the notary would start and sign incoming requests
// with the node's legal identity key, corrupting the data.
check(!cryptoService.containsKey(DISTRIBUTED_NOTARY_KEY_ALIAS)) {
"The notary service key exists in the key store but no notary service legal name has been configured. " +
"Either include the relevant 'notary.serviceLegalName' configuration or validate that this key is not necessary " +
"and remove it from the key store."
}
identity
}
}
@ -1147,8 +1161,12 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
}
}
/** Loads pre-generated notary service cluster identity. */
private fun loadNotaryClusterIdentity(serviceLegalName: CordaX500Name): Pair<PartyAndCertificate, KeyPair> {
/**
* Loads the notary service identity. In the case of the experimental RAFT and BFT notary clusters, this loads the pre-generated
* cluster identity that all worker nodes share. In the case of a simple single notary, this loads the notary service identity
* that is generated during initial registration and is used to sign notarisation requests.
*/
private fun loadNotaryServiceIdentity(serviceLegalName: CordaX500Name): Pair<PartyAndCertificate, KeyPair> {
val privateKeyAlias = "$DISTRIBUTED_NOTARY_KEY_ALIAS"
val compositeKeyAlias = "$DISTRIBUTED_NOTARY_COMPOSITE_KEY_ALIAS"
@ -1264,6 +1282,8 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
private lateinit var _myInfo: NodeInfo
override val myInfo: NodeInfo get() = _myInfo
override val attachmentsClassLoaderCache: AttachmentsClassLoaderCache get() = this@AbstractNode.attachmentsClassLoaderCache
private lateinit var _networkParameters: NetworkParameters
override val networkParameters: NetworkParameters get() = _networkParameters
@ -1463,11 +1483,12 @@ fun CordaPersistence.startHikariPool(
NodeDatabaseErrors.MISSING_DRIVER)
ex is OutstandingDatabaseChangesException -> throw (DatabaseIncompatibleException(ex.message))
else -> {
LoggerFactory.getLogger("CordaPersistence extension").error("Could not create the DataSource", ex)
val msg = ex.message ?: ex::class.java.canonicalName
throw CouldNotCreateDataSourceException(
"Could not create the DataSource: ${ex.message}",
NodeDatabaseErrors.FAILED_STARTUP,
cause = ex)
cause = ex,
parameters = listOf(msg))
}
}
}

View File

@ -1,5 +1,6 @@
package net.corda.node.internal
import net.corda.client.rpc.RPCException
import net.corda.client.rpc.notUsed
import net.corda.common.logging.CordaVersion
import net.corda.core.CordaRuntimeException
@ -263,7 +264,8 @@ internal class CordaRPCOpsImpl(
}
override fun openAttachment(id: SecureHash): InputStream {
return services.attachments.openAttachment(id)!!.open()
return services.attachments.openAttachment(id)?.open() ?:
throw RPCException("Unable to open attachment with id: $id")
}
override fun uploadAttachment(jar: InputStream): SecureHash {

View File

@ -86,6 +86,7 @@ class NetworkParametersReader(private val trustRoot: X509Certificate,
logger.info("No network-parameters file found. Expecting network parameters to be available from the network map.")
networkMapClient ?: throw Error.NetworkMapNotConfigured()
val signedParams = networkMapClient.getNetworkParameters(parametersHash)
signedParams.verifiedNetworkParametersCert(trustRoot)
signedParams.serialize().open().copyTo(baseDirectory / NETWORK_PARAMS_FILE_NAME)
return signedParams
}

View File

@ -0,0 +1,47 @@
package net.corda.node.internal.artemis
import net.corda.core.utilities.contextLogger
import net.corda.nodeapi.internal.ArtemisMessagingComponent.Companion.PEER_USER
import org.apache.activemq.artemis.api.core.ActiveMQSecurityException
import org.apache.activemq.artemis.api.core.Message
import org.apache.activemq.artemis.core.server.ServerSession
import org.apache.activemq.artemis.core.server.plugin.ActiveMQServerPlugin
import org.apache.activemq.artemis.core.transaction.Transaction
import org.apache.activemq.artemis.protocol.amqp.broker.AMQPMessage
/**
* Plugin to verify the user in the AMQP message header against the user in the authenticated session.
*
* In the core protocol, the Artemis server automatically overwrites the _AMQ_VALIDATED_USER field in the message header according to the
* authentication of the session. However, this is not done for the AMQP protocol, which is what Corda uses, so the _AMQ_VALIDATED_USER
* field in an AMQP packet is delivered exactly as it was produced by the counterparty. To prevent other peers from manipulating this
* field, the message header must be checked against the user of the authenticated session.
*
* Note that an AMQP message is immutable, so changing the header would mean rebuilding the whole message, which is expensive. Instead,
* the preferred option is to throw an exception.
*/
class UserValidationPlugin : ActiveMQServerPlugin {
companion object {
private val log = contextLogger()
}
override fun beforeSend(session: ServerSession, tx: Transaction?, message: Message, direct: Boolean, noAutoCreateQueue: Boolean) {
try {
if (session.username == PEER_USER) {
if (message !is AMQPMessage) {
throw ActiveMQSecurityException("Invalid message type: expected [${AMQPMessage::class.java.name}], got [${message.javaClass.name}]")
}
val user = message.getStringProperty(Message.HDR_VALIDATED_USER)
if (user != null && user != session.validatedUser) {
throw ActiveMQSecurityException("_AMQ_VALIDATED_USER mismatch: expected [${session.validatedUser}], got [${user}]")
}
}
} catch (e: ActiveMQSecurityException) {
throw e
} catch (e: Throwable) {
// Artemis swallows any exception except ActiveMQException
log.error("Message validation failed", e)
throw ActiveMQSecurityException("Message validation failed")
}
}
}
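
A hedged sketch of how a broker plugin of this kind can be registered on an embedded Artemis instance; the acceptor URL and the embedded-broker wiring are illustrative and are not Corda's actual broker setup:

import net.corda.node.internal.artemis.UserValidationPlugin
import org.apache.activemq.artemis.core.config.impl.ConfigurationImpl
import org.apache.activemq.artemis.core.server.embedded.EmbeddedActiveMQ

fun startBrokerWithUserValidation() {
    val config = ConfigurationImpl().apply {
        isPersistenceEnabled = false
        // Illustrative AMQP acceptor; Corda configures its broker elsewhere.
        addAcceptorConfiguration("amqp", "tcp://0.0.0.0:10005?protocols=AMQP")
        // Every inbound send now passes through UserValidationPlugin.beforeSend.
        registerBrokerPlugin(UserValidationPlugin())
    }
    EmbeddedActiveMQ().setConfiguration(config).start()
}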

Some files were not shown because too many files have changed in this diff.