Mirror of https://github.com/corda/corda.git (synced 2025-02-21 01:42:24 +00:00)

Commit 011e8d824b: Merge pull request #6611 from corda/chrisr3-os46-os47-merge
NOTICK: Merge OS 4.6 -> OS 4.7 up to a9799fd.
@@ -25,15 +25,28 @@ boolean isReleaseTag = (env.TAG_NAME =~ /^release.*JDK11$/)
** * stage-release: for release candidates and for health checks
** * operate: for final release
*/
def nexusIqStage = "build"
def nexusDefaultIqStage = "build"
if (isReleaseTag) {
switch (env.TAG_NAME) {
case ~/.*-RC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
case ~/.*-HC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
default: nexusIqStage = "release"
case ~/.*-RC\d+(-.*)?/: nexusDefaultIqStage = "stage-release"; break;
case ~/.*-HC\d+(-.*)?/: nexusDefaultIqStage = "stage-release"; break;
default: nexusDefaultIqStage = "release"
}
}

/**
* make sure calculated default value of NexusIQ stage is first in the list
* thus making it default for the `choice` parameter
*/
def nexusIqStageChoices = [nexusDefaultIqStage].plus(
[
'develop',
'build',
'stage-release',
'release',
'operate'
].minus([nexusDefaultIqStage]))

pipeline {
agent { label 'k8s' }
options {
@@ -42,6 +55,10 @@ pipeline {
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}

parameters {
choice choices: nexusIqStageChoices, description: 'NexusIQ stage for code evaluation', name: 'nexusIqStage'
}

environment {
DOCKER_TAG_TO_USE = "${env.GIT_COMMIT.subSequence(0, 8)}JDK11"
EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
@@ -68,13 +85,13 @@ pipeline {
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-jdk11-${version}"
nexusAppId = "${groupId}-${artifactId}-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
iqStage: params.nexusIqStage
)
}
}
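Note: the `[nexusDefaultIqStage].plus(... .minus([nexusDefaultIqStage]))` expression above only reorders the list so the calculated default comes first, which is what makes it the default of the `choice` parameter. A minimal sketch of the same idea, written in Kotlin rather than the pipeline's Groovy; `defaultFirst` is an illustrative helper name, not part of this change:

    // Move the default to the front, keep the remaining choices in their original order.
    fun defaultFirst(default: String, all: List<String>): List<String> =
        listOf(default) + (all - default)

    fun main() {
        // prints [release, develop, build, stage-release, operate]
        println(defaultFirst("release", listOf("develop", "build", "stage-release", "release", "operate")))
    }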
@@ -18,7 +18,20 @@ killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
** calculate the stage for NexusIQ evaluation
** * build for snapshots
*/
def nexusIqStage = "build"
def nexusDefaultIqStage = "build"

/**
* make sure calculated default value of NexusIQ stage is first in the list
* thus making it default for the `choice` parameter
*/
def nexusIqStageChoices = [nexusDefaultIqStage].plus(
[
'develop',
'build',
'stage-release',
'release',
'operate'
].minus([nexusDefaultIqStage]))

pipeline {
agent { label 'standard' }
@@ -31,6 +44,10 @@ pipeline {
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}

parameters {
choice choices: nexusIqStageChoices, description: 'NexusIQ stage for code evaluation', name: 'nexusIqStage'
}

triggers {
cron '@midnight'
}
@@ -53,13 +70,13 @@ pipeline {
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-${version}"
nexusAppId = "${groupId}-${artifactId}-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
iqStage: params.nexusIqStage
)
}
}
.ci/dev/regression/Jenkinsfile (vendored): 29 changes
@@ -25,15 +25,28 @@ boolean isInternalRelease = (env.TAG_NAME =~ /^internal-release-.*$/)
** * stage-release: for release candidates and for health checks
** * operate: for final release
*/
def nexusIqStage = "build"
def nexusDefaultIqStage = "build"
if (isReleaseTag) {
switch (env.TAG_NAME) {
case ~/.*-RC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
case ~/.*-HC\d+(-.*)?/: nexusIqStage = "stage-release"; break;
default: nexusIqStage = "release"
case ~/.*-RC\d+(-.*)?/: nexusDefaultIqStage = "stage-release"; break;
case ~/.*-HC\d+(-.*)?/: nexusDefaultIqStage = "stage-release"; break;
default: nexusDefaultIqStage = "release"
}
}

/**
* make sure calculated default value of NexusIQ stage is first in the list
* thus making it default for the `choice` parameter
*/
def nexusIqStageChoices = [nexusDefaultIqStage].plus(
[
'develop',
'build',
'stage-release',
'release',
'operate'
].minus([nexusDefaultIqStage]))

pipeline {
agent { label 'k8s' }
options {
@@ -43,6 +56,10 @@ pipeline {
buildDiscarder(logRotator(daysToKeepStr: '14', artifactDaysToKeepStr: '14'))
}

parameters {
choice choices: nexusIqStageChoices, description: 'NexusIQ stage for code evaluation', name: 'nexusIqStage'
}

environment {
DOCKER_TAG_TO_USE = "${env.GIT_COMMIT.subSequence(0, 8)}"
DOCKER_URL = "https://index.docker.io/v1/"
@@ -70,13 +87,13 @@ pipeline {
def version = sh (returnStdout: true, script: "grep ^version: version-properties | sed -e 's/^version: \\([0-9]\\+\\.[0-9]\\+\\).*\$/\\1/'").trim()
def groupId = sh (returnStdout: true, script: "grep ^group: version-properties | sed -e 's/^group: //'").trim()
def artifactId = 'corda'
nexusAppId = "jenkins-${groupId}-${artifactId}-${version}"
nexusAppId = "${groupId}-${artifactId}-${version}"
}
nexusPolicyEvaluation (
failBuildOnNetworkError: false,
iqApplication: selectedApplication(nexusAppId), // application *has* to exist before a build starts!
iqScanPatterns: [[scanPattern: 'node/capsule/build/libs/corda*.jar']],
iqStage: nexusIqStage
iqStage: params.nexusIqStage
)
}
}
.gitignore (vendored): 5 changes
@@ -103,4 +103,7 @@ virtualenv/
# Files you may find useful to have in your working directory.
PLAN
NOTES
TODO
TODO

# gradle-dependx plugin
.dependx/
build.gradle: 183 changes
@@ -1,5 +1,4 @@
import com.r3.testing.DistributeTestsBy
import com.r3.testing.ParallelTestGroup
import com.r3.testing.PodLogLevel

import static org.gradle.api.JavaVersion.VERSION_11
@@ -172,16 +171,27 @@ buildscript {
}
}
} else {
maven {
url "${artifactory_contextUrl}/corda-dependencies-dev"
content {
includeGroupByRegex 'net\\.corda(\\..*)?'
includeGroupByRegex 'com\\.r3(\\..*)?'
}
}
maven {
url "${artifactory_contextUrl}/corda-releases"
content {
includeGroupByRegex 'net\\.corda(\\..*)?'
includeGroupByRegex 'com\\.r3(\\..*)?'
}
}
mavenCentral()
jcenter()
maven {
url 'https://kotlin.bintray.com/kotlinx'
}
maven {
url "${artifactory_contextUrl}/corda-dependencies-dev"
}
maven {
url "${artifactory_contextUrl}/corda-releases"
content {
includeGroup 'org.jetbrains.kotlin'
}
}
}
}
@@ -204,11 +214,13 @@ buildscript {
// Capsule gradle plugin forked and maintained locally to support Gradle 5.x
// See https://github.com/corda/gradle-capsule-plugin
classpath "us.kirchmeier:gradle-capsule-plugin:1.0.4_r3"
classpath group: "com.r3.testing", name: "gradle-distributed-testing-plugin", version: "1.2-LOCAL-K8S-SHARED-CACHE-SNAPSHOT", changing: true
classpath group: "com.r3.dependx", name: "gradle-dependx", version: "0.1.13", changing: true
classpath "com.bmuschko:gradle-docker-plugin:5.0.0"
classpath group: "com.r3.testing", name: "gradle-distributed-testing-plugin", version: '1.3.0'
classpath "org.sonarsource.scanner.gradle:sonarqube-gradle-plugin:2.8"
}

configurations.all {
resolutionStrategy.cacheChangingModulesFor 0, 'seconds'
}
}

plugins {
@@ -222,8 +234,7 @@ apply plugin: 'project-report'
apply plugin: 'com.github.ben-manes.versions'
apply plugin: 'net.corda.plugins.publish-utils'
apply plugin: 'com.jfrog.artifactory'
apply plugin: "com.bmuschko.docker-remote-api"
apply plugin: "com.r3.dependx.dependxies"
apply plugin: 'com.r3.testing.distributed-testing'


// If the command line project option -PversionFromGit is added to the gradle invocation, we'll resolve
@@ -390,11 +401,32 @@ allprojects {
}
}
} else {
maven {
url "${artifactory_contextUrl}/corda-dependencies"
content {
includeGroupByRegex 'net\\.corda(\\..*)?'
includeGroupByRegex 'com\\.r3(\\..*)?'
includeGroup 'co.paralleluniverse'
includeGroup 'org.crashub'
includeGroup 'com.github.bft-smart'
}
}
maven {
url "${artifactory_contextUrl}/corda-dev"
content {
includeGroupByRegex 'net\\.corda(\\..*)?'
includeGroupByRegex 'com\\.r3(\\..*)?'
}
}
maven {
url 'https://repo.gradle.org/gradle/libs-releases'
content {
includeGroup 'org.gradle'
includeGroup 'com.github.detro'
}
}
mavenCentral()
jcenter()
maven { url "${artifactory_contextUrl}/corda-dependencies" }
maven { url 'https://repo.gradle.org/gradle/libs-releases' }
maven { url "${artifactory_contextUrl}/corda-dev" }
}
}

@@ -655,11 +687,6 @@ artifactory {
}
}

dependxiesModule {
mode = "monitor"
skipTasks = "test,integrationTest,smokeTest,slowIntegrationTest"
}

tasks.register('generateApi', net.corda.plugins.apiscanner.GenerateApi) {
baseName = "api-corda"
}
@@ -705,83 +732,45 @@ buildScan {
termsOfServiceAgree = 'yes'
}

ext.generalPurpose = [
numberOfShards: 15,
streamOutput: false,
coresPerFork: 2,
memoryInGbPerFork: 12,
nodeTaints: "small"
]
distributedTesting {
profilesURL = 'https://raw.githubusercontent.com/corda/infrastructure-profiles/master'

ext.largeScaleSet = [
numberOfShards: 15,
streamOutput: false,
coresPerFork: 6,
memoryInGbPerFork: 10,
nodeTaints: "big"
]
parallelTestGroups {
allParallelIntegrationTest {
testGroups 'integrationTest'
profile 'generalPurpose.yml'
podLogLevel PodLogLevel.INFO
distribution DistributeTestsBy.METHOD
}
allParallelUnitTest {
podLogLevel PodLogLevel.INFO
testGroups 'test'
profile 'generalPurpose.yml'
distribution DistributeTestsBy.CLASS
}
allParallelUnitAndIntegrationTest {
testGroups 'test', 'integrationTest'
profile 'generalPurpose.yml'
distribution DistributeTestsBy.METHOD
}
parallelRegressionTest {
testGroups 'test', 'integrationTest', 'smokeTest'
profile 'generalPurpose.yml'
distribution DistributeTestsBy.METHOD
}
allParallelSmokeTest {
testGroups 'smokeTest'
profile 'generalPurpose.yml'
distribution DistributeTestsBy.METHOD
}
allParallelSlowIntegrationTest {
testGroups 'slowIntegrationTest'
profile 'generalPurpose.yml'
distribution DistributeTestsBy.METHOD
}
}

task allParallelIntegrationTest(type: ParallelTestGroup) {
dependsOn dependxiesModule
podLogLevel PodLogLevel.INFO
testGroups "integrationTest"
numberOfShards generalPurpose.numberOfShards
streamOutput generalPurpose.streamOutput
coresPerFork generalPurpose.coresPerFork
memoryInGbPerFork generalPurpose.memoryInGbPerFork
nodeTaints generalPurpose.nodeTaints
distribute DistributeTestsBy.METHOD
ignoredTests = [
':core-deterministic:testing:data:test'
]
}
task allParallelUnitTest(type: ParallelTestGroup) {
dependsOn dependxiesModule
podLogLevel PodLogLevel.INFO
testGroups "test"
numberOfShards generalPurpose.numberOfShards
streamOutput generalPurpose.streamOutput
coresPerFork generalPurpose.coresPerFork
memoryInGbPerFork generalPurpose.memoryInGbPerFork
nodeTaints generalPurpose.nodeTaints
distribute DistributeTestsBy.CLASS
}
task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) {
dependsOn dependxiesModule
testGroups "test", "integrationTest"
numberOfShards generalPurpose.numberOfShards
streamOutput generalPurpose.streamOutput
coresPerFork generalPurpose.coresPerFork
memoryInGbPerFork generalPurpose.memoryInGbPerFork
nodeTaints generalPurpose.nodeTaints
distribute DistributeTestsBy.METHOD
}
task parallelRegressionTest(type: ParallelTestGroup) {
testGroups "test", "integrationTest", "smokeTest"
dependsOn dependxiesModule
numberOfShards generalPurpose.numberOfShards
streamOutput generalPurpose.streamOutput
coresPerFork generalPurpose.coresPerFork
memoryInGbPerFork generalPurpose.memoryInGbPerFork
nodeTaints generalPurpose.nodeTaints
distribute DistributeTestsBy.METHOD
}
task allParallelSmokeTest(type: ParallelTestGroup) {
testGroups "smokeTest"
dependsOn dependxiesModule
numberOfShards generalPurpose.numberOfShards
streamOutput generalPurpose.streamOutput
coresPerFork generalPurpose.coresPerFork
memoryInGbPerFork generalPurpose.memoryInGbPerFork
nodeTaints generalPurpose.nodeTaints
distribute DistributeTestsBy.METHOD
}
task allParallelSlowIntegrationTest(type: ParallelTestGroup) {
testGroups "slowIntegrationTest"
dependsOn dependxiesModule
numberOfShards generalPurpose.numberOfShards
streamOutput generalPurpose.streamOutput
coresPerFork generalPurpose.coresPerFork
memoryInGbPerFork generalPurpose.memoryInGbPerFork
nodeTaints generalPurpose.nodeTaints
distribute DistributeTestsBy.METHOD
}
apply plugin: 'com.r3.testing.distributed-testing'
apply plugin: 'com.r3.testing.image-building'
@@ -6,6 +6,7 @@ import net.corda.client.rpc.CordaRPCClientTest
import net.corda.client.rpc.GracefulReconnect
import net.corda.client.rpc.MaxRpcRetryException
import net.corda.client.rpc.RPCException
import net.corda.client.rpc.UnrecoverableRPCException
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps
import net.corda.core.messaging.startTrackedFlow
import net.corda.core.utilities.NetworkHostAndPort
@@ -82,6 +83,29 @@ class CordaRPCClientReconnectionTest {
}

@Test(timeout=300_000)
fun `minimum server protocol version should cause exception if higher than allowed`() {
driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS)) {
val address = NetworkHostAndPort("localhost", portAllocator.nextPort())

fun startNode(): NodeHandle {
return startNode(
providedName = CHARLIE_NAME,
rpcUsers = listOf(CordaRPCClientTest.rpcUser),
customOverrides = mapOf("rpcSettings.address" to address.toString())
).getOrThrow()
}

assertThatThrownBy {
val node = startNode ()
val client = CordaRPCClient(node.rpcAddress, config.copy(minimumServerProtocolVersion = 100, maxReconnectAttempts = 1))
client.start(rpcUser.username, rpcUser.password, gracefulReconnect = gracefulReconnect)
}
.isInstanceOf(UnrecoverableRPCException::class.java)
.hasMessageStartingWith("Requested minimum protocol version (100) is higher than the server's supported protocol version ")
}
}

@Test(timeout=300_000)
fun `rpc client calls and returned observables continue working when the server crashes and restarts`() {
driver(DriverParameters(cordappsForAllNodes = FINANCE_CORDAPPS)) {
@@ -292,7 +316,7 @@ class CordaRPCClientReconnectionTest {
val node = startNode()
CordaRPCClient(node.rpcAddress, config).start(rpcUser.username, rpcUser.password, gracefulReconnect).use {
node.stop()
thread() {
thread {
it.proxy.startTrackedFlow(
::CashIssueFlow,
10.DOLLARS,
@@ -349,4 +373,4 @@ class CordaRPCClientReconnectionTest {
}
}
}
}
}
@@ -10,7 +10,7 @@ import net.corda.client.rpc.ConnectionFailureException
import net.corda.client.rpc.CordaRPCClientConfiguration
import net.corda.client.rpc.RPCException
import net.corda.client.rpc.RPCSinceVersion
import net.corda.nodeapi.internal.rpc.client.RpcClientObservableDeSerializer
import net.corda.client.rpc.internal.RPCUtils.isShutdownCmd
import net.corda.core.context.Actor
import net.corda.core.context.Trace
import net.corda.core.context.Trace.InvocationId
@@ -35,6 +35,7 @@ import net.corda.nodeapi.internal.DeduplicationChecker
import net.corda.nodeapi.internal.rpc.client.CallSite
import net.corda.nodeapi.internal.rpc.client.CallSiteMap
import net.corda.nodeapi.internal.rpc.client.ObservableContext
import net.corda.nodeapi.internal.rpc.client.RpcClientObservableDeSerializer
import net.corda.nodeapi.internal.rpc.client.RpcObservableMap
import org.apache.activemq.artemis.api.core.ActiveMQException
import org.apache.activemq.artemis.api.core.ActiveMQNotConnectedException
@@ -46,10 +47,12 @@ import org.apache.activemq.artemis.api.core.client.ClientMessage
import org.apache.activemq.artemis.api.core.client.ClientProducer
import org.apache.activemq.artemis.api.core.client.ClientSession
import org.apache.activemq.artemis.api.core.client.ClientSessionFactory
import org.apache.activemq.artemis.api.core.client.FailoverEventListener
import org.apache.activemq.artemis.api.core.client.FailoverEventType
import org.apache.activemq.artemis.api.core.client.ServerLocator
import rx.Notification
import rx.Observable
import rx.exceptions.OnErrorNotImplementedException
import rx.subjects.UnicastSubject
import java.lang.reflect.InvocationHandler
import java.lang.reflect.Method
@@ -123,7 +126,7 @@ internal class RPCClientProxyHandler(
val toStringMethod: Method = Object::toString.javaMethod!!
val equalsMethod: Method = Object::equals.javaMethod!!
val hashCodeMethod: Method = Object::hashCode.javaMethod!!

var terminating = false
private fun addRpcCallSiteToThrowable(throwable: Throwable, callSite: CallSite) {
var currentThrowable = throwable
while (true) {
@@ -142,6 +145,19 @@ internal class RPCClientProxyHandler(
}
}
}

@Suppress("TooGenericExceptionCaught")
private fun closeObservable(observable: UnicastSubject<Notification<*>>) {
// Notify listeners of the observables that the connection is being terminated.
try {
observable.onError(ConnectionFailureException())
} catch (ex: OnErrorNotImplementedException) {
// Indicates the observer does not have any error handling.
log.debug { "Closed connection on observable whose observers have no error handling." }
} catch (ex: Exception) {
log.error("Unexpected exception when RPC connection failure handling", ex)
}
}
}

// Used for reaping
@@ -203,7 +219,7 @@ internal class RPCClientProxyHandler(
.weakValues()
.removalListener(onObservableRemove)
.executor(MoreExecutors.directExecutor()),
"RpcClientProxyHandler_rpcObservable"
"RpcClientProxyHandler_rpcObservable"
)
}

@@ -219,6 +235,22 @@ internal class RPCClientProxyHandler(
private val sendingEnabled = AtomicBoolean(true)
// Used to interrupt failover thread (i.e. client is closed while failing over).
private var haFailoverThread: Thread? = null
private val haFailoverHandler: FailoverHandler = FailoverHandler(
detected = { log.warn("Connection failure. Attempting to reconnect using back-up addresses.")
cleanUpOnConnectionLoss()
sessionFactory?.apply {
connection.destroy()
cleanup()
close()
}
haFailoverThread = Thread.currentThread()
attemptReconnect()
})
private val defaultFailoverHandler: FailoverHandler = FailoverHandler(
detected = { cleanUpOnConnectionLoss() },
completed = { sendingEnabled.set(true)
log.info("RPC server available.")},
failed = { log.error("Could not reconnect to the RPC server.")})

/**
* Start the client. This creates the per-client queue, starts the consumer session and the reaper.
@@ -251,15 +283,27 @@ internal class RPCClientProxyHandler(
}
// Depending on how the client is constructed, connection failure is treated differently
if (serverLocator.staticTransportConfigurations.size == 1) {
sessionFactory!!.addFailoverListener(this::failoverHandler)
sessionFactory!!.addFailoverListener(defaultFailoverHandler)
} else {
sessionFactory!!.addFailoverListener(this::haFailoverHandler)
sessionFactory!!.addFailoverListener(haFailoverHandler)
}
initSessions()
lifeCycle.transition(State.UNSTARTED, State.SERVER_VERSION_NOT_SET)
startSessions()
}

class FailoverHandler(private val detected: () -> Unit = {},
private val completed: () -> Unit = {},
private val failed: () -> Unit = {}): FailoverEventListener {
override fun failoverEvent(eventType: FailoverEventType?) {
when (eventType) {
FailoverEventType.FAILURE_DETECTED -> { detected() }
FailoverEventType.FAILOVER_COMPLETED -> { completed() }
FailoverEventType.FAILOVER_FAILED -> { if (!terminating) failed() }
}
}
}

// This is the general function that transforms a client side RPC to internal Artemis messages.
override fun invoke(proxy: Any, method: Method, arguments: Array<out Any?>?): Any? {
lifeCycle.requireState { it == State.STARTED || it == State.SERVER_VERSION_NOT_SET }
@@ -299,6 +343,10 @@ internal class RPCClientProxyHandler(
"Generated several RPC requests with same ID $replyId"
}

if (request.isShutdownCmd()){
terminating = true
}

sendMessage(request)
return replyFuture.getOrThrow()
} catch (e: RuntimeException) {
@@ -452,14 +500,9 @@ internal class RPCClientProxyHandler(
}

reaperScheduledFuture?.cancel(false)
val observablesMap = observableContext.observableMap.asMap()
observablesMap.keys.forEach { key ->
observableContext.observableMap.asMap().forEach { (key, observable) ->
observationExecutorPool.run(key) {
try {
observablesMap[key]?.onError(ConnectionFailureException())
} catch (e: Exception) {
log.error("Unexpected exception when RPC connection failure handling", e)
}
observable?.also(Companion::closeObservable)
}
}
observableContext.observableMap.invalidateAll()
@@ -564,7 +607,7 @@ internal class RPCClientProxyHandler(

log.debug { "Connected successfully after $reconnectAttempt attempts using ${transport.params}." }
log.info("RPC server available.")
sessionFactory!!.addFailoverListener(this::haFailoverHandler)
sessionFactory!!.addFailoverListener(haFailoverHandler)
initSessions()
startSessions()
sendingEnabled.set(true)
@@ -593,38 +636,6 @@ internal class RPCClientProxyHandler(
producerSession!!.start()
}

private fun haFailoverHandler(event: FailoverEventType) {
if (event == FailoverEventType.FAILURE_DETECTED) {
log.warn("Connection failure. Attempting to reconnect using back-up addresses.")
cleanUpOnConnectionLoss()
sessionFactory?.apply {
connection.destroy()
cleanup()
close()
}
haFailoverThread = Thread.currentThread()
attemptReconnect()
}
// Other events are not considered as reconnection is not done by Artemis.
}

private fun failoverHandler(event: FailoverEventType) {
when (event) {
FailoverEventType.FAILURE_DETECTED -> {
cleanUpOnConnectionLoss()
}

FailoverEventType.FAILOVER_COMPLETED -> {
sendingEnabled.set(true)
log.info("RPC server available.")
}

FailoverEventType.FAILOVER_FAILED -> {
log.error("Could not reconnect to the RPC server.")
}
}
}

private fun cleanUpOnConnectionLoss() {
sendingEnabled.set(false)
log.warn("Terminating observables.")
@@ -0,0 +1,15 @@
package net.corda.client.rpc.internal

import net.corda.nodeapi.RPCApi
import java.lang.reflect.Method

object RPCUtils {
fun isShutdownMethodName(methodName: String) =
methodName.equals("shutdown", true) ||
methodName.equals("gracefulShutdown", true) ||
methodName.equals("terminate", true)

fun RPCApi.ClientToServer.RpcRequest.isShutdownCmd() = isShutdownMethodName(methodName)
fun Method.isShutdown() = isShutdownMethodName(name)
fun Method.isStartFlow() = name.startsWith("startFlow") || name.startsWith("startTrackedFlow")
}
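The new RPCUtils object above centralises the name-based shutdown check that was previously duplicated in ReconnectingCordaRPCOps; both the RpcRequest and Method extensions delegate to the same helper. A small, self-contained sketch of that check in Kotlin (the helper body mirrors the diff, the main() is illustrative only):

    fun isShutdownMethodName(methodName: String) =
        methodName.equals("shutdown", true) ||
                methodName.equals("gracefulShutdown", true) ||
                methodName.equals("terminate", true)

    fun main() {
        check(isShutdownMethodName("GRACEFULSHUTDOWN"))   // the comparison is case-insensitive
        check(!isShutdownMethodName("startFlowDynamic"))  // ordinary RPCs are not shutdown commands
    }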
@@ -9,6 +9,9 @@ import net.corda.client.rpc.MaxRpcRetryException
import net.corda.client.rpc.PermissionException
import net.corda.client.rpc.RPCConnection
import net.corda.client.rpc.RPCException
import net.corda.client.rpc.UnrecoverableRPCException
import net.corda.client.rpc.internal.RPCUtils.isShutdown
import net.corda.client.rpc.internal.RPCUtils.isStartFlow
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps.ReconnectingRPCConnection.CurrentState.CLOSED
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps.ReconnectingRPCConnection.CurrentState.CONNECTED
import net.corda.client.rpc.internal.ReconnectingCordaRPCOps.ReconnectingRPCConnection.CurrentState.CONNECTING
@@ -211,7 +214,7 @@ class ReconnectingCordaRPCOps private constructor(
* Establishes a connection by automatically retrying if the attempt to establish a connection fails.
*
* @param retryInterval the interval between retries.
* @param roundRobinIndex index of the address that will be used for the connection.
* @param roundRobinIndex the index of the address that will be used for the connection.
* @param retries the number of retries remaining. A negative value implies infinite retries.
*/
private tailrec fun establishConnectionWithRetry(
@@ -240,7 +243,7 @@ class ReconnectingCordaRPCOps private constructor(
}
} catch (ex: Exception) {
when (ex) {
is ActiveMQSecurityException -> {
is UnrecoverableRPCException, is ActiveMQSecurityException -> {
log.error("Failed to login to node.", ex)
throw ex
}
@@ -291,9 +294,6 @@ class ReconnectingCordaRPCOps private constructor(
fun isClosed(): Boolean = currentState == CLOSED
}
private class ErrorInterceptingHandler(val reconnectingRPCConnection: ReconnectingRPCConnection) : InvocationHandler {
private fun Method.isStartFlow() = name.startsWith("startFlow") || name.startsWith("startTrackedFlow")
private fun Method.isShutdown() = name == "shutdown" || name == "gracefulShutdown" || name == "terminate"

private fun checkIfIsStartFlow(method: Method, e: InvocationTargetException) {
if (method.isStartFlow()) {
// Don't retry flows
@@ -9,26 +9,32 @@ import org.junit.Test
import kotlin.test.assertEquals

class ExceptionsErrorCodeFunctionsTest {

@Test(timeout=3_000)
fun `error code for message prints out message and full stack trace`() {
val originalMessage = SimpleMessage("This is a test message")
var previous: Exception? = null
val throwables = (0..10).map {
val current = TestThrowable(it, previous)
previous = current
current
private companion object {
private const val EXCEPTION_MESSAGE = "This is exception "
private const val TEST_MESSAGE = "This is a test message"
private fun makeChain(previous: Exception?, ttl: Int): Exception {
val current = TestThrowable(ttl, previous)
return if (ttl == 0) {
current
} else {
makeChain(current, ttl - 1)
}
}
val exception = throwables.last()
}

@Test(timeout=5_000)
fun `error code for message prints out message and full stack trace`() {
val originalMessage = SimpleMessage(TEST_MESSAGE)
val exception = makeChain(null, 10)
val message = originalMessage.withErrorCodeFor(exception, Level.ERROR)
assertThat(message.formattedMessage, contains("This is a test message".toRegex()))
assertThat(message.formattedMessage, contains(TEST_MESSAGE.toRegex()))
for (i in (0..10)) {
assertThat(message.formattedMessage, contains("This is exception $i".toRegex()))
assertThat(message.formattedMessage, contains("$EXCEPTION_MESSAGE $i".toRegex()))
}
assertEquals(message.format, originalMessage.format)
assertEquals(message.parameters, originalMessage.parameters)
assertEquals(message.throwable, originalMessage.throwable)
}

private class TestThrowable(index: Int, cause: Exception?) : Exception("This is exception $index", cause)
private class TestThrowable(index: Int, cause: Exception?) : Exception("$EXCEPTION_MESSAGE $index", cause)
}
@@ -25,10 +25,7 @@ tasks.named('jar', Jar) {
enabled = false
}

test {
ext {
ignoreForDistribution = true
}
def test = tasks.named('test', Test) {
filter {
// Running this class is the whole point, so include it explicitly.
includeTestsMatching "net.corda.deterministic.data.GenerateData"
@@ -37,8 +34,9 @@ test {
// note: required by Gradle Build Cache.
outputs.upToDateWhen { false }
}
assemble.finalizedBy test

def testDataJar = file("$buildDir/test-data.jar")
artifacts {
testData file: file("$buildDir/test-data.jar"), type: 'jar', builtBy: test
archives file: testDataJar, type: 'jar', builtBy: test
testData file: testDataJar, type: 'jar', builtBy: test
}
@@ -96,5 +96,5 @@ class FinalityFlowTests : WithFinality {
}

/** "Old" CorDapp which will force its node to keep its FinalityHandler enabled */
private fun tokenOldCordapp() = cordappWithPackages("com.template").copy(targetPlatformVersion = 3)
private fun tokenOldCordapp() = cordappWithPackages().copy(targetPlatformVersion = 3)
}
@@ -15,7 +15,6 @@ import net.corda.core.messaging.startFlow
import net.corda.core.utilities.getOrThrow
import net.corda.core.utilities.minutes
import net.corda.core.utilities.seconds
import net.corda.node.services.statemachine.Checkpoint
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.core.CHARLIE_NAME
@@ -77,9 +76,10 @@ class FlowIsKilledTest {
assertEquals(11, AFlowThatWantsToDieAndKillsItsFriends.position)
assertTrue(AFlowThatWantsToDieAndKillsItsFriendsResponder.receivedKilledExceptions[BOB_NAME]!!)
assertTrue(AFlowThatWantsToDieAndKillsItsFriendsResponder.receivedKilledExceptions[CHARLIE_NAME]!!)
assertEquals(1, alice.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
assertEquals(2, bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
assertEquals(1, bob.rpc.startFlow(::GetNumberOfFailedCheckpointsFlow).returnValue.getOrThrow(20.seconds))
val aliceCheckpoints = alice.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds)
assertEquals(1, aliceCheckpoints)
val bobCheckpoints = bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds)
assertEquals(1, bobCheckpoints)
}
}
}
@@ -109,9 +109,10 @@ class FlowIsKilledTest {
handle.returnValue.getOrThrow(1.minutes)
}
assertEquals(11, AFlowThatGetsMurderedByItsFriendResponder.position)
assertEquals(2, alice.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
assertEquals(1, alice.rpc.startFlow(::GetNumberOfFailedCheckpointsFlow).returnValue.getOrThrow(20.seconds))
assertEquals(1, bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
val aliceCheckpoints = alice.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds)
assertEquals(1, aliceCheckpoints)
val bobCheckpoints = bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds)
assertEquals(1, bobCheckpoints)
}
}

@@ -360,18 +361,4 @@ class FlowIsKilledTest {
}
}
}

@StartableByRPC
class GetNumberOfFailedCheckpointsFlow : FlowLogic<Long>() {
override fun call(): Long {
return serviceHub.jdbcSession()
.prepareStatement("select count(*) from node_checkpoints where status = ${Checkpoint.FlowStatus.FAILED.ordinal}")
.use { ps ->
ps.executeQuery().use { rs ->
rs.next()
rs.getLong(1)
}
}
}
}
}
@@ -56,7 +56,9 @@ class ReceiveFinalityFlowTest {
bob.assertFlowSentForObservationDueToUntrustedAttachmentsException(paymentReceiverId)

// Restart Bob with the contracts CorDapp so that it can recover from the error
bob = mockNet.restartNode(bob, parameters = InternalMockNodeParameters(additionalCordapps = listOf(FINANCE_CONTRACTS_CORDAPP)))
bob = mockNet.restartNode(bob,
parameters = InternalMockNodeParameters(additionalCordapps = listOf(FINANCE_CONTRACTS_CORDAPP)),
nodeFactory = { args -> InternalMockNetwork.MockNode(args, allowAppSchemaUpgradeWithCheckpoints = true) })
mockNet.runNetwork()
assertThat(bob.services.getCashBalance(GBP)).isEqualTo(100.POUNDS)
}
@@ -6,7 +6,7 @@ import com.natpryce.hamkrest.Matcher
import com.natpryce.hamkrest.equalTo
import net.corda.core.flows.*
import net.corda.core.identity.Party
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.FlowStateMachineHandle
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.messaging.FlowHandle
import net.corda.core.messaging.startFlow
@@ -16,7 +16,7 @@ import net.corda.testing.node.internal.TestStartedNode

interface WithFinality : WithMockNet {
//region Operations
fun TestStartedNode.finalise(stx: SignedTransaction, vararg recipients: Party): FlowStateMachine<SignedTransaction> {
fun TestStartedNode.finalise(stx: SignedTransaction, vararg recipients: Party): FlowStateMachineHandle<SignedTransaction> {
return startFlowAndRunNetwork(FinalityInvoker(stx, recipients.toSet(), emptySet()))
}
@@ -6,7 +6,7 @@ import net.corda.core.flows.FlowLogic
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.identity.PartyAndCertificate
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.FlowStateMachineHandle
import net.corda.core.transactions.SignedTransaction
import net.corda.core.transactions.TransactionBuilder
import net.corda.testing.core.makeUnique
@@ -48,12 +48,12 @@ interface WithMockNet {
/**
* Start a flow
*/
fun <T> TestStartedNode.startFlow(logic: FlowLogic<T>): FlowStateMachine<T> = services.startFlow(logic)
fun <T> TestStartedNode.startFlow(logic: FlowLogic<T>): FlowStateMachineHandle<T> = services.startFlow(logic)

/**
* Start a flow and run the network immediately afterwards
*/
fun <T> TestStartedNode.startFlowAndRunNetwork(logic: FlowLogic<T>): FlowStateMachine<T> =
fun <T> TestStartedNode.startFlowAndRunNetwork(logic: FlowLogic<T>): FlowStateMachineHandle<T> =
startFlow(logic).andRunNetwork()

fun TestStartedNode.createConfidentialIdentity(party: Party) =
@@ -24,7 +24,8 @@ data class InvocationContext(
val actor: Actor?,
val externalTrace: Trace? = null,
val impersonatedActor: Actor? = null,
val arguments: List<Any?> = emptyList()
val arguments: List<Any?>? = emptyList(), // 'arguments' is nullable so that a - >= 4.6 version - RPC client can be backwards compatible against - < 4.6 version - nodes
val clientId: String? = null
) {

constructor(
@@ -49,8 +50,9 @@ data class InvocationContext(
actor: Actor? = null,
externalTrace: Trace? = null,
impersonatedActor: Actor? = null,
arguments: List<Any?> = emptyList()
) = InvocationContext(origin, trace, actor, externalTrace, impersonatedActor, arguments)
arguments: List<Any?> = emptyList(),
clientId: String? = null
) = InvocationContext(origin, trace, actor, externalTrace, impersonatedActor, arguments, clientId)

/**
* Creates an [InvocationContext] with [InvocationOrigin.RPC] origin.
@@ -113,7 +115,8 @@ data class InvocationContext(
actor = actor,
externalTrace = externalTrace,
impersonatedActor = impersonatedActor,
arguments = arguments
arguments = arguments,
clientId = clientId
)
}
}
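The inline comment above is the key constraint: a 4.6+ RPC client has to tolerate `arguments` being null when it talks to a pre-4.6 node. A hedged sketch of the kind of null-safe handling that implies (`describeArguments` is an illustrative helper, not part of this change):

    fun describeArguments(arguments: List<Any?>?): String =
        arguments?.joinToString(prefix = "[", postfix = "]") ?: "<not supplied by an older node>"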
@@ -0,0 +1,11 @@
package net.corda.core.flows

import net.corda.core.CordaRuntimeException
import net.corda.core.serialization.internal.MissingSerializerException

/**
* Thrown whenever a flow result cannot be serialized when attempting to save it in the database
*/
class ResultSerializationException private constructor(message: String?) : CordaRuntimeException(message) {
constructor(e: MissingSerializerException): this(e.message)
}
@@ -11,10 +11,19 @@ import net.corda.core.node.ServiceHub
import net.corda.core.serialization.SerializedBytes
import org.slf4j.Logger

@DeleteForDJVM
@DoNotImplement
interface FlowStateMachineHandle<FLOWRETURN> {
val logic: FlowLogic<FLOWRETURN>?
val id: StateMachineRunId
val resultFuture: CordaFuture<FLOWRETURN>
val clientId: String?
}

/** This is an internal interface that is implemented by code in the node module. You should look at [FlowLogic]. */
@DeleteForDJVM
@DoNotImplement
interface FlowStateMachine<FLOWRETURN> {
interface FlowStateMachine<FLOWRETURN> : FlowStateMachineHandle<FLOWRETURN> {
@Suspendable
fun <SUSPENDRETURN : Any> suspend(ioRequest: FlowIORequest<SUSPENDRETURN>, maySkipCheckpoint: Boolean): SUSPENDRETURN

@@ -38,14 +47,11 @@ interface FlowStateMachine<FLOWRETURN> {

fun updateTimedFlowTimeout(timeoutSeconds: Long)

val logic: FlowLogic<FLOWRETURN>
val serviceHub: ServiceHub
val logger: Logger
val id: StateMachineRunId
val resultFuture: CordaFuture<FLOWRETURN>
val context: InvocationContext
val ourIdentity: Party
val ourSenderUUID: String?
val creationTime: Long
val isKilled: Boolean
}
}
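The hunk above splits the old FlowStateMachine interface: the id, the now-nullable logic, the result future and the new clientId move to FlowStateMachineHandle, and FlowStateMachine extends it. A small Kotlin sketch of what the narrower type buys callers, assuming only the core API shown above:

    import net.corda.core.internal.FlowStateMachineHandle
    import net.corda.core.utilities.getOrThrow

    // Code that only needs the flow's outcome can accept the handle instead of the full state machine.
    fun <T> awaitResult(handle: FlowStateMachineHandle<T>): T = handle.resultFuture.getOrThrow()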
@@ -264,6 +264,25 @@ interface CordaRPCOps : RPCOps {
@RPCReturnsObservables
fun <T> startFlowDynamic(logicType: Class<out FlowLogic<T>>, vararg args: Any?): FlowHandle<T>

/**
* Start the given flow with the given arguments and a [clientId].
*
* The flow's result/ exception will be available for the client to re-connect and retrieve even after the flow's lifetime,
* by re-calling [startFlowDynamicWithClientId] with the same [clientId]. The [logicType] and [args] will be ignored if the
* [clientId] matches an existing flow. If you don't have the original values, consider using [reattachFlowWithClientId].
*
* Upon calling [removeClientId], the node's resources holding the result/ exception will be freed and the result/ exception will
* no longer be available.
*
* [logicType] must be annotated with [net.corda.core.flows.StartableByRPC].
*
* @param clientId The client id to relate the flow to (or is already related to if the flow already exists)
* @param logicType The [FlowLogic] to start
* @param args The arguments to pass to the flow
*/
@RPCReturnsObservables
fun <T> startFlowDynamicWithClientId(clientId: String, logicType: Class<out FlowLogic<T>>, vararg args: Any?): FlowHandleWithClientId<T>

/**
* Start the given flow with the given arguments, returning an [Observable] with a single observation of the
* result of running the flow. [logicType] must be annotated with [net.corda.core.flows.StartableByRPC].
@@ -278,6 +297,30 @@ interface CordaRPCOps : RPCOps {
*/
fun killFlow(id: StateMachineRunId): Boolean

/**
* Reattach to an existing flow that was started with [startFlowDynamicWithClientId] and has a [clientId].
*
* If there is a flow matching the [clientId] then its result or exception is returned.
*
* When there is no flow matching the [clientId] then [null] is returned directly (not a future/[FlowHandleWithClientId]).
*
* Calling [reattachFlowWithClientId] after [removeClientId] with the same [clientId] will cause the function to return [null] as
* the result/exception of the flow will no longer be available.
*
* @param clientId The client id relating to an existing flow
*/
@RPCReturnsObservables
fun <T> reattachFlowWithClientId(clientId: String): FlowHandleWithClientId<T>?

/**
* Removes a flow's [clientId] to result/ exception mapping. If the mapping is of a running flow, then the mapping will not get removed.
*
* See [startFlowDynamicWithClientId] for more information.
*
* @return whether the mapping was removed.
*/
fun removeClientId(clientId: String): Boolean

/** Returns Node's NodeInfo, assuming this will not change while the node is running. */
fun nodeInfo(): NodeInfo

@@ -542,6 +585,79 @@ inline fun <T, A, B, C, D, E, F, reified R : FlowLogic<T>> CordaRPCOps.startFlow
arg5: F
): FlowHandle<T> = startFlowDynamic(R::class.java, arg0, arg1, arg2, arg3, arg4, arg5)

/**
* Extension function for type safe invocation of flows from Kotlin, with [clientId].
*/
@Suppress("unused")
inline fun <T, reified R : FlowLogic<T>> CordaRPCOps.startFlowWithClientId(
clientId: String,
@Suppress("unused_parameter")
flowConstructor: () -> R
): FlowHandleWithClientId<T> = startFlowDynamicWithClientId(clientId, R::class.java)

@Suppress("unused")
inline fun <T, A, reified R : FlowLogic<T>> CordaRPCOps.startFlowWithClientId(
clientId: String,
@Suppress("unused_parameter")
flowConstructor: (A) -> R,
arg0: A
): FlowHandleWithClientId<T> = startFlowDynamicWithClientId(clientId, R::class.java, arg0)

@Suppress("unused")
inline fun <T, A, B, reified R : FlowLogic<T>> CordaRPCOps.startFlowWithClientId(
clientId: String,
@Suppress("unused_parameter")
flowConstructor: (A, B) -> R,
arg0: A,
arg1: B
): FlowHandleWithClientId<T> = startFlowDynamicWithClientId(clientId, R::class.java, arg0, arg1)

@Suppress("unused")
inline fun <T, A, B, C, reified R : FlowLogic<T>> CordaRPCOps.startFlowWithClientId(
clientId: String,
@Suppress("unused_parameter")
flowConstructor: (A, B, C) -> R,
arg0: A,
arg1: B,
arg2: C
): FlowHandleWithClientId<T> = startFlowDynamicWithClientId(clientId, R::class.java, arg0, arg1, arg2)

@Suppress("unused")
inline fun <T, A, B, C, D, reified R : FlowLogic<T>> CordaRPCOps.startFlowWithClientId(
clientId: String,
@Suppress("unused_parameter")
flowConstructor: (A, B, C, D) -> R,
arg0: A,
arg1: B,
arg2: C,
arg3: D
): FlowHandleWithClientId<T> = startFlowDynamicWithClientId(clientId, R::class.java, arg0, arg1, arg2, arg3)

@Suppress("unused")
inline fun <T, A, B, C, D, E, reified R : FlowLogic<T>> CordaRPCOps.startFlowWithClientId(
clientId: String,
@Suppress("unused_parameter")
flowConstructor: (A, B, C, D, E) -> R,
arg0: A,
arg1: B,
arg2: C,
arg3: D,
arg4: E
): FlowHandleWithClientId<T> = startFlowDynamicWithClientId(clientId, R::class.java, arg0, arg1, arg2, arg3, arg4)

@Suppress("unused")
inline fun <T, A, B, C, D, E, F, reified R : FlowLogic<T>> CordaRPCOps.startFlowWithClientId(
clientId: String,
@Suppress("unused_parameter")
flowConstructor: (A, B, C, D, E, F) -> R,
arg0: A,
arg1: B,
arg2: C,
arg3: D,
arg4: E,
arg5: F
): FlowHandleWithClientId<T> = startFlowDynamicWithClientId(clientId, R::class.java, arg0, arg1, arg2, arg3, arg4, arg5)

/**
* Extension function for type safe invocation of flows from Kotlin, with progress tracking enabled.
*/
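Taken together, the client id API above lets a caller start a flow once under a stable id, re-attach to its result after a disconnect or node restart, and then release the node-side mapping. A hedged usage sketch; `IssuePayment` is a hypothetical @StartableByRPC flow and `proxy` an already established CordaRPCOps connection:

    import co.paralleluniverse.fibers.Suspendable
    import net.corda.core.flows.FlowLogic
    import net.corda.core.flows.StartableByRPC
    import net.corda.core.messaging.CordaRPCOps
    import net.corda.core.messaging.startFlowWithClientId
    import net.corda.core.utilities.getOrThrow

    @StartableByRPC
    class IssuePayment(private val reference: String) : FlowLogic<String>() {
        @Suspendable
        override fun call(): String = "issued:$reference"
    }

    fun runOnce(proxy: CordaRPCOps) {
        val clientId = "payment-batch-2020-07-01"                 // stable, caller-chosen id
        val handle = proxy.startFlowWithClientId(clientId, ::IssuePayment, "invoice-42")
        println(handle.returnValue.getOrThrow())                  // result of this run

        // After a reconnect the result is still retrievable without re-running the flow...
        val reattached = proxy.reattachFlowWithClientId<String>(clientId)
        println(reattached?.returnValue?.getOrThrow())

        // ...until the client id mapping is explicitly removed.
        proxy.removeClientId(clientId)
    }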
@@ -28,6 +28,14 @@ interface FlowHandle<A> : AutoCloseable {
override fun close()
}

interface FlowHandleWithClientId<A> : FlowHandle<A> {

/**
* The [clientId] with which the client has started the flow.
*/
val clientId: String
}

/**
* [FlowProgressHandle] is a serialisable handle for the started flow, parameterised by the type of the flow's return value.
*/
@@ -66,6 +74,18 @@ data class FlowHandleImpl<A>(
}
}

@CordaSerializable
data class FlowHandleWithClientIdImpl<A>(
override val id: StateMachineRunId,
override val returnValue: CordaFuture<A>,
override val clientId: String) : FlowHandleWithClientId<A> {

// Remember to add @Throws to FlowHandle.close() if this throws an exception.
override fun close() {
returnValue.cancel(false)
}
}

@CordaSerializable
data class FlowProgressHandleImpl<A> @JvmOverloads constructor(
override val id: StateMachineRunId,
@@ -640,6 +640,9 @@
<ID>LongParameterList:CordaRPCOps.kt$( @Suppress("UNUSED_PARAMETER") flowConstructor: (A, B, C, D, E, F) -> R, arg0: A, arg1: B, arg2: C, arg3: D, arg4: E, arg5: F )</ID>
<ID>LongParameterList:CordaRPCOps.kt$( @Suppress("unused_parameter") flowConstructor: (A, B, C, D, E) -> R, arg0: A, arg1: B, arg2: C, arg3: D, arg4: E )</ID>
<ID>LongParameterList:CordaRPCOps.kt$( @Suppress("unused_parameter") flowConstructor: (A, B, C, D, E, F) -> R, arg0: A, arg1: B, arg2: C, arg3: D, arg4: E, arg5: F )</ID>
<ID>LongParameterList:CordaRPCOps.kt$( clientId: String, @Suppress("unused_parameter") flowConstructor: (A, B, C, D) -> R, arg0: A, arg1: B, arg2: C, arg3: D )</ID>
<ID>LongParameterList:CordaRPCOps.kt$( clientId: String, @Suppress("unused_parameter") flowConstructor: (A, B, C, D, E) -> R, arg0: A, arg1: B, arg2: C, arg3: D, arg4: E )</ID>
<ID>LongParameterList:CordaRPCOps.kt$( clientId: String, @Suppress("unused_parameter") flowConstructor: (A, B, C, D, E, F) -> R, arg0: A, arg1: B, arg2: C, arg3: D, arg4: E, arg5: F )</ID>
<ID>LongParameterList:Driver.kt$DriverParameters$( isDebug: Boolean, driverDirectory: Path, portAllocation: PortAllocation, debugPortAllocation: PortAllocation, systemProperties: Map<String, String>, useTestClock: Boolean, startNodesInProcess: Boolean, waitForAllNodesToFinish: Boolean, notarySpecs: List<NotarySpec>, extraCordappPackagesToScan: List<String>, jmxPolicy: JmxPolicy, networkParameters: NetworkParameters )</ID>
<ID>LongParameterList:Driver.kt$DriverParameters$( isDebug: Boolean, driverDirectory: Path, portAllocation: PortAllocation, debugPortAllocation: PortAllocation, systemProperties: Map<String, String>, useTestClock: Boolean, startNodesInProcess: Boolean, waitForAllNodesToFinish: Boolean, notarySpecs: List<NotarySpec>, extraCordappPackagesToScan: List<String>, jmxPolicy: JmxPolicy, networkParameters: NetworkParameters, cordappsForAllNodes: Set<TestCordapp>? )</ID>
<ID>LongParameterList:DriverDSL.kt$DriverDSL$( defaultParameters: NodeParameters = NodeParameters(), providedName: CordaX500Name? = defaultParameters.providedName, rpcUsers: List<User> = defaultParameters.rpcUsers, verifierType: VerifierType = defaultParameters.verifierType, customOverrides: Map<String, Any?> = defaultParameters.customOverrides, startInSameProcess: Boolean? = defaultParameters.startInSameProcess, maximumHeapSize: String = defaultParameters.maximumHeapSize )</ID>
@@ -1261,7 +1264,6 @@
<ID>SpreadOperator:ConfigUtilities.kt$(*pairs)</ID>
<ID>SpreadOperator:Configuration.kt$Configuration.Validation.Error$(*(containingPath.toList() + this.containingPath).toTypedArray())</ID>
<ID>SpreadOperator:ContractJarTestUtils.kt$ContractJarTestUtils$(jarName, *contractNames.map{ "${it.replace(".", "/")}.class" }.toTypedArray())</ID>
<ID>SpreadOperator:CordaRPCOpsImpl.kt$CordaRPCOpsImpl$(logicType, context(), *args)</ID>
<ID>SpreadOperator:CordaX500Name.kt$CordaX500Name.Companion$(*Locale.getISOCountries(), unspecifiedCountry)</ID>
<ID>SpreadOperator:CustomCordapp.kt$CustomCordapp$(*classes.map { it.name }.toTypedArray())</ID>
<ID>SpreadOperator:CustomCordapp.kt$CustomCordapp$(*packages.map { it.replace('.', '/') }.toTypedArray())</ID>
lib/quasar.jar (BIN): binary file not shown
@@ -15,7 +15,6 @@ import net.corda.core.crypto.Crypto.generateKeyPair
import net.corda.core.crypto.SignatureScheme
import net.corda.core.crypto.newSecureRandom
import net.corda.core.identity.CordaX500Name
import net.corda.core.internal.JavaVersion
import net.corda.core.internal.div
import net.corda.core.serialization.SerializationContext
import net.corda.core.serialization.deserialize
@@ -118,7 +117,7 @@ class X509UtilitiesTest {
@Test(timeout=300_000)
fun `create valid self-signed CA certificate`() {
Crypto.supportedSignatureSchemes().filter { it != COMPOSITE_KEY
&& ( !JavaVersion.isVersionAtLeast(JavaVersion.Java_11) || it != SPHINCS256_SHA256)}.forEach { validSelfSignedCertificate(it) }
&& ( it != SPHINCS256_SHA256)}.forEach { validSelfSignedCertificate(it) }
}

private fun validSelfSignedCertificate(signatureScheme: SignatureScheme) {
@@ -153,7 +152,7 @@ class X509UtilitiesTest {

@Test(timeout=300_000)
fun `create valid server certificate chain`() {
certChainSchemeCombinations.filter{ !JavaVersion.isVersionAtLeast(JavaVersion.Java_11) || it.first != SPHINCS256_SHA256 }
certChainSchemeCombinations.filter{ it.first != SPHINCS256_SHA256 }
.forEach { createValidServerCertChain(it.first, it.second) }
}
@ -0,0 +1,108 @@
package net.corda.nodeapitests.internal.persistence

import net.corda.core.contracts.UniqueIdentifier
import net.corda.core.schemas.MappedSchema
import net.corda.core.schemas.PersistentState
import net.corda.core.schemas.PersistentStateRef
import net.corda.node.internal.DataSourceFactory
import net.corda.node.internal.startHikariPool
import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseMigrationException
import net.corda.nodeapi.internal.persistence.HibernateSchemaChangeException
import net.corda.nodeapi.internal.persistence.SchemaMigration
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.TestIdentity
import net.corda.testing.internal.TestingNamedCacheFactory
import net.corda.testing.node.MockServices
import org.assertj.core.api.Assertions.assertThatThrownBy
import org.junit.Before
import org.junit.Test
import java.util.*
import javax.persistence.Column
import javax.persistence.Entity
import javax.persistence.Table
import javax.sql.DataSource

class MigrationSchemaSyncTest{
    object TestSchemaFamily

    object GoodSchema : MappedSchema(schemaFamily = TestSchemaFamily.javaClass, version = 1, mappedTypes = listOf(State::class.java)) {
        @Entity
        @Table(name = "State")
        class State(
                @Column
                var id: String
        ) : PersistentState(PersistentStateRef(UniqueIdentifier().toString(), 0 ))

        override val migrationResource: String? = "goodschema.testmigration"
    }

    lateinit var hikariProperties: Properties
    lateinit var dataSource: DataSource

    @Before
    fun setUp() {
        hikariProperties = MockServices.makeTestDataSourceProperties()
        dataSource = DataSourceFactory.createDataSource(hikariProperties)
    }

    private fun schemaMigration() = SchemaMigration(dataSource, null, null,
            TestIdentity(ALICE_NAME, 70).name)

    @Test(timeout=300_000)
    fun testSchemaScript(){
        schemaMigration().runMigration(false, setOf(GoodSchema), true)
        val persistence = CordaPersistence(
                false,
                setOf(GoodSchema),
                hikariProperties.getProperty("dataSource.url"),
                TestingNamedCacheFactory()
        )
        persistence.startHikariPool(hikariProperties){ _, _ -> Unit}

        persistence.transaction {
            this.entityManager.persist(GoodSchema.State("id"))
        }
    }

    @Test(timeout=300_000)
    fun checkThatSchemaSyncFixesLiquibaseException(){
        // Schema is missing if no migration is run and hibernate not allowed to create
        val persistenceBlank = CordaPersistence(
                false,
                setOf(GoodSchema),
                hikariProperties.getProperty("dataSource.url"),
                TestingNamedCacheFactory()
        )
        persistenceBlank.startHikariPool(hikariProperties){ _, _ -> Unit}
        assertThatThrownBy{ persistenceBlank.transaction {this.entityManager.persist(GoodSchema.State("id"))}}
                .isInstanceOf(HibernateSchemaChangeException::class.java)
                .hasMessageContaining("Incompatible schema")

        // create schema via hibernate - now schema gets created and we can write
        val persistenceHibernate = CordaPersistence(
                false,
                setOf(GoodSchema),
                hikariProperties.getProperty("dataSource.url"),
                TestingNamedCacheFactory(),
                allowHibernateToManageAppSchema = true
        )
        persistenceHibernate.startHikariPool(hikariProperties){ _, _ -> Unit}
        persistenceHibernate.transaction { entityManager.persist(GoodSchema.State("id_hibernate")) }

        // if we try to run schema migration now, the changelog and the schemas are out of sync
        assertThatThrownBy { schemaMigration().runMigration(false, setOf(GoodSchema), true) }
                .isInstanceOf(DatabaseMigrationException::class.java)
                .hasMessageContaining("Table \"STATE\" already exists")

        // update the change log with schemas we know exist
        schemaMigration().synchroniseSchemas(setOf(GoodSchema), true)

        // now run migration runs clean
        schemaMigration().runMigration(false, setOf(GoodSchema), true)
    }

}
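The new test above exercises the reworked SchemaMigration API directly: runMigration applies outstanding Liquibase changes, while synchroniseSchemas records already-existing tables in the changelog without touching them. As a rough illustration only, a CorDapp whose tables were previously created by Hibernate could be moved onto Liquibase-managed migrations with the same two calls; constructor and method signatures are the ones introduced in this diff, and appSchemas is a placeholder:

    import javax.sql.DataSource
    import net.corda.core.identity.CordaX500Name
    import net.corda.core.schemas.MappedSchema
    import net.corda.nodeapi.internal.persistence.SchemaMigration

    // Sketch, not part of this change: mark the Hibernate-created tables as already applied,
    // then let ordinary migrations run against the synchronised changelog.
    fun adoptLiquibaseForExistingSchema(dataSource: DataSource, ourName: CordaX500Name, appSchemas: Set<MappedSchema>) {
        val migration = SchemaMigration(dataSource, null, null, ourName)
        migration.synchroniseSchemas(appSchemas, forceThrowOnMissingMigration = true)
        migration.runMigration(existingCheckpoints = false, schemas = appSchemas, forceThrowOnMissingMigration = true)
    }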
@ -2,12 +2,11 @@ package net.corda.nodeapitests.internal.persistence

import net.corda.core.schemas.MappedSchema
import net.corda.core.schemas.PersistentState
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.nodeapi.internal.persistence.MissingMigrationException
import net.corda.nodeapi.internal.persistence.SchemaMigration
import net.corda.node.internal.DataSourceFactory
import net.corda.node.services.persistence.DBCheckpointStorage
import net.corda.node.services.schema.NodeSchemaService
import net.corda.nodeapi.internal.persistence.MissingMigrationException
import net.corda.nodeapi.internal.persistence.SchemaMigration
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.TestIdentity
import net.corda.testing.node.MockServices
@ -40,25 +39,21 @@ class MissingSchemaMigrationTest {
        dataSource = DataSourceFactory.createDataSource(hikariProperties)
    }

    private fun createSchemaMigration(schemasToMigrate: Set<MappedSchema>, forceThrowOnMissingMigration: Boolean): SchemaMigration {
        val databaseConfig = DatabaseConfig()
        return SchemaMigration(schemasToMigrate, dataSource, databaseConfig, null, null,
                TestIdentity(ALICE_NAME, 70).name, forceThrowOnMissingMigration)
    }
    private fun schemaMigration() = SchemaMigration(dataSource, null, null,
            TestIdentity(ALICE_NAME, 70).name)

    @Test(timeout=300_000)
    fun `test that an error is thrown when forceThrowOnMissingMigration is set and a mapped schema is missing a migration`() {
        assertThatThrownBy {
            createSchemaMigration(setOf(GoodSchema), true)
                    .nodeStartup(dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L })
            schemaMigration().runMigration(dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L }, setOf(GoodSchema), true)
        }.isInstanceOf(MissingMigrationException::class.java)
    }

    @Test(timeout=300_000)
    fun `test that an error is not thrown when forceThrowOnMissingMigration is not set and a mapped schema is missing a migration`() {
        assertDoesNotThrow {
            createSchemaMigration(setOf(GoodSchema), false)
                    .nodeStartup(dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L })
            schemaMigration().runMigration(dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L }, setOf(GoodSchema), false)
        }
    }

@ -66,8 +61,7 @@ class MissingSchemaMigrationTest {
    fun `test that there are no missing migrations for the node`() {
        assertDoesNotThrow("This test failure indicates " +
                "a new table has been added to the node without the appropriate migration scripts being present") {
            createSchemaMigration(NodeSchemaService().internalSchemas(), false)
                    .nodeStartup(dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L })
            schemaMigration().runMigration(dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L }, NodeSchemaService().internalSchemas, true)
        }
    }

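The tests above gate runMigration on whether any flow checkpoints exist, using DBCheckpointStorage.getCheckpointCount over a raw connection. A hedged sketch of that pattern as a reusable helper; the names are exactly those used in the tests and nothing here is added by this change:

    import javax.sql.DataSource
    import net.corda.core.schemas.MappedSchema
    import net.corda.node.services.persistence.DBCheckpointStorage
    import net.corda.nodeapi.internal.persistence.SchemaMigration

    // Refuse to migrate while suspended flows are present; runMigration throws CheckpointsException in that case.
    fun migrateUnlessCheckpointsExist(migration: SchemaMigration, dataSource: DataSource, schemas: Set<MappedSchema>) {
        val existingCheckpoints = dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L }
        migration.runMigration(existingCheckpoints, schemas, true)
    }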
@ -0,0 +1,19 @@
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
                   xmlns:ext="http://www.liquibase.org/xml/ns/dbchangelog-ext"
                   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
                   xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog-ext http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-ext.xsd http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd"
                   logicalFilePath="migration/node-services.changelog-init.xml">

    <changeSet author="R3.Corda" id="unittest-goodschema-v1">
        <createTable tableName="State">
            <column name="output_index" type="INT">
                <constraints nullable="false"/>
            </column>
            <column name="transaction_id" type="NVARCHAR(64)">
                <constraints nullable="false"/>
            </column>
            <column name="id" type="NVARCHAR(255)"/>
        </createTable>
    </changeSet>
</databaseChangeLog>

@ -75,6 +75,15 @@ constructor(private val initSerEnv: Boolean,
                "generate-node-info"
        )

        private val createSchemasCmd = listOf(
                Paths.get(System.getProperty("java.home"), "bin", "java").toString(),
                "-jar",
                "corda.jar",
                "run-migration-scripts",
                "--core-schemas",
                "--app-schemas"
        )

        private const val LOGS_DIR_NAME = "logs"

        private val jarsThatArentCordapps = setOf("corda.jar", "runnodes.jar")
@ -92,7 +101,9 @@ constructor(private val initSerEnv: Boolean,
        }
        val executor = Executors.newFixedThreadPool(numParallelProcesses)
        return try {
            nodeDirs.map { executor.fork { generateNodeInfo(it) } }.transpose().getOrThrow()
            nodeDirs.map { executor.fork {
                createDbSchemas(it)
                generateNodeInfo(it) } }.transpose().getOrThrow()
        } finally {
            warningTimer.cancel()
            executor.shutdownNow()
@ -100,23 +111,31 @@ constructor(private val initSerEnv: Boolean,
    }

    private fun generateNodeInfo(nodeDir: Path): Path {
        runNodeJob(nodeInfoGenCmd, nodeDir, "node-info-gen.log")
        return nodeDir.list { paths ->
            paths.filter { it.fileName.toString().startsWith(NODE_INFO_FILE_NAME_PREFIX) }.findFirst().get()
        }
    }

    private fun createDbSchemas(nodeDir: Path) {
        runNodeJob(createSchemasCmd, nodeDir, "node-run-migration.log")
    }

    private fun runNodeJob(command: List<String>, nodeDir: Path, logfileName: String) {
        val logsDir = (nodeDir / LOGS_DIR_NAME).createDirectories()
        val nodeInfoGenFile = (logsDir / "node-info-gen.log").toFile()
        val process = ProcessBuilder(nodeInfoGenCmd)
        val nodeRedirectFile = (logsDir / logfileName).toFile()
        val process = ProcessBuilder(command)
                .directory(nodeDir.toFile())
                .redirectErrorStream(true)
                .redirectOutput(nodeInfoGenFile)
                .redirectOutput(nodeRedirectFile)
                .apply { environment()["CAPSULE_CACHE_DIR"] = "../.cache" }
                .start()
        try {
            if (!process.waitFor(3, TimeUnit.MINUTES)) {
                process.destroyForcibly()
                printNodeInfoGenLogToConsole(nodeInfoGenFile)
            }
            printNodeInfoGenLogToConsole(nodeInfoGenFile) { process.exitValue() == 0 }
            return nodeDir.list { paths ->
                paths.filter { it.fileName.toString().startsWith(NODE_INFO_FILE_NAME_PREFIX) }.findFirst().get()
                printNodeOutputToConsoleAndThrow(nodeRedirectFile)
            }
            if (process.exitValue() != 0) printNodeOutputToConsoleAndThrow(nodeRedirectFile)
        } catch (e: InterruptedException) {
            // Don't leave this process dangling if the thread is interrupted.
            process.destroyForcibly()
@ -124,18 +143,16 @@ constructor(private val initSerEnv: Boolean,
        }
    }

    private fun printNodeInfoGenLogToConsole(nodeInfoGenFile: File, check: (() -> Boolean) = { true }) {
        if (!check.invoke()) {
            val nodeDir = nodeInfoGenFile.parent
            val nodeIdentifier = try {
                ConfigFactory.parseFile((nodeDir / "node.conf").toFile()).getString("myLegalName")
            } catch (e: ConfigException) {
                nodeDir
            }
            System.err.println("#### Error while generating node info file $nodeIdentifier ####")
            nodeInfoGenFile.inputStream().copyTo(System.err)
            throw IllegalStateException("Error while generating node info file. Please check the logs in $nodeDir.")
    private fun printNodeOutputToConsoleAndThrow(stdoutFile: File) {
        val nodeDir = stdoutFile.parent
        val nodeIdentifier = try {
            ConfigFactory.parseFile((nodeDir / "node.conf").toFile()).getString("myLegalName")
        } catch (e: ConfigException) {
            nodeDir
        }
        System.err.println("#### Error while generating node info file $nodeIdentifier ####")
        stdoutFile.inputStream().copyTo(System.err)
        throw IllegalStateException("Error while generating node info file. Please check the logs in $nodeDir.")
    }

    const val DEFAULT_MAX_MESSAGE_SIZE: Int = 10485760

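createDbSchemas above shells out to the node jar with the new run-migration-scripts sub-command before any node-info is generated. For reference, a minimal hand-rolled equivalent of that per-node-directory invocation; the argument list matches createSchemasCmd, while the log file location is an assumption borrowed from runNodeJob:

    import java.io.File
    import java.nio.file.Paths
    import java.util.concurrent.TimeUnit

    // Sketch of the per-node call: java -jar corda.jar run-migration-scripts --core-schemas --app-schemas
    fun runMigrationScripts(nodeDir: File) {
        val java = Paths.get(System.getProperty("java.home"), "bin", "java").toString()
        val logFile = File(nodeDir, "logs/node-run-migration.log").apply { parentFile.mkdirs() }
        val process = ProcessBuilder(java, "-jar", "corda.jar", "run-migration-scripts", "--core-schemas", "--app-schemas")
                .directory(nodeDir)
                .redirectErrorStream(true)
                .redirectOutput(logFile)
                .start()
        check(process.waitFor(3, TimeUnit.MINUTES) && process.exitValue() == 0) { "Schema migration failed, see $logFile" }
    }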
@ -31,24 +31,12 @@ import javax.sql.DataSource
 */
const val NODE_DATABASE_PREFIX = "node_"

enum class SchemaInitializationType{
    NONE,
    VALIDATE,
    UPDATE
}

// This class forms part of the node config and so any changes to it must be handled with care
data class DatabaseConfig(
        val initialiseSchema: Boolean = Defaults.initialiseSchema,
        val initialiseAppSchema: SchemaInitializationType = Defaults.initialiseAppSchema,
        val transactionIsolationLevel: TransactionIsolationLevel = Defaults.transactionIsolationLevel,
        val exportHibernateJMXStatistics: Boolean = Defaults.exportHibernateJMXStatistics,
        val mappedSchemaCacheSize: Long = Defaults.mappedSchemaCacheSize
) {
    object Defaults {
        val initialiseSchema = true
        val initialiseAppSchema = SchemaInitializationType.UPDATE
        val transactionIsolationLevel = TransactionIsolationLevel.REPEATABLE_READ
        val exportHibernateJMXStatistics = false
        val mappedSchemaCacheSize = 100L
    }
@ -67,6 +55,10 @@ enum class TransactionIsolationLevel {
     */
    val jdbcString = "TRANSACTION_$name"
    val jdbcValue: Int = java.sql.Connection::class.java.getField(jdbcString).get(null) as Int

    companion object{
        val default = READ_COMMITTED
    }
}

internal val _prohibitDatabaseAccess = ThreadLocal.withInitial { false }
@ -96,27 +88,28 @@ fun <T> withoutDatabaseAccess(block: () -> T): T {
val contextDatabaseOrNull: CordaPersistence? get() = _contextDatabase.get()

class CordaPersistence(
        databaseConfig: DatabaseConfig,
        exportHibernateJMXStatistics: Boolean,
        schemas: Set<MappedSchema>,
        val jdbcUrl: String,
        cacheFactory: NamedCacheFactory,
        attributeConverters: Collection<AttributeConverter<*, *>> = emptySet(),
        customClassLoader: ClassLoader? = null,
        val closeConnection: Boolean = true,
        val errorHandler: DatabaseTransaction.(e: Exception) -> Unit = {}
        val errorHandler: DatabaseTransaction.(e: Exception) -> Unit = {},
        allowHibernateToManageAppSchema: Boolean = false
) : Closeable {
    companion object {
        private val log = contextLogger()
    }

    private val defaultIsolationLevel = databaseConfig.transactionIsolationLevel
    private val defaultIsolationLevel = TransactionIsolationLevel.default
    val hibernateConfig: HibernateConfiguration by lazy {
        transaction {
            try {
                HibernateConfiguration(schemas, databaseConfig, attributeConverters, jdbcUrl, cacheFactory, customClassLoader)
                HibernateConfiguration(schemas, exportHibernateJMXStatistics, attributeConverters, jdbcUrl, cacheFactory, customClassLoader, allowHibernateToManageAppSchema)
            } catch (e: Exception) {
                when (e) {
                    is SchemaManagementException -> throw HibernateSchemaChangeException("Incompatible schema change detected. Please run the node with database.initialiseSchema=true. Reason: ${e.message}", e)
                    is SchemaManagementException -> throw HibernateSchemaChangeException("Incompatible schema change detected. Please run schema migration scripts (node with sub-command run-migration-scripts). Reason: ${e.message}", e)
                    else -> throw HibernateConfigException("Could not create Hibernate configuration: ${e.message}", e)
                }
            }

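With initialiseSchema and initialiseAppSchema gone from DatabaseConfig, the remaining knobs move to the CordaPersistence constructor itself, as the new MigrationSchemaSyncTest at the top of this change shows. A minimal construction sketch mirroring that test; the values are test-only and TestingNamedCacheFactory plus the hikari properties come from the testing modules:

    val persistence = CordaPersistence(
            false,                                            // exportHibernateJMXStatistics
            setOf(GoodSchema),                                // schemas
            hikariProperties.getProperty("dataSource.url"),   // jdbcUrl
            TestingNamedCacheFactory(),
            allowHibernateToManageAppSchema = false           // default: app schema must come from migration scripts
    )
    persistence.startHikariPool(hikariProperties) { _, _ -> Unit }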
@ -19,11 +19,12 @@ import javax.persistence.AttributeConverter

class HibernateConfiguration(
        schemas: Set<MappedSchema>,
        private val databaseConfig: DatabaseConfig,
        private val exportHibernateJMXStatistics: Boolean,
        private val attributeConverters: Collection<AttributeConverter<*, *>>,
        jdbcUrl: String,
        cacheFactory: NamedCacheFactory,
        val customClassLoader: ClassLoader? = null
        val customClassLoader: ClassLoader? = null,
        val allowHibernateToManageAppSchema: Boolean = false
) {
    companion object {
        private val logger = contextLogger()
@ -64,10 +65,10 @@ class HibernateConfiguration(
    fun sessionFactoryForSchemas(key: Set<MappedSchema>): SessionFactory = sessionFactories.get(key, ::makeSessionFactoryForSchemas)!!

    private fun makeSessionFactoryForSchemas(schemas: Set<MappedSchema>): SessionFactory {
        val sessionFactory = sessionFactoryFactory.makeSessionFactoryForSchemas(databaseConfig, schemas, customClassLoader, attributeConverters)
        val sessionFactory = sessionFactoryFactory.makeSessionFactoryForSchemas(schemas, customClassLoader, attributeConverters, allowHibernateToManageAppSchema)

        // export Hibernate JMX statistics
        if (databaseConfig.exportHibernateJMXStatistics)
        if (exportHibernateJMXStatistics)
            initStatistics(sessionFactory)

        return sessionFactory
@ -75,7 +76,7 @@ class HibernateConfiguration(

    // NOTE: workaround suggested to overcome deprecation of StatisticsService (since Hibernate v4.0)
    // https://stackoverflow.com/questions/23606092/hibernate-upgrade-statisticsservice
    fun initStatistics(sessionFactory: SessionFactory) {
    private fun initStatistics(sessionFactory: SessionFactory) {
        val statsName = ObjectName("org.hibernate:type=statistics")
        val mbeanServer = ManagementFactory.getPlatformMBeanServer()

@ -0,0 +1,8 @@
package net.corda.nodeapi.internal.persistence

import liquibase.database.Database
import liquibase.database.jvm.JdbcConnection

interface LiquibaseDatabaseFactory {
    fun getLiquibaseDatabase(conn: JdbcConnection): Database
}
@ -0,0 +1,11 @@
package net.corda.nodeapi.internal.persistence

import liquibase.database.Database
import liquibase.database.DatabaseFactory
import liquibase.database.jvm.JdbcConnection

class LiquibaseDatabaseFactoryImpl : LiquibaseDatabaseFactory {
    override fun getLiquibaseDatabase(conn: JdbcConnection): Database {
        return DatabaseFactory.getInstance().findCorrectDatabaseImplementation(conn)
    }
}
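SchemaMigration takes a LiquibaseDatabaseFactory (defaulting to the implementation above), so the Database handed to Liquibase can be intercepted in tests. A hedged sketch of a delegating test double; the class itself is illustrative and not part of this change:

    import liquibase.database.Database
    import liquibase.database.jvm.JdbcConnection
    import net.corda.nodeapi.internal.persistence.LiquibaseDatabaseFactory
    import net.corda.nodeapi.internal.persistence.LiquibaseDatabaseFactoryImpl

    // Records the Database instances produced by the real factory so a test can assert against them.
    class RecordingLiquibaseDatabaseFactory(
            private val delegate: LiquibaseDatabaseFactory = LiquibaseDatabaseFactoryImpl()
    ) : LiquibaseDatabaseFactory {
        val produced = mutableListOf<Database>()

        override fun getLiquibaseDatabase(conn: JdbcConnection): Database {
            return delegate.getLiquibaseDatabase(conn).also { produced += it }
        }
    }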
@ -4,44 +4,40 @@ import com.fasterxml.jackson.databind.ObjectMapper
|
||||
import liquibase.Contexts
|
||||
import liquibase.LabelExpression
|
||||
import liquibase.Liquibase
|
||||
import liquibase.database.Database
|
||||
import liquibase.database.DatabaseFactory
|
||||
import liquibase.database.jvm.JdbcConnection
|
||||
import liquibase.exception.LiquibaseException
|
||||
import liquibase.resource.ClassLoaderResourceAccessor
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.nodeapi.internal.MigrationHelpers.getMigrationResource
|
||||
import net.corda.core.schemas.MappedSchema
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.nodeapi.internal.MigrationHelpers.getMigrationResource
|
||||
import net.corda.nodeapi.internal.cordapp.CordappLoader
|
||||
import java.io.ByteArrayInputStream
|
||||
import java.io.InputStream
|
||||
import java.nio.file.Path
|
||||
import java.sql.Statement
|
||||
import javax.sql.DataSource
|
||||
import java.sql.Connection
|
||||
import java.util.concurrent.locks.ReentrantLock
|
||||
import javax.sql.DataSource
|
||||
import kotlin.concurrent.withLock
|
||||
|
||||
// Migrate the database to the current version, using liquibase.
|
||||
class SchemaMigration(
|
||||
val schemas: Set<MappedSchema>,
|
||||
open class SchemaMigration(
|
||||
val dataSource: DataSource,
|
||||
private val databaseConfig: DatabaseConfig,
|
||||
cordappLoader: CordappLoader? = null,
|
||||
private val currentDirectory: Path?,
|
||||
// This parameter is used by the vault state migration to establish what the node's legal identity is when setting up
|
||||
// its copy of the identity service. It is passed through using a system property. When multiple identity support is added, this will need
|
||||
// reworking so that multiple identities can be passed to the migration.
|
||||
private val ourName: CordaX500Name? = null,
|
||||
// This parameter forces an error to be thrown if there are missing migrations. When using H2, Hibernate will automatically create schemas where they are
|
||||
// missing, so no need to throw unless you're specifically testing whether all the migrations are present.
|
||||
private val forceThrowOnMissingMigration: Boolean = false) {
|
||||
protected val databaseFactory: LiquibaseDatabaseFactory = LiquibaseDatabaseFactoryImpl()) {
|
||||
|
||||
companion object {
|
||||
private val logger = contextLogger()
|
||||
const val NODE_BASE_DIR_KEY = "liquibase.nodeDaseDir"
|
||||
const val NODE_X500_NAME = "liquibase.nodeName"
|
||||
val loader = ThreadLocal<CordappLoader>()
|
||||
private val mutex = ReentrantLock()
|
||||
@JvmStatic
|
||||
protected val mutex = ReentrantLock()
|
||||
}
|
||||
|
||||
init {
|
||||
@ -50,36 +46,86 @@ class SchemaMigration(
|
||||
|
||||
private val classLoader = cordappLoader?.appClassLoader ?: Thread.currentThread().contextClassLoader
|
||||
|
||||
/**
|
||||
* Main entry point to the schema migration.
|
||||
* Called during node startup.
|
||||
/**
|
||||
* Will run the Liquibase migration on the actual database.
|
||||
* @param existingCheckpoints Whether checkpoints exist that would prohibit running a migration
|
||||
* @param schemas The set of MappedSchemas to check
|
||||
* @param forceThrowOnMissingMigration throws an exception if a mapped schema is missing the migration resource. Can be set to false
|
||||
* when allowing hibernate to create missing schemas in dev or tests.
|
||||
*/
|
||||
fun nodeStartup(existingCheckpoints: Boolean) {
|
||||
when {
|
||||
databaseConfig.initialiseSchema -> {
|
||||
migrateOlderDatabaseToUseLiquibase(existingCheckpoints)
|
||||
runMigration(existingCheckpoints)
|
||||
fun runMigration(existingCheckpoints: Boolean, schemas: Set<MappedSchema>, forceThrowOnMissingMigration: Boolean) {
|
||||
val resourcesAndSourceInfo = prepareResources(schemas, forceThrowOnMissingMigration)
|
||||
|
||||
// current version of Liquibase appears to be non-threadsafe
|
||||
// this is apparent when multiple in-process nodes are all running migrations simultaneously
|
||||
mutex.withLock {
|
||||
dataSource.connection.use { connection ->
|
||||
val (runner, _, shouldBlockOnCheckpoints) = prepareRunner(connection, resourcesAndSourceInfo)
|
||||
if (shouldBlockOnCheckpoints && existingCheckpoints)
|
||||
throw CheckpointsException()
|
||||
try {
|
||||
runner.update(Contexts().toString())
|
||||
} catch (exp: LiquibaseException) {
|
||||
throw DatabaseMigrationException(exp.message, exp)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures that the database is up to date with the latest migration changes.
|
||||
* @param schemas The set of MappedSchemas to check
|
||||
* @param forceThrowOnMissingMigration throws an exception if a mapped schema is missing the migration resource. Can be set to false
|
||||
* when allowing hibernate to create missing schemas in dev or tests.
|
||||
*/
|
||||
fun checkState(schemas: Set<MappedSchema>, forceThrowOnMissingMigration: Boolean) {
|
||||
val resourcesAndSourceInfo = prepareResources(schemas, forceThrowOnMissingMigration)
|
||||
|
||||
// current version of Liquibase appears to be non-threadsafe
|
||||
// this is apparent when multiple in-process nodes are all running migrations simultaneously
|
||||
mutex.withLock {
|
||||
dataSource.connection.use { connection ->
|
||||
val (_, changeToRunCount, _) = prepareRunner(connection, resourcesAndSourceInfo)
|
||||
if (changeToRunCount > 0)
|
||||
throw OutstandingDatabaseChangesException(changeToRunCount)
|
||||
}
|
||||
else -> checkState()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Will run the Liquibase migration on the actual database.
|
||||
* Synchronises the changelog table with the schema descriptions passed in without applying any of the changes to the database.
|
||||
* This can be used when migrating a CorDapp that had its schema generated by hibernate to liquibase schema migration, or when
|
||||
* updating from a version of Corda that does not use liquibase for CorDapps
|
||||
* **Warning** - this will not check if the matching schema changes have been applied, it will just generate the changelog
|
||||
* It must not be run on a newly installed CorDapp.
|
||||
* @param schemas The set of schemas to add to the changelog
|
||||
* @param forceThrowOnMissingMigration throw an exception if a mapped schema is missing its migration resource
|
||||
*/
|
||||
private fun runMigration(existingCheckpoints: Boolean) = doRunMigration(run = true, check = false, existingCheckpoints = existingCheckpoints)
|
||||
fun synchroniseSchemas(schemas: Set<MappedSchema>, forceThrowOnMissingMigration: Boolean) {
|
||||
val resourcesAndSourceInfo = prepareResources(schemas, forceThrowOnMissingMigration)
|
||||
|
||||
/**
|
||||
* Ensures that the database is up to date with the latest migration changes.
|
||||
*/
|
||||
private fun checkState() = doRunMigration(run = false, check = true)
|
||||
// current version of Liquibase appears to be non-threadsafe
|
||||
// this is apparent when multiple in-process nodes are all running migrations simultaneously
|
||||
mutex.withLock {
|
||||
dataSource.connection.use { connection ->
|
||||
val (runner, _, _) = prepareRunner(connection, resourcesAndSourceInfo)
|
||||
try {
|
||||
runner.changeLogSync(Contexts().toString())
|
||||
} catch (exp: LiquibaseException) {
|
||||
throw DatabaseMigrationException(exp.message, exp)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Create a resourse accessor that aggregates the changelogs included in the schemas into one dynamic stream. */
|
||||
private class CustomResourceAccessor(val dynamicInclude: String, val changelogList: List<String?>, classLoader: ClassLoader) : ClassLoaderResourceAccessor(classLoader) {
|
||||
/** Create a resource accessor that aggregates the changelogs included in the schemas into one dynamic stream. */
|
||||
protected class CustomResourceAccessor(val dynamicInclude: String, val changelogList: List<String?>, classLoader: ClassLoader) :
|
||||
ClassLoaderResourceAccessor(classLoader) {
|
||||
override fun getResourcesAsStream(path: String): Set<InputStream> {
|
||||
if (path == dynamicInclude) {
|
||||
// Create a map in Liquibase format including all migration files.
|
||||
val includeAllFiles = mapOf("databaseChangeLog" to changelogList.filter { it != null }.map { file -> mapOf("include" to mapOf("file" to file)) })
|
||||
val includeAllFiles = mapOf("databaseChangeLog"
|
||||
to changelogList.filterNotNull().map { file -> mapOf("include" to mapOf("file" to file)) })
|
||||
|
||||
// Transform it to json.
|
||||
val includeAllFilesJson = ObjectMapper().writeValueAsBytes(includeAllFiles)
|
||||
@ -91,7 +137,7 @@ class SchemaMigration(
|
||||
}
|
||||
}
|
||||
|
||||
private fun logOrThrowMigrationError(mappedSchema: MappedSchema): String? =
|
||||
private fun logOrThrowMigrationError(mappedSchema: MappedSchema, forceThrowOnMissingMigration: Boolean): String? =
|
||||
if (forceThrowOnMissingMigration) {
|
||||
throw MissingMigrationException(mappedSchema)
|
||||
} else {
|
||||
@ -99,143 +145,38 @@ class SchemaMigration(
|
||||
null
|
||||
}
|
||||
|
||||
private fun doRunMigration(
|
||||
run: Boolean,
|
||||
check: Boolean,
|
||||
existingCheckpoints: Boolean? = null
|
||||
) {
|
||||
// Virtual file name of the changelog that includes all schemas.
|
||||
val dynamicInclude = "master.changelog.json"
|
||||
|
||||
// Virtual file name of the changelog that includes all schemas.
|
||||
val dynamicInclude = "master.changelog.json"
|
||||
|
||||
dataSource.connection.use { connection ->
|
||||
|
||||
// Collect all changelog files referenced in the included schemas.
|
||||
val changelogList = schemas.mapNotNull { mappedSchema ->
|
||||
val resource = getMigrationResource(mappedSchema, classLoader)
|
||||
when {
|
||||
resource != null -> resource
|
||||
// Corda OS FinanceApp in v3 has no Liquibase script, so no error is raised
|
||||
(mappedSchema::class.qualifiedName == "net.corda.finance.schemas.CashSchemaV1" || mappedSchema::class.qualifiedName == "net.corda.finance.schemas.CommercialPaperSchemaV1") && mappedSchema.migrationResource == null -> null
|
||||
else -> logOrThrowMigrationError(mappedSchema)
|
||||
}
|
||||
}
|
||||
|
||||
val path = currentDirectory?.toString()
|
||||
if (path != null) {
|
||||
System.setProperty(NODE_BASE_DIR_KEY, path) // base dir for any custom change set which may need to load a file (currently AttachmentVersionNumberMigration)
|
||||
}
|
||||
if (ourName != null) {
|
||||
System.setProperty(NODE_X500_NAME, ourName.toString())
|
||||
}
|
||||
val customResourceAccessor = CustomResourceAccessor(dynamicInclude, changelogList, classLoader)
|
||||
checkResourcesInClassPath(changelogList)
|
||||
|
||||
// current version of Liquibase appears to be non-threadsafe
|
||||
// this is apparent when multiple in-process nodes are all running migrations simultaneously
|
||||
mutex.withLock {
|
||||
val liquibase = Liquibase(dynamicInclude, customResourceAccessor, getLiquibaseDatabase(JdbcConnection(connection)))
|
||||
|
||||
val unRunChanges = liquibase.listUnrunChangeSets(Contexts(), LabelExpression())
|
||||
|
||||
when {
|
||||
(run && !check) && (unRunChanges.isNotEmpty() && existingCheckpoints!!) -> throw CheckpointsException() // Do not allow database migration when there are checkpoints
|
||||
run && !check -> liquibase.update(Contexts())
|
||||
check && !run && unRunChanges.isNotEmpty() -> throw OutstandingDatabaseChangesException(unRunChanges.size)
|
||||
check && !run -> {
|
||||
} // Do nothing will be interpreted as "check succeeded"
|
||||
else -> throw IllegalStateException("Invalid usage.")
|
||||
}
|
||||
protected fun prepareResources(schemas: Set<MappedSchema>, forceThrowOnMissingMigration: Boolean): List<Pair<CustomResourceAccessor, String>> {
|
||||
// Collect all changelog files referenced in the included schemas.
|
||||
val changelogList = schemas.mapNotNull { mappedSchema ->
|
||||
val resource = getMigrationResource(mappedSchema, classLoader)
|
||||
when {
|
||||
resource != null -> resource
|
||||
else -> logOrThrowMigrationError(mappedSchema, forceThrowOnMissingMigration)
|
||||
}
|
||||
}
|
||||
|
||||
val path = currentDirectory?.toString()
|
||||
if (path != null) {
|
||||
System.setProperty(NODE_BASE_DIR_KEY, path) // base dir for any custom change set which may need to load a file (currently AttachmentVersionNumberMigration)
|
||||
}
|
||||
if (ourName != null) {
|
||||
System.setProperty(NODE_X500_NAME, ourName.toString())
|
||||
}
|
||||
val customResourceAccessor = CustomResourceAccessor(dynamicInclude, changelogList, classLoader)
|
||||
checkResourcesInClassPath(changelogList)
|
||||
return listOf(Pair(customResourceAccessor, ""))
|
||||
}
|
||||
|
||||
private fun getLiquibaseDatabase(conn: JdbcConnection): Database {
|
||||
return DatabaseFactory.getInstance().findCorrectDatabaseImplementation(conn)
|
||||
}
|
||||
protected fun prepareRunner(connection: Connection,
|
||||
resourcesAndSourceInfo: List<Pair<CustomResourceAccessor, String>>): Triple<Liquibase, Int, Boolean> {
|
||||
require(resourcesAndSourceInfo.size == 1)
|
||||
val liquibase = Liquibase(dynamicInclude, resourcesAndSourceInfo.single().first, databaseFactory.getLiquibaseDatabase(JdbcConnection(connection)))
|
||||
|
||||
/** For existing database created before verions 4.0 add Liquibase support - creates DATABASECHANGELOG and DATABASECHANGELOGLOCK tables and marks changesets as executed. */
|
||||
private fun migrateOlderDatabaseToUseLiquibase(existingCheckpoints: Boolean): Boolean {
|
||||
val isFinanceAppWithLiquibase = schemas.any { schema ->
|
||||
(schema::class.qualifiedName == "net.corda.finance.schemas.CashSchemaV1"
|
||||
|| schema::class.qualifiedName == "net.corda.finance.schemas.CommercialPaperSchemaV1")
|
||||
&& schema.migrationResource != null
|
||||
}
|
||||
val noLiquibaseEntryLogForFinanceApp: (Statement) -> Boolean = {
|
||||
it.execute("SELECT COUNT(*) FROM DATABASECHANGELOG WHERE FILENAME IN ('migration/cash.changelog-init.xml','migration/commercial-paper.changelog-init.xml')")
|
||||
if (it.resultSet.next())
|
||||
it.resultSet.getInt(1) == 0
|
||||
else
|
||||
true
|
||||
}
|
||||
|
||||
val (isExistingDBWithoutLiquibase, isFinanceAppWithLiquibaseNotMigrated) = dataSource.connection.use {
|
||||
|
||||
val existingDatabase = it.metaData.getTables(null, null, "NODE%", null).next()
|
||||
// Lower case names for PostgreSQL
|
||||
|| it.metaData.getTables(null, null, "node%", null).next()
|
||||
|
||||
val hasLiquibase = it.metaData.getTables(null, null, "DATABASECHANGELOG%", null).next()
|
||||
// Lower case names for PostgreSQL
|
||||
|| it.metaData.getTables(null, null, "databasechangelog%", null).next()
|
||||
|
||||
val isFinanceAppWithLiquibaseNotMigrated = isFinanceAppWithLiquibase // If Finance App is pre v4.0 then no need to migrate it so no need to check.
|
||||
&& existingDatabase
|
||||
&& (!hasLiquibase // Migrate as other tables.
|
||||
|| (hasLiquibase && it.createStatement().use { noLiquibaseEntryLogForFinanceApp(it) })) // If Liquibase is already in the database check if Finance App schema log is missing.
|
||||
|
||||
Pair(existingDatabase && !hasLiquibase, isFinanceAppWithLiquibaseNotMigrated)
|
||||
}
|
||||
|
||||
if (isExistingDBWithoutLiquibase && existingCheckpoints)
|
||||
throw CheckpointsException()
|
||||
|
||||
// Schema migrations pre release 4.0
|
||||
val preV4Baseline = mutableListOf<String>()
|
||||
if (isExistingDBWithoutLiquibase) {
|
||||
preV4Baseline.addAll(listOf("migration/common.changelog-init.xml",
|
||||
"migration/node-info.changelog-init.xml",
|
||||
"migration/node-info.changelog-v1.xml",
|
||||
"migration/node-info.changelog-v2.xml",
|
||||
"migration/node-core.changelog-init.xml",
|
||||
"migration/node-core.changelog-v3.xml",
|
||||
"migration/node-core.changelog-v4.xml",
|
||||
"migration/node-core.changelog-v5.xml",
|
||||
"migration/node-core.changelog-pkey.xml",
|
||||
"migration/vault-schema.changelog-init.xml",
|
||||
"migration/vault-schema.changelog-v3.xml",
|
||||
"migration/vault-schema.changelog-v4.xml",
|
||||
"migration/vault-schema.changelog-pkey.xml"))
|
||||
|
||||
if (schemas.any { schema -> schema.migrationResource == "node-notary.changelog-master" })
|
||||
preV4Baseline.addAll(listOf("migration/node-notary.changelog-init.xml",
|
||||
"migration/node-notary.changelog-v1.xml"))
|
||||
|
||||
if (schemas.any { schema -> schema.migrationResource == "notary-raft.changelog-master" })
|
||||
preV4Baseline.addAll(listOf("migration/notary-raft.changelog-init.xml",
|
||||
"migration/notary-raft.changelog-v1.xml"))
|
||||
|
||||
if (schemas.any { schema -> schema.migrationResource == "notary-bft-smart.changelog-master" })
|
||||
preV4Baseline.addAll(listOf("migration/notary-bft-smart.changelog-init.xml",
|
||||
"migration/notary-bft-smart.changelog-v1.xml"))
|
||||
}
|
||||
if (isFinanceAppWithLiquibaseNotMigrated) {
|
||||
preV4Baseline.addAll(listOf("migration/cash.changelog-init.xml",
|
||||
"migration/cash.changelog-v1.xml",
|
||||
"migration/commercial-paper.changelog-init.xml",
|
||||
"migration/commercial-paper.changelog-v1.xml"))
|
||||
}
|
||||
|
||||
if (preV4Baseline.isNotEmpty()) {
|
||||
val dynamicInclude = "master.changelog.json" // Virtual file name of the changelog that includes all schemas.
|
||||
checkResourcesInClassPath(preV4Baseline)
|
||||
dataSource.connection.use { connection ->
|
||||
val customResourceAccessor = CustomResourceAccessor(dynamicInclude, preV4Baseline, classLoader)
|
||||
val liquibase = Liquibase(dynamicInclude, customResourceAccessor, getLiquibaseDatabase(JdbcConnection(connection)))
|
||||
liquibase.changeLogSync(Contexts(), LabelExpression())
|
||||
}
|
||||
}
|
||||
return isExistingDBWithoutLiquibase || isFinanceAppWithLiquibaseNotMigrated
|
||||
val unRunChanges = liquibase.listUnrunChangeSets(Contexts(), LabelExpression())
|
||||
return Triple(liquibase, unRunChanges.size, !unRunChanges.isEmpty())
|
||||
}
|
||||
|
||||
private fun checkResourcesInClassPath(resources: List<String?>) {
|
||||
@ -247,7 +188,7 @@ class SchemaMigration(
|
||||
}
|
||||
}
|
||||
|
||||
open class DatabaseMigrationException(message: String) : IllegalArgumentException(message) {
|
||||
open class DatabaseMigrationException(message: String?, cause: Throwable? = null) : IllegalArgumentException(message, cause) {
|
||||
override val message: String = super.message!!
|
||||
}
|
||||
|
||||
@ -269,6 +210,6 @@ class CheckpointsException : DatabaseMigrationException("Attempting to update th
|
||||
|
||||
class DatabaseIncompatibleException(@Suppress("MemberVisibilityCanBePrivate") private val reason: String) : DatabaseMigrationException(errorMessageFor(reason)) {
|
||||
internal companion object {
|
||||
fun errorMessageFor(reason: String): String = "Incompatible database schema version detected, please run the node with configuration option database.initialiseSchema=true. Reason: $reason"
|
||||
fun errorMessageFor(reason: String): String = "Incompatible database schema version detected, please run schema migration scripts (node with sub-command run-migration-scripts). Reason: $reason"
|
||||
}
|
||||
}
|
@ -3,9 +3,8 @@ package net.corda.nodeapi.internal.persistence.factory
|
||||
import net.corda.core.schemas.MappedSchema
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.toHexString
|
||||
import net.corda.nodeapi.internal.persistence.DatabaseConfig
|
||||
import net.corda.nodeapi.internal.persistence.HibernateConfiguration
|
||||
import net.corda.nodeapi.internal.persistence.SchemaInitializationType
|
||||
import net.corda.nodeapi.internal.persistence.TransactionIsolationLevel
|
||||
import org.hibernate.SessionFactory
|
||||
import org.hibernate.boot.Metadata
|
||||
import org.hibernate.boot.MetadataBuilder
|
||||
@ -26,22 +25,19 @@ abstract class BaseSessionFactoryFactory : CordaSessionFactoryFactory {
|
||||
private val logger = contextLogger()
|
||||
}
|
||||
|
||||
open fun buildHibernateConfig(databaseConfig: DatabaseConfig, metadataSources: MetadataSources): Configuration {
|
||||
open fun buildHibernateConfig(metadataSources: MetadataSources, allowHibernateToManageAppSchema: Boolean): Configuration {
|
||||
val hbm2dll: String =
|
||||
if (databaseConfig.initialiseSchema && databaseConfig.initialiseAppSchema == SchemaInitializationType.UPDATE) {
|
||||
if (allowHibernateToManageAppSchema) {
|
||||
"update"
|
||||
} else if ((!databaseConfig.initialiseSchema && databaseConfig.initialiseAppSchema == SchemaInitializationType.UPDATE)
|
||||
|| databaseConfig.initialiseAppSchema == SchemaInitializationType.VALIDATE) {
|
||||
} else {
|
||||
"validate"
|
||||
} else {
|
||||
"none"
|
||||
}
|
||||
// We set a connection provider as the auto schema generation requires it. The auto schema generation will not
|
||||
// necessarily remain and would likely be replaced by something like Liquibase. For now it is very convenient though.
|
||||
return Configuration(metadataSources).setProperty("hibernate.connection.provider_class", HibernateConfiguration.NodeDatabaseConnectionProvider::class.java.name)
|
||||
.setProperty("hibernate.format_sql", "true")
|
||||
.setProperty("javax.persistence.validation.mode", "none")
|
||||
.setProperty("hibernate.connection.isolation", databaseConfig.transactionIsolationLevel.jdbcValue.toString())
|
||||
.setProperty("hibernate.connection.isolation", TransactionIsolationLevel.default.jdbcValue.toString())
|
||||
.setProperty("hibernate.hbm2ddl.auto", hbm2dll)
|
||||
.setProperty("hibernate.jdbc.time_zone", "UTC")
|
||||
}
|
||||
@ -85,15 +81,15 @@ abstract class BaseSessionFactoryFactory : CordaSessionFactoryFactory {
|
||||
}
|
||||
|
||||
final override fun makeSessionFactoryForSchemas(
|
||||
databaseConfig: DatabaseConfig,
|
||||
schemas: Set<MappedSchema>,
|
||||
customClassLoader: ClassLoader?,
|
||||
attributeConverters: Collection<AttributeConverter<*, *>>): SessionFactory {
|
||||
attributeConverters: Collection<AttributeConverter<*, *>>,
|
||||
allowHibernateToMananageAppSchema: Boolean): SessionFactory {
|
||||
logger.info("Creating session factory for schemas: $schemas")
|
||||
val serviceRegistry = BootstrapServiceRegistryBuilder().build()
|
||||
val metadataSources = MetadataSources(serviceRegistry)
|
||||
|
||||
val config = buildHibernateConfig(databaseConfig, metadataSources)
|
||||
val config = buildHibernateConfig(metadataSources, allowHibernateToMananageAppSchema)
|
||||
schemas.forEach { schema ->
|
||||
schema.mappedTypes.forEach { config.addAnnotatedClass(it) }
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
package net.corda.nodeapi.internal.persistence.factory
|
||||
|
||||
import net.corda.core.schemas.MappedSchema
|
||||
import net.corda.nodeapi.internal.persistence.DatabaseConfig
|
||||
import org.hibernate.SessionFactory
|
||||
import org.hibernate.boot.Metadata
|
||||
import org.hibernate.boot.MetadataBuilder
|
||||
@ -11,10 +10,10 @@ interface CordaSessionFactoryFactory {
|
||||
val databaseType: String
|
||||
fun canHandleDatabase(jdbcUrl: String): Boolean
|
||||
fun makeSessionFactoryForSchemas(
|
||||
databaseConfig: DatabaseConfig,
|
||||
schemas: Set<MappedSchema>,
|
||||
customClassLoader: ClassLoader?,
|
||||
attributeConverters: Collection<AttributeConverter<*, *>>): SessionFactory
|
||||
attributeConverters: Collection<AttributeConverter<*, *>>,
|
||||
allowHibernateToMananageAppSchema: Boolean): SessionFactory
|
||||
fun getExtraConfiguration(key: String): Any?
|
||||
fun buildHibernateMetadata(metadataBuilder: MetadataBuilder, attributeConverters: Collection<AttributeConverter<*, *>>): Metadata
|
||||
}
|
@ -14,7 +14,7 @@ class HibernateConfigurationFactoryLoadingTest {
        val cacheFactory = mock<NamedCacheFactory>()
        HibernateConfiguration(
                emptySet(),
                DatabaseConfig(),
                false,
                emptyList(),
                jdbcUrl,
                cacheFactory)
|
@ -1,32 +0,0 @@
package net.corda.node.endurance

import net.corda.core.utilities.getOrThrow
import net.corda.testing.core.ALICE_NAME
import net.corda.testing.core.BOB_NAME
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized

@RunWith(Parameterized::class)
class NodesStartStopSingleVmTests(@Suppress("unused") private val iteration: Int) {

    companion object {
        @JvmStatic
        @Parameterized.Parameters(name = "iteration = {0}")
        fun iterations(): Iterable<Array<Int>> {
            return (1..60).map { arrayOf(it) }
        }
    }

    @Test(timeout = 300_000)
    fun nodesStartStop() {
        driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
            val alice = startNode(providedName = ALICE_NAME)
            val bob = startNode(providedName = BOB_NAME)
            alice.getOrThrow()
            bob.getOrThrow()
        }
    }
}
|
@ -37,7 +37,8 @@ class FlowCheckpointVersionNodeStartupCheckTest {
                startNodesInProcess = false,
                inMemoryDB = false, // Ensure database is persisted between node restarts so we can keep suspended flows
                cordappsForAllNodes = emptyList(),
                notarySpecs = emptyList()
                notarySpecs = emptyList(),
                allowHibernateToManageAppSchema = false
        )) {
            createSuspendedFlowInBob()
            val cordappsDir = baseDirectory(BOB_NAME) / "cordapps"
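This and several of the following driver tests now pin allowHibernateToManageAppSchema to false, so the node under test must obtain its app schema from migration scripts rather than from Hibernate. A hedged sketch of the bare pattern; the parameter names are those used in these diffs, and the single-node body is illustrative only:

    import net.corda.core.utilities.getOrThrow
    import net.corda.testing.core.ALICE_NAME
    import net.corda.testing.driver.DriverParameters
    import net.corda.testing.driver.driver

    fun main() {
        driver(DriverParameters(
                startNodesInProcess = false,
                notarySpecs = emptyList(),
                allowHibernateToManageAppSchema = false  // app schema must come from run-migration-scripts
        )) {
            startNode(providedName = ALICE_NAME).getOrThrow()
        }
    }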
|
@ -86,7 +86,7 @@ class NodeStatePersistenceTests {
            nodeName
        }()

        val nodeHandle = startNode(providedName = nodeName, rpcUsers = listOf(user), customOverrides = mapOf("devMode" to "false")).getOrThrow()
        val nodeHandle = startNode(providedName = nodeName, rpcUsers = listOf(user)).getOrThrow()
        val result = CordaRPCClient(nodeHandle.rpcAddress).start(user.username, user.password).use {
            val page = it.proxy.vaultQuery(MessageState::class.java)
            page.states.singleOrNull()
|
@ -47,7 +47,7 @@ class DistributedServiceTests {
            invokeRpc(CordaRPCOps::stateMachinesFeed))
    )
    driver(DriverParameters(
            cordappsForAllNodes = FINANCE_CORDAPPS + cordappWithPackages("net.corda.notary.raft"),
            cordappsForAllNodes = FINANCE_CORDAPPS + cordappWithPackages(),
            notarySpecs = listOf(NotarySpec(
                    DUMMY_NOTARY_NAME,
                    rpcUsers = listOf(testUser),
|
@ -115,7 +115,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
            ).returnValue.getOrThrow(30.seconds)
        }

        alice.rpc.assertNumberOfCheckpoints(failed = 1)
        alice.rpc.assertNumberOfCheckpointsAllZero()
        alice.rpc.assertHospitalCounts(propagated = 1)
        assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
    }
@ -235,7 +235,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     *
     * Each time the flow retries, it starts from the beginning of the flow (due to being in an unstarted state).
     */
    @Test(timeout = 300_000)
    @Test(timeout = 450_000)
    fun `error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
        startDriver {
            val (charlie, alice, port) = createNodeAndBytemanNode(CHARLIE_NAME, ALICE_NAME)
@ -390,7 +390,7 @@ class StateMachineFlowInitErrorHandlingTest : StateMachineErrorHandlingTest() {
     *
     * Each time the flow retries, it starts from the beginning of the flow (due to being in an unstarted state).
     */
    @Test(timeout = 300_000)
    @Test(timeout = 450_000)
    fun `responding flow - error during transition with CommitTransaction action that occurs during flow initialisation will retry and be kept for observation if error persists`() {
        startDriver {
            val (alice, charlie, port) = createNodeAndBytemanNode(ALICE_NAME, CHARLIE_NAME)
|
@ -1,15 +1,21 @@
|
||||
package net.corda.node.services.statemachine
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import net.corda.core.CordaRuntimeException
|
||||
import net.corda.core.flows.FlowException
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.StartableByRPC
|
||||
import net.corda.core.messaging.startFlow
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.node.services.api.CheckpointStorage
|
||||
import net.corda.node.services.messaging.DeduplicationHandler
|
||||
import net.corda.node.services.statemachine.transitions.StartedFlowTransition
|
||||
import net.corda.node.services.statemachine.transitions.TopLevelTransition
|
||||
import net.corda.testing.core.ALICE_NAME
|
||||
import net.corda.testing.core.CHARLIE_NAME
|
||||
import net.corda.testing.core.singleIdentity
|
||||
import org.assertj.core.api.Assertions.assertThatExceptionOfType
|
||||
import org.junit.Test
|
||||
import java.util.concurrent.ExecutorService
|
||||
import java.util.concurrent.Executors
|
||||
@ -206,7 +212,7 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
|
||||
alice.rpc.startFlow(StateMachineErrorHandlingTest::ThrowAnErrorFlow).returnValue.getOrThrow(60.seconds)
|
||||
}
|
||||
|
||||
alice.rpc.assertNumberOfCheckpoints(failed = 1)
|
||||
alice.rpc.assertNumberOfCheckpointsAllZero()
|
||||
alice.rpc.assertHospitalCounts(
|
||||
propagated = 1,
|
||||
propagatedRetry = 3
|
||||
@ -648,4 +654,50 @@ class StateMachineGeneralErrorHandlingTest : StateMachineErrorHandlingTest() {
|
||||
assertEquals(0, charlie.rpc.stateMachinesSnapshot().size)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Throws an exception when creating a transition.
|
||||
*
|
||||
* The exception is thrown back to the flow, who catches it and returns a different exception, showing the exception returns to user
|
||||
* code and can be caught if needed.
|
||||
*/
|
||||
@Test(timeout = 300_000)
|
||||
fun `error during creation of transition that occurs after the first suspend will throw error into flow`() {
|
||||
startDriver {
|
||||
val (alice, port) = createBytemanNode(ALICE_NAME)
|
||||
|
||||
val rules = """
|
||||
RULE Throw exception when creating transition
|
||||
CLASS ${StartedFlowTransition::class.java.name}
|
||||
METHOD sleepTransition
|
||||
AT ENTRY
|
||||
IF true
|
||||
DO traceln("Throwing exception"); throw new java.lang.IllegalStateException("die dammit die")
|
||||
ENDRULE
|
||||
""".trimIndent()
|
||||
|
||||
submitBytemanRules(rules, port)
|
||||
|
||||
assertThatExceptionOfType(FlowException::class.java).isThrownBy {
|
||||
alice.rpc.startFlow(::SleepCatchAndRethrowFlow).returnValue.getOrThrow(30.seconds)
|
||||
}.withMessage("java.lang.IllegalStateException: die dammit die")
|
||||
|
||||
alice.rpc.assertNumberOfCheckpointsAllZero()
|
||||
alice.rpc.assertHospitalCounts(propagated = 1)
|
||||
assertEquals(0, alice.rpc.stateMachinesSnapshot().size)
|
||||
}
|
||||
}
|
||||
|
||||
@StartableByRPC
|
||||
class SleepCatchAndRethrowFlow : FlowLogic<String>() {
|
||||
@Suspendable
|
||||
override fun call(): String {
|
||||
try {
|
||||
sleep(5.seconds)
|
||||
} catch (e: IllegalStateException) {
|
||||
throw FlowException(e)
|
||||
}
|
||||
return "cant get here"
|
||||
}
|
||||
}
|
||||
}
|
@ -84,7 +84,8 @@ class NodeRegistrationTest {
                portAllocation = portAllocation,
                compatibilityZone = compatibilityZone,
                notarySpecs = listOf(NotarySpec(notaryName)),
                notaryCustomOverrides = mapOf("devMode" to false)
                notaryCustomOverrides = mapOf("devMode" to false),
                allowHibernateToManageAppSchema = false
        ) {
            startNode(providedName = aliceName, customOverrides = mapOf("devMode" to false)).getOrThrow()

|
@ -1,5 +1,6 @@
package net.corda.serialization.reproduction;

import com.google.common.io.LineProcessor;
import net.corda.client.rpc.CordaRPCClient;
import net.corda.core.concurrent.CordaFuture;
import net.corda.node.services.Permissions;
|
@ -44,7 +44,7 @@ class BootTests {
                rpc.startFlow(::ObjectInputStreamFlow).returnValue.getOrThrow()
            }
        }
        driver(DriverParameters(cordappsForAllNodes = listOf(enclosedCordapp()))) {
        driver(DriverParameters(cordappsForAllNodes = listOf(enclosedCordapp()), allowHibernateToManageAppSchema = false)) {
            val devModeNode = startNode(devParams).getOrThrow()
            val node = startNode(ALICE_NAME, devMode = false, parameters = params).getOrThrow()

|
@ -15,6 +15,7 @@ import net.corda.testing.core.singleIdentity
import net.corda.testing.driver.DriverParameters
import net.corda.testing.driver.driver
import net.corda.testing.node.User
import net.corda.testing.node.internal.enclosedCordapp
import org.assertj.core.api.Assertions.assertThat
import org.junit.Test

@ -23,7 +24,7 @@ class CordappScanningDriverTest {
    fun `sub-classed initiated flow pointing to the same initiating flow as its super-class`() {
        val user = User("u", "p", setOf(startFlow<ReceiveFlow>()))
        // The driver will automatically pick up the annotated flows below
        driver(DriverParameters(notarySpecs = emptyList())) {
        driver(DriverParameters(notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()))) {
            val (alice, bob) = listOf(
                    startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)),
                    startNode(providedName = BOB_NAME)).transpose().getOrThrow()
|
@ -1,62 +0,0 @@
|
||||
package net.corda.node
|
||||
|
||||
import net.corda.core.crypto.Crypto
|
||||
import net.corda.core.internal.div
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.node.services.config.configureDevKeyAndTrustStores
|
||||
import net.corda.nodeapi.internal.crypto.CertificateType
|
||||
import net.corda.nodeapi.internal.crypto.X509Utilities
|
||||
import net.corda.testing.core.ALICE_NAME
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import net.corda.coretesting.internal.stubs.CertificateStoreStubs
|
||||
import org.assertj.core.api.Assertions.assertThatThrownBy
|
||||
import org.junit.Test
|
||||
import javax.security.auth.x500.X500Principal
|
||||
|
||||
class NodeKeystoreCheckTest {
|
||||
@Test(timeout=300_000)
|
||||
fun `starting node in non-dev mode with no key store`() {
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = emptyList())) {
|
||||
assertThatThrownBy {
|
||||
startNode(customOverrides = mapOf("devMode" to false)).getOrThrow()
|
||||
}.hasMessageContaining("One or more keyStores (identity or TLS) or trustStore not found.")
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `node should throw exception if cert path does not chain to the trust root`() {
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = emptyList())) {
|
||||
// Create keystores.
|
||||
val keystorePassword = "password"
|
||||
val certificatesDirectory = baseDirectory(ALICE_NAME) / "certificates"
|
||||
val signingCertStore = CertificateStoreStubs.Signing.withCertificatesDirectory(certificatesDirectory, keystorePassword)
|
||||
val p2pSslConfig = CertificateStoreStubs.P2P.withCertificatesDirectory(certificatesDirectory, keyStorePassword = keystorePassword, trustStorePassword = keystorePassword)
|
||||
|
||||
p2pSslConfig.configureDevKeyAndTrustStores(ALICE_NAME, signingCertStore, certificatesDirectory)
|
||||
|
||||
// This should pass with correct keystore.
|
||||
val node = startNode(
|
||||
providedName = ALICE_NAME,
|
||||
customOverrides = mapOf("devMode" to false,
|
||||
"keyStorePassword" to keystorePassword,
|
||||
"trustStorePassword" to keystorePassword)
|
||||
).getOrThrow()
|
||||
node.stop()
|
||||
|
||||
// Fiddle with node keystore.
|
||||
signingCertStore.get().update {
|
||||
// Self signed root.
|
||||
val badRootKeyPair = Crypto.generateKeyPair()
|
||||
val badRoot = X509Utilities.createSelfSignedCACertificate(X500Principal("O=Bad Root,L=Lodnon,C=GB"), badRootKeyPair)
|
||||
val nodeCA = getCertificateAndKeyPair(X509Utilities.CORDA_CLIENT_CA, signingCertStore.entryPassword)
|
||||
val badNodeCACert = X509Utilities.createCertificate(CertificateType.NODE_CA, badRoot, badRootKeyPair, ALICE_NAME.x500Principal, nodeCA.keyPair.public)
|
||||
setPrivateKey(X509Utilities.CORDA_CLIENT_CA, nodeCA.keyPair.private, listOf(badNodeCACert, badRoot), signingCertStore.entryPassword)
|
||||
}
|
||||
|
||||
assertThatThrownBy {
|
||||
startNode(providedName = ALICE_NAME, customOverrides = mapOf("devMode" to false)).getOrThrow()
|
||||
}.hasMessage("Client CA certificate must chain to the trusted root.")
|
||||
}
|
||||
}
|
||||
}
|
@ -3,6 +3,7 @@ package net.corda.node.amqp
|
||||
import com.nhaarman.mockito_kotlin.doReturn
|
||||
import com.nhaarman.mockito_kotlin.mock
|
||||
import com.nhaarman.mockito_kotlin.whenever
|
||||
import net.corda.core.internal.JavaVersion
|
||||
import net.corda.core.internal.div
|
||||
import net.corda.core.toFuture
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
@ -19,6 +20,7 @@ import net.corda.nodeapi.internal.protonwrapper.netty.toRevocationConfig
|
||||
import net.corda.testing.core.ALICE_NAME
|
||||
import net.corda.testing.core.BOB_NAME
|
||||
import net.corda.testing.driver.internal.incrementalPortAllocation
|
||||
import org.junit.Assume.assumeFalse
|
||||
import org.junit.Before
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
@ -133,6 +135,9 @@ class AMQPClientSslErrorsTest(@Suppress("unused") private val iteration: Int) {
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun trivialClientServerExchange() {
|
||||
// SSL works quite differently in JDK 11 and re-work is needed
|
||||
assumeFalse(JavaVersion.isVersionAtLeast(JavaVersion.Java_11))
|
||||
|
||||
val serverPort = portAllocation.nextPort()
|
||||
val serverThread = ServerThread(serverKeyManagerFactory, serverTrustManagerFactory, serverPort).also { it.start() }
|
||||
|
||||
@ -168,6 +173,9 @@ class AMQPClientSslErrorsTest(@Suppress("unused") private val iteration: Int) {
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun amqpClientServerConnect() {
|
||||
// SSL works quite differently in JDK 11 and re-work is needed
|
||||
assumeFalse(JavaVersion.isVersionAtLeast(JavaVersion.Java_11))
|
||||
|
||||
val serverPort = portAllocation.nextPort()
|
||||
val serverThread = ServerThread(serverKeyManagerFactory, serverTrustManagerFactory, serverPort)
|
||||
.also { it.start() }
|
||||
@ -188,6 +196,9 @@ class AMQPClientSslErrorsTest(@Suppress("unused") private val iteration: Int) {
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun amqpClientServerHandshakeTimeout() {
|
||||
// SSL works quite differently in JDK 11 and re-work is needed
|
||||
assumeFalse(JavaVersion.isVersionAtLeast(JavaVersion.Java_11))
|
||||
|
||||
val serverPort = portAllocation.nextPort()
|
||||
val serverThread = ServerThread(serverKeyManagerFactory, serverTrustManagerFactory, serverPort, 5.seconds)
|
||||
.also { it.start() }
|
||||
|
@ -34,9 +34,11 @@ import net.corda.testing.node.internal.FINANCE_CORDAPPS
|
||||
import net.corda.testing.node.internal.enclosedCordapp
|
||||
import org.junit.Test
|
||||
import java.sql.SQLTransientConnectionException
|
||||
import java.util.concurrent.Semaphore
|
||||
import java.util.concurrent.ConcurrentLinkedQueue
|
||||
import java.util.concurrent.CountDownLatch
|
||||
import java.util.concurrent.TimeUnit
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.assertNull
|
||||
import kotlin.test.assertTrue
|
||||
|
||||
class FlowReloadAfterCheckpointTest {
|
||||
|
||||
@ -46,9 +48,9 @@ class FlowReloadAfterCheckpointTest {
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true`() {
|
||||
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
|
||||
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
|
||||
reloads.add(id)
|
||||
}
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
|
||||
|
||||
@ -65,16 +67,16 @@ class FlowReloadAfterCheckpointTest {
|
||||
val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, false)
|
||||
val flowStartedByAlice = handle.id
|
||||
handle.returnValue.getOrThrow()
|
||||
assertEquals(5, reloadCounts[flowStartedByAlice])
|
||||
assertEquals(6, reloadCounts[ReloadFromCheckpointResponder.flowId])
|
||||
assertEquals(5, reloads.filter { it == flowStartedByAlice }.count())
|
||||
assertEquals(6, reloads.filter { it == ReloadFromCheckpointResponder.flowId }.count())
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `flow will not reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is false`() {
|
||||
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
|
||||
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
|
||||
reloads.add(id)
|
||||
}
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
|
||||
|
||||
@ -89,24 +91,22 @@ class FlowReloadAfterCheckpointTest {
|
||||
.getOrThrow()
|
||||
|
||||
val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, false)
|
||||
val flowStartedByAlice = handle.id
|
||||
handle.returnValue.getOrThrow()
|
||||
assertNull(reloadCounts[flowStartedByAlice])
|
||||
assertNull(reloadCounts[ReloadFromCheckpointResponder.flowId])
|
||||
assertEquals(0, reloads.size)
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true and be kept for observation due to failed deserialization`() {
|
||||
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
|
||||
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
|
||||
reloads.add(id)
|
||||
}
|
||||
lateinit var flowKeptForObservation: StateMachineRunId
|
||||
val lock = Semaphore(0)
|
||||
val lock = CountDownLatch(1)
|
||||
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { id, _ ->
|
||||
flowKeptForObservation = id
|
||||
lock.release()
|
||||
lock.countDown()
|
||||
}
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
|
||||
|
||||
@ -122,18 +122,18 @@ class FlowReloadAfterCheckpointTest {
|
||||
|
||||
val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), true, false, false)
|
||||
val flowStartedByAlice = handle.id
|
||||
lock.acquire()
|
||||
lock.await()
|
||||
assertEquals(flowStartedByAlice, flowKeptForObservation)
|
||||
assertEquals(4, reloadCounts[flowStartedByAlice])
|
||||
assertEquals(4, reloadCounts[ReloadFromCheckpointResponder.flowId])
|
||||
assertEquals(4, reloads.filter { it == flowStartedByAlice }.count())
|
||||
assertEquals(4, reloads.filter { it == ReloadFromCheckpointResponder.flowId }.count())
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `flow will reload from a previous checkpoint after calling suspending function and skipping the persisting the current checkpoint when reloadCheckpointAfterSuspend is true`() {
|
||||
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
|
||||
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
|
||||
reloads.add(id)
|
||||
}
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
|
||||
|
||||
@ -150,8 +150,8 @@ class FlowReloadAfterCheckpointTest {
|
||||
val handle = alice.rpc.startFlow(::ReloadFromCheckpointFlow, bob.nodeInfo.singleIdentity(), false, false, true)
|
||||
val flowStartedByAlice = handle.id
|
||||
handle.returnValue.getOrThrow()
|
||||
assertEquals(5, reloadCounts[flowStartedByAlice])
|
||||
assertEquals(6, reloadCounts[ReloadFromCheckpointResponder.flowId])
|
||||
assertEquals(5, reloads.filter { it == flowStartedByAlice }.count())
|
||||
assertEquals(6, reloads.filter { it == ReloadFromCheckpointResponder.flowId }.count())
|
||||
}
|
||||
}
|
||||
|
||||
@ -189,8 +189,8 @@ class FlowReloadAfterCheckpointTest {
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `timed flow will reload from initial checkpoint after calling a suspending function when reloadCheckpointAfterSuspend is true`() {
|
||||
var reloadCount = 0
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { runId -> reloads.add(runId) }
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
|
||||
|
||||
val alice = startNode(
|
||||
@ -199,14 +199,14 @@ class FlowReloadAfterCheckpointTest {
|
||||
).getOrThrow()
|
||||
|
||||
alice.rpc.startFlow(::MyTimedFlow).returnValue.getOrThrow()
|
||||
assertEquals(5, reloadCount)
|
||||
assertEquals(5, reloads.size)
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `flow will correctly retry after an error when reloadCheckpointAfterSuspend is true`() {
|
||||
var reloadCount = 0
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { runId -> reloads.add(runId) }
|
||||
var timesDischarged = 0
|
||||
StaffedFlowHospital.onFlowDischarged.add { _, _ -> timesDischarged += 1 }
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
|
||||
@ -217,15 +217,21 @@ class FlowReloadAfterCheckpointTest {
|
||||
).getOrThrow()
|
||||
|
||||
alice.rpc.startFlow(::TransientConnectionFailureFlow).returnValue.getOrThrow()
|
||||
assertEquals(5, reloadCount)
|
||||
assertEquals(5, reloads.size)
|
||||
assertEquals(3, timesDischarged)
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `flow continues reloading from checkpoints after node restart when reloadCheckpointAfterSuspend is true`() {
|
||||
var reloadCount = 0
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
val firstLatch = CountDownLatch(2)
|
||||
val secondLatch = CountDownLatch(5)
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { runId ->
|
||||
reloads.add(runId)
|
||||
firstLatch.countDown()
|
||||
secondLatch.countDown()
|
||||
}
|
||||
driver(
|
||||
DriverParameters(
|
||||
inMemoryDB = false,
|
||||
@ -241,25 +247,31 @@ class FlowReloadAfterCheckpointTest {
|
||||
).getOrThrow()
|
||||
|
||||
alice.rpc.startFlow(::MyHospitalizingFlow)
|
||||
Thread.sleep(10.seconds.toMillis())
|
||||
|
||||
assertTrue { firstLatch.await(10, TimeUnit.SECONDS) }
|
||||
alice.stop()
|
||||
assertEquals(2, reloads.size)
|
||||
|
||||
// Set up a new latch
|
||||
startNode(
|
||||
providedName = ALICE_NAME,
|
||||
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
|
||||
).getOrThrow()
|
||||
|
||||
Thread.sleep(20.seconds.toMillis())
|
||||
|
||||
assertEquals(5, reloadCount)
|
||||
assertTrue { secondLatch.await(20, TimeUnit.SECONDS) }
|
||||
assertEquals(5, reloads.size)
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `idempotent flow continues reloading from checkpoints after node restart when reloadCheckpointAfterSuspend is true`() {
|
||||
var reloadCount = 0
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { _ -> reloadCount += 1 }
|
||||
// restarts completely from the beginning and forgets the in-memory reload count therefore
|
||||
// it reloads an extra 2 times for checkpoints it had already reloaded before the node shutdown
|
||||
val reloadsExpected = CountDownLatch(7)
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { runId ->
|
||||
reloads.add(runId)
|
||||
reloadsExpected.countDown()
|
||||
}
|
||||
driver(
|
||||
DriverParameters(
|
||||
inMemoryDB = false,
|
||||
@ -284,19 +296,18 @@ class FlowReloadAfterCheckpointTest {
|
||||
customOverrides = mapOf(NodeConfiguration::reloadCheckpointAfterSuspend.name to true)
|
||||
).getOrThrow()
|
||||
|
||||
Thread.sleep(20.seconds.toMillis())
|
||||
|
||||
// restarts completely from the beginning and forgets the in-memory reload count therefore
|
||||
// it reloads an extra 2 times for checkpoints it had already reloaded before the node shutdown
|
||||
assertEquals(7, reloadCount)
|
||||
assertTrue { reloadsExpected.await(20, TimeUnit.SECONDS) }
|
||||
assertEquals(7, reloads.size)
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `more complicated flow will reload from its checkpoint after suspending when reloadCheckpointAfterSuspend is true`() {
|
||||
val reloadCounts = mutableMapOf<StateMachineRunId, Int>()
|
||||
val reloads = ConcurrentLinkedQueue<StateMachineRunId>()
|
||||
FlowStateMachineImpl.onReloadFlowFromCheckpoint = { id ->
|
||||
reloadCounts.compute(id) { _, value -> value?.plus(1) ?: 1 }
|
||||
reloads.add(id)
|
||||
}
|
||||
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = FINANCE_CORDAPPS)) {
|
||||
|
||||
@ -325,8 +336,8 @@ class FlowReloadAfterCheckpointTest {
|
||||
.toSet()
|
||||
.single()
|
||||
Thread.sleep(10.seconds.toMillis())
|
||||
assertEquals(7, reloadCounts[flowStartedByAlice])
|
||||
assertEquals(6, reloadCounts[flowStartedByBob])
|
||||
assertEquals(7, reloads.filter { it == flowStartedByAlice }.size)
|
||||
assertEquals(6, reloads.filter { it == flowStartedByBob }.size)
|
||||
}
|
||||
}
|
||||
|
||||
@ -508,4 +519,8 @@ class FlowReloadAfterCheckpointTest {
|
||||
stateMachine.suspend(FlowIORequest.ForceCheckpoint, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
internal class BrokenMap<K, V>(delegate: MutableMap<K, V> = mutableMapOf()) : MutableMap<K, V> by delegate {
|
||||
override fun put(key: K, value: V): V? = throw IllegalStateException("Broken on purpose")
|
||||
}
|
@ -161,7 +161,7 @@ class FlowRetryTest {
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `General external exceptions are not retried and propagate`() {
|
||||
fun `general external exceptions are not retried and propagate`() {
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = cordapps)) {
|
||||
|
||||
val (nodeAHandle, nodeBHandle) = listOf(ALICE_NAME, BOB_NAME)
|
||||
@ -176,10 +176,7 @@ class FlowRetryTest {
|
||||
).returnValue.getOrThrow()
|
||||
}
|
||||
assertEquals(0, GeneralExternalFailureFlow.retryCount)
|
||||
assertEquals(
|
||||
1,
|
||||
nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.FAILED).returnValue.get()
|
||||
)
|
||||
assertEquals(0, nodeAHandle.rpc.startFlow(::GetCheckpointNumberOfStatusFlow, Checkpoint.FlowStatus.FAILED).returnValue.get())
|
||||
}
|
||||
}
|
||||
|
||||
@ -304,10 +301,6 @@ enum class Step { First, BeforeInitiate, AfterInitiate, AfterInitiateSendReceive
|
||||
|
||||
data class Visited(val sessionNum: Int, val iterationNum: Int, val step: Step)
|
||||
|
||||
class BrokenMap<K, V>(delegate: MutableMap<K, V> = mutableMapOf()) : MutableMap<K, V> by delegate {
|
||||
override fun put(key: K, value: V): V? = throw IllegalStateException("Broken on purpose")
|
||||
}
|
||||
|
||||
@StartableByRPC
|
||||
class RetryFlow() : FlowLogic<String>(), IdempotentFlow {
|
||||
companion object {
|
||||
|
@ -0,0 +1,174 @@
|
||||
package net.corda.node.flows
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import net.corda.core.CordaRuntimeException
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.ResultSerializationException
|
||||
import net.corda.core.flows.StartableByRPC
|
||||
import net.corda.core.internal.concurrent.OpenFuture
|
||||
import net.corda.core.internal.concurrent.openFuture
|
||||
import net.corda.core.messaging.startFlowWithClientId
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import org.assertj.core.api.Assertions
|
||||
import org.junit.Before
|
||||
import org.junit.Test
|
||||
import rx.Observable
|
||||
import java.util.UUID
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.assertFailsWith
|
||||
import kotlin.test.assertNotEquals
|
||||
import kotlin.test.assertTrue
|
||||
|
||||
class FlowWithClientIdTest {
|
||||
|
||||
@Before
|
||||
fun reset() {
|
||||
ResultFlow.hook = null
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `start flow with client id`() {
|
||||
val clientId = UUID.randomUUID().toString()
|
||||
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
|
||||
val nodeA = startNode().getOrThrow()
|
||||
val flowHandle = nodeA.rpc.startFlowWithClientId(clientId, ::ResultFlow, 5)
|
||||
|
||||
assertEquals(5, flowHandle.returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(clientId, flowHandle.clientId)
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `remove client id`() {
|
||||
val clientId = UUID.randomUUID().toString()
|
||||
var counter = 0
|
||||
ResultFlow.hook = { counter++ }
|
||||
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
|
||||
val nodeA = startNode().getOrThrow()
|
||||
|
||||
val flowHandle0 = nodeA.rpc.startFlowWithClientId(clientId, ::ResultFlow, 5)
|
||||
flowHandle0.returnValue.getOrThrow(20.seconds)
|
||||
|
||||
val removed = nodeA.rpc.removeClientId(clientId)
|
||||
|
||||
val flowHandle1 = nodeA.rpc.startFlowWithClientId(clientId, ::ResultFlow, 5)
|
||||
flowHandle1.returnValue.getOrThrow(20.seconds)
|
||||
|
||||
assertTrue(removed)
|
||||
assertNotEquals(flowHandle0.id, flowHandle1.id)
|
||||
assertEquals(flowHandle0.clientId, flowHandle1.clientId)
|
||||
assertEquals(2, counter) // this asserts that 2 different flows were spawned indeed
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `on flow unserializable result a 'CordaRuntimeException' is thrown containing in its message the unserializable type`() {
|
||||
val clientId = UUID.randomUUID().toString()
|
||||
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
|
||||
val nodeA = startNode().getOrThrow()
|
||||
|
||||
val e = assertFailsWith<ResultSerializationException> {
|
||||
nodeA.rpc.startFlowWithClientId(clientId, ::UnserializableResultFlow).returnValue.getOrThrow(20.seconds)
|
||||
}
|
||||
|
||||
val errorMessage = e.message
|
||||
assertTrue(errorMessage!!.contains("Unable to create an object serializer for type class ${UnserializableResultFlow.UNSERIALIZABLE_OBJECT::class.java.name}"))
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `If flow has an unserializable exception result then it gets converted into a 'CordaRuntimeException'`() {
|
||||
ResultFlow.hook = {
|
||||
throw UnserializableException()
|
||||
}
|
||||
val clientId = UUID.randomUUID().toString()
|
||||
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
|
||||
val node = startNode().getOrThrow()
|
||||
|
||||
// the below exception is the one populating the flows future. It will get serialized on node jvm, sent over to client and
|
||||
// deserialized on client's.
|
||||
val e0 = assertFailsWith<CordaRuntimeException> {
|
||||
node.rpc.startFlowWithClientId(clientId, ::ResultFlow, 5).returnValue.getOrThrow()
|
||||
}
|
||||
|
||||
// the below exception is getting fetched from the database first, and deserialized on node's jvm,
|
||||
// then serialized on node jvm, sent over to client and deserialized on client's.
|
||||
val e1 = assertFailsWith<CordaRuntimeException> {
|
||||
node.rpc.startFlowWithClientId(clientId, ::ResultFlow, 5).returnValue.getOrThrow()
|
||||
}
|
||||
|
||||
assertTrue(e0 !is UnserializableException)
|
||||
assertTrue(e1 !is UnserializableException)
|
||||
assertEquals(UnserializableException::class.java.name, e0.originalExceptionClassName)
|
||||
assertEquals(UnserializableException::class.java.name, e1.originalExceptionClassName)
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `reattachFlowWithClientId can retrieve results from existing flow future`() {
|
||||
val clientId = UUID.randomUUID().toString()
|
||||
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
|
||||
val nodeA = startNode().getOrThrow()
|
||||
val flowHandle = nodeA.rpc.startFlowWithClientId(clientId, ::ResultFlow, 5)
|
||||
val reattachedFlowHandle = nodeA.rpc.reattachFlowWithClientId<Int>(clientId)
|
||||
assertEquals(5, flowHandle.returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(clientId, flowHandle.clientId)
|
||||
assertEquals(flowHandle.id, reattachedFlowHandle?.id)
|
||||
assertEquals(flowHandle.returnValue.get(), reattachedFlowHandle?.returnValue?.get())
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300_000)
|
||||
fun `reattachFlowWithClientId can retrieve exception from existing flow future`() {
|
||||
ResultFlow.hook = { throw IllegalStateException("Bla bla bla") }
|
||||
val clientId = UUID.randomUUID().toString()
|
||||
driver(DriverParameters(startNodesInProcess = true, cordappsForAllNodes = emptySet())) {
|
||||
val nodeA = startNode().getOrThrow()
|
||||
val flowHandle = nodeA.rpc.startFlowWithClientId(clientId, ::ResultFlow, 5)
|
||||
val reattachedFlowHandle = nodeA.rpc.reattachFlowWithClientId<Int>(clientId)
|
||||
|
||||
// [CordaRunTimeException] returned because [IllegalStateException] is not serializable
|
||||
Assertions.assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
|
||||
flowHandle.returnValue.getOrThrow(20.seconds)
|
||||
}.withMessage("java.lang.IllegalStateException: Bla bla bla")
|
||||
|
||||
Assertions.assertThatExceptionOfType(CordaRuntimeException::class.java).isThrownBy {
|
||||
reattachedFlowHandle?.returnValue?.getOrThrow()
|
||||
}.withMessage("java.lang.IllegalStateException: Bla bla bla")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@StartableByRPC
|
||||
internal class ResultFlow<A>(private val result: A): FlowLogic<A>() {
|
||||
companion object {
|
||||
var hook: (() -> Unit)? = null
|
||||
var suspendableHook: FlowLogic<Unit>? = null
|
||||
}
|
||||
|
||||
@Suspendable
|
||||
override fun call(): A {
|
||||
hook?.invoke()
|
||||
suspendableHook?.let { subFlow(it) }
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
@StartableByRPC
|
||||
internal class UnserializableResultFlow: FlowLogic<OpenFuture<Observable<Unit>>>() {
|
||||
companion object {
|
||||
val UNSERIALIZABLE_OBJECT = openFuture<Observable<Unit>>().also { it.set(Observable.empty<Unit>())}
|
||||
}
|
||||
|
||||
@Suspendable
|
||||
override fun call(): OpenFuture<Observable<Unit>> {
|
||||
return UNSERIALIZABLE_OBJECT
|
||||
}
|
||||
}
|
||||
|
||||
internal class UnserializableException(
|
||||
val unserializableObject: BrokenMap<Unit, Unit> = BrokenMap()
|
||||
): CordaRuntimeException("123")
|
@ -26,7 +26,6 @@ import net.corda.core.utilities.seconds
|
||||
import net.corda.finance.DOLLARS
|
||||
import net.corda.finance.contracts.asset.Cash
|
||||
import net.corda.finance.flows.CashIssueFlow
|
||||
import net.corda.node.services.statemachine.Checkpoint
|
||||
import net.corda.testing.core.ALICE_NAME
|
||||
import net.corda.testing.core.BOB_NAME
|
||||
import net.corda.testing.core.CHARLIE_NAME
|
||||
@ -39,6 +38,8 @@ import net.corda.testing.node.internal.FINANCE_CORDAPPS
|
||||
import org.assertj.core.api.Assertions
|
||||
import org.junit.Test
|
||||
import java.time.Duration
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
import java.util.concurrent.CountDownLatch
|
||||
import java.util.concurrent.Semaphore
|
||||
import java.util.concurrent.TimeUnit
|
||||
import kotlin.system.measureTimeMillis
|
||||
@ -70,29 +71,45 @@ class KillFlowTest {
|
||||
fun `a killed flow will propagate the killed error to counter parties when it reaches the next suspension point`() {
|
||||
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
|
||||
val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
|
||||
.map { startNode(providedName = it) }
|
||||
.transpose()
|
||||
.getOrThrow()
|
||||
.map { startNode(providedName = it) }
|
||||
.transpose()
|
||||
.getOrThrow()
|
||||
alice.rpc.let { rpc ->
|
||||
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.locks[BOB_NAME] = CountDownLatch(1)
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.locks[CHARLIE_NAME] = CountDownLatch(1)
|
||||
|
||||
val handle = rpc.startFlow(
|
||||
::AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriends,
|
||||
listOf(bob.nodeInfo.singleIdentity(), charlie.nodeInfo.singleIdentity())
|
||||
)
|
||||
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriends.lockA.acquire()
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.locks.forEach { it.value.acquire() }
|
||||
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.locks.forEach { (_, lock) ->
|
||||
lock.await(30, TimeUnit.SECONDS)
|
||||
}
|
||||
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.locks[BOB_NAME] = CountDownLatch(1)
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.locks[CHARLIE_NAME] = CountDownLatch(1)
|
||||
|
||||
rpc.killFlow(handle.id)
|
||||
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriends.lockB.release()
|
||||
|
||||
assertFailsWith<KilledFlowException> {
|
||||
handle.returnValue.getOrThrow(1.minutes)
|
||||
}
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.locks.forEach { it.value.acquire() }
|
||||
|
||||
AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.locks.forEach { (_, lock) ->
|
||||
lock.await(30, TimeUnit.SECONDS)
|
||||
}
|
||||
|
||||
assertTrue(AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.receivedKilledExceptions[BOB_NAME]!!)
|
||||
assertTrue(AFlowThatGetsMurderedWhenItTriesToSuspendAndSomehowKillsItsFriendsResponder.receivedKilledExceptions[CHARLIE_NAME]!!)
|
||||
assertEquals(1, rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(2, bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, bob.rpc.startFlow(::GetNumberOfFailedCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(2, charlie.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, charlie.rpc.startFlow(::GetNumberOfFailedCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, charlie.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -121,9 +138,9 @@ class KillFlowTest {
|
||||
fun `killing a flow suspended in send + receive + sendAndReceive ends the flow immediately`() {
|
||||
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = false)) {
|
||||
val (alice, bob) = listOf(ALICE_NAME, BOB_NAME)
|
||||
.map { startNode(providedName = it) }
|
||||
.transpose()
|
||||
.getOrThrow()
|
||||
.map { startNode(providedName = it) }
|
||||
.transpose()
|
||||
.getOrThrow()
|
||||
val bobParty = bob.nodeInfo.singleIdentity()
|
||||
bob.stop()
|
||||
val terminated = (bob as OutOfProcess).process.waitFor(30, TimeUnit.SECONDS)
|
||||
@ -197,31 +214,36 @@ class KillFlowTest {
|
||||
fun `a killed flow will propagate the killed error to counter parties if it was suspended`() {
|
||||
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
|
||||
val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
|
||||
.map { startNode(providedName = it) }
|
||||
.transpose()
|
||||
.getOrThrow()
|
||||
.map { startNode(providedName = it) }
|
||||
.transpose()
|
||||
.getOrThrow()
|
||||
alice.rpc.let { rpc ->
|
||||
|
||||
AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.locks[BOB_NAME] = CountDownLatch(1)
|
||||
AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.locks[CHARLIE_NAME] = CountDownLatch(1)
|
||||
|
||||
val handle = rpc.startFlow(
|
||||
::AFlowThatGetsMurderedAndSomehowKillsItsFriends,
|
||||
listOf(bob.nodeInfo.singleIdentity(), charlie.nodeInfo.singleIdentity())
|
||||
)
|
||||
AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.locks.forEach {
|
||||
it.value.acquire()
|
||||
}
|
||||
|
||||
AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.locks.forEach { (_, lock) -> lock.await(30, TimeUnit.SECONDS) }
|
||||
|
||||
AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.locks[BOB_NAME] = CountDownLatch(1)
|
||||
AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.locks[CHARLIE_NAME] = CountDownLatch(1)
|
||||
|
||||
rpc.killFlow(handle.id)
|
||||
assertFailsWith<KilledFlowException> {
|
||||
handle.returnValue.getOrThrow(20.seconds)
|
||||
}
|
||||
AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.locks.forEach {
|
||||
it.value.acquire()
|
||||
}
|
||||
|
||||
AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.locks.forEach { (_, lock) -> lock.await(30, TimeUnit.SECONDS) }
|
||||
|
||||
assertTrue(AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.receivedKilledExceptions[BOB_NAME]!!)
|
||||
assertTrue(AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder.receivedKilledExceptions[CHARLIE_NAME]!!)
|
||||
assertEquals(1, rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(2, bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, bob.rpc.startFlow(::GetNumberOfFailedCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(2, charlie.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, charlie.rpc.startFlow(::GetNumberOfFailedCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, charlie.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -230,15 +252,21 @@ class KillFlowTest {
|
||||
fun `a killed initiated flow will propagate the killed error to the initiator and its counter parties`() {
|
||||
driver(DriverParameters(notarySpecs = emptyList(), startNodesInProcess = true)) {
|
||||
val (alice, bob, charlie) = listOf(ALICE_NAME, BOB_NAME, CHARLIE_NAME)
|
||||
.map { startNode(providedName = it) }
|
||||
.transpose()
|
||||
.getOrThrow()
|
||||
.map { startNode(providedName = it) }
|
||||
.transpose()
|
||||
.getOrThrow()
|
||||
|
||||
AFlowThatGetsMurderedByItsFriendResponder.locks[BOB_NAME] = CountDownLatch(1)
|
||||
AFlowThatGetsMurderedByItsFriendResponder.locks[CHARLIE_NAME] = CountDownLatch(1)
|
||||
|
||||
val handle = alice.rpc.startFlow(
|
||||
::AFlowThatGetsMurderedByItsFriend,
|
||||
listOf(bob.nodeInfo.singleIdentity(), charlie.nodeInfo.singleIdentity())
|
||||
)
|
||||
|
||||
AFlowThatGetsMurderedByItsFriendResponder.locks.forEach { it.value.acquire() }
|
||||
AFlowThatGetsMurderedByItsFriendResponder.locks.forEach { (_, lock) -> lock.await(30, TimeUnit.SECONDS) }
|
||||
|
||||
AFlowThatGetsMurderedByItsFriendResponder.locks[CHARLIE_NAME] = CountDownLatch(1)
|
||||
|
||||
val initiatedFlowId = AFlowThatGetsMurderedByItsFriendResponder.flowIds[BOB_NAME]!!
|
||||
|
||||
@ -247,15 +275,15 @@ class KillFlowTest {
|
||||
assertFailsWith<UnexpectedFlowEndException> {
|
||||
handle.returnValue.getOrThrow(1.minutes)
|
||||
}
|
||||
AFlowThatGetsMurderedByItsFriendResponder.locks[CHARLIE_NAME]!!.acquire()
|
||||
|
||||
AFlowThatGetsMurderedByItsFriendResponder.locks[CHARLIE_NAME]!!.await(30, TimeUnit.SECONDS)
|
||||
|
||||
assertTrue(AFlowThatGetsMurderedByItsFriend.receivedKilledException)
|
||||
assertFalse(AFlowThatGetsMurderedByItsFriendResponder.receivedKilledExceptions[BOB_NAME]!!)
|
||||
assertTrue(AFlowThatGetsMurderedByItsFriendResponder.receivedKilledExceptions[CHARLIE_NAME]!!)
|
||||
assertEquals(2, alice.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, alice.rpc.startFlow(::GetNumberOfFailedCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, alice.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, bob.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(2, charlie.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, charlie.rpc.startFlow(::GetNumberOfFailedCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
assertEquals(1, charlie.rpc.startFlow(::GetNumberOfCheckpointsFlow).returnValue.getOrThrow(20.seconds))
|
||||
}
|
||||
}
|
||||
|
||||
@ -343,14 +371,11 @@ class KillFlowTest {
|
||||
FlowLogic<Unit>() {
|
||||
|
||||
companion object {
|
||||
val locks = mapOf(
|
||||
BOB_NAME to Semaphore(0),
|
||||
CHARLIE_NAME to Semaphore(0)
|
||||
)
|
||||
var receivedKilledExceptions = mutableMapOf(
|
||||
BOB_NAME to false,
|
||||
CHARLIE_NAME to false
|
||||
)
|
||||
val locks = ConcurrentHashMap<CordaX500Name, CountDownLatch>()
|
||||
var receivedKilledExceptions = ConcurrentHashMap<CordaX500Name, Boolean>().apply {
|
||||
this[BOB_NAME] = false
|
||||
this[CHARLIE_NAME] = false
|
||||
}
|
||||
}
|
||||
|
||||
@Suspendable
|
||||
@ -358,12 +383,12 @@ class KillFlowTest {
|
||||
session.receive<String>()
|
||||
session.send("hi")
|
||||
session.receive<String>()
|
||||
locks[ourIdentity.name]!!.release()
|
||||
locks[ourIdentity.name]!!.countDown()
|
||||
try {
|
||||
session.receive<String>()
|
||||
} catch (e: UnexpectedFlowEndException) {
|
||||
receivedKilledExceptions[ourIdentity.name] = true
|
||||
locks[ourIdentity.name]!!.release()
|
||||
locks[ourIdentity.name]!!.countDown()
|
||||
throw e
|
||||
}
|
||||
}
|
||||
@ -480,14 +505,11 @@ class KillFlowTest {
|
||||
class AFlowThatGetsMurderedAndSomehowKillsItsFriendsResponder(private val session: FlowSession) : FlowLogic<Unit>() {
|
||||
|
||||
companion object {
|
||||
val locks = mapOf(
|
||||
BOB_NAME to Semaphore(0),
|
||||
CHARLIE_NAME to Semaphore(0)
|
||||
)
|
||||
var receivedKilledExceptions = mutableMapOf(
|
||||
BOB_NAME to false,
|
||||
CHARLIE_NAME to false
|
||||
)
|
||||
val locks = ConcurrentHashMap<CordaX500Name, CountDownLatch>()
|
||||
var receivedKilledExceptions = ConcurrentHashMap<CordaX500Name, Boolean>().apply {
|
||||
this[BOB_NAME] = false
|
||||
this[CHARLIE_NAME] = false
|
||||
}
|
||||
}
|
||||
|
||||
@Suspendable
|
||||
@ -495,12 +517,12 @@ class KillFlowTest {
|
||||
session.receive<String>()
|
||||
session.send("hi")
|
||||
session.receive<String>()
|
||||
locks[ourIdentity.name]!!.release()
|
||||
locks[ourIdentity.name]!!.countDown()
|
||||
try {
|
||||
session.receive<String>()
|
||||
} catch (e: UnexpectedFlowEndException) {
|
||||
receivedKilledExceptions[ourIdentity.name] = true
|
||||
locks[ourIdentity.name]!!.release()
|
||||
locks[ourIdentity.name]!!.countDown()
|
||||
throw e
|
||||
}
|
||||
}
|
||||
@ -539,15 +561,12 @@ class KillFlowTest {
|
||||
class AFlowThatGetsMurderedByItsFriendResponder(private val session: FlowSession) : FlowLogic<Unit>() {
|
||||
|
||||
companion object {
|
||||
val locks = mapOf(
|
||||
BOB_NAME to Semaphore(0),
|
||||
CHARLIE_NAME to Semaphore(0)
|
||||
)
|
||||
var receivedKilledExceptions = mutableMapOf(
|
||||
BOB_NAME to false,
|
||||
CHARLIE_NAME to false
|
||||
)
|
||||
var flowIds = mutableMapOf<CordaX500Name, StateMachineRunId>()
|
||||
val locks = ConcurrentHashMap<CordaX500Name, CountDownLatch>()
|
||||
var receivedKilledExceptions = ConcurrentHashMap<CordaX500Name, Boolean>().apply {
|
||||
this[BOB_NAME] = false
|
||||
this[CHARLIE_NAME] = false
|
||||
}
|
||||
var flowIds = ConcurrentHashMap<CordaX500Name, StateMachineRunId>()
|
||||
}
|
||||
|
||||
@Suspendable
|
||||
@ -556,12 +575,12 @@ class KillFlowTest {
|
||||
session.receive<String>()
|
||||
session.send("hi")
|
||||
session.receive<String>()
|
||||
locks[ourIdentity.name]!!.release()
|
||||
locks[ourIdentity.name]!!.countDown()
|
||||
try {
|
||||
session.receive<String>()
|
||||
} catch (e: UnexpectedFlowEndException) {
|
||||
receivedKilledExceptions[ourIdentity.name] = true
|
||||
locks[ourIdentity.name]!!.release()
|
||||
locks[ourIdentity.name]!!.countDown()
|
||||
throw e
|
||||
}
|
||||
}
|
||||
@ -588,18 +607,4 @@ class KillFlowTest {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@StartableByRPC
|
||||
class GetNumberOfFailedCheckpointsFlow : FlowLogic<Long>() {
|
||||
override fun call(): Long {
|
||||
return serviceHub.jdbcSession()
|
||||
.prepareStatement("select count(*) from node_checkpoints where status = ${Checkpoint.FlowStatus.FAILED.ordinal}")
|
||||
.use { ps ->
|
||||
ps.executeQuery().use { rs ->
|
||||
rs.next()
|
||||
rs.getLong(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -2,32 +2,21 @@ package net.corda.node.persistence
|
||||
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.node.flows.isQuasarAgentSpecified
|
||||
import net.corda.nodeapi.internal.persistence.DatabaseIncompatibleException
|
||||
import net.corda.node.internal.ConfigurationException
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.NodeParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import org.junit.Test
|
||||
import kotlin.test.assertFailsWith
|
||||
import kotlin.test.assertNotNull
|
||||
|
||||
class DbSchemaInitialisationTest {
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `database is initialised`() {
|
||||
@Test(timeout = 300_000)
|
||||
fun `database initialisation not allowed in config`() {
|
||||
driver(DriverParameters(startNodesInProcess = isQuasarAgentSpecified(), cordappsForAllNodes = emptyList())) {
|
||||
val nodeHandle = {
|
||||
startNode(NodeParameters(customOverrides = mapOf("database.initialiseSchema" to "true"))).getOrThrow()
|
||||
}()
|
||||
assertNotNull(nodeHandle)
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `database is not initialised`() {
|
||||
driver(DriverParameters(startNodesInProcess = isQuasarAgentSpecified(), cordappsForAllNodes = emptyList())) {
|
||||
assertFailsWith(DatabaseIncompatibleException::class) {
|
||||
assertFailsWith(ConfigurationException::class) {
|
||||
startNode(NodeParameters(customOverrides = mapOf("database.initialiseSchema" to "false"))).getOrThrow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -8,7 +8,6 @@ import net.corda.core.messaging.startFlow
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.node.flows.isQuasarAgentSpecified
|
||||
import net.corda.node.services.Permissions
|
||||
import net.corda.nodeapi.internal.persistence.CouldNotCreateDataSourceException
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import net.corda.testing.driver.internal.incrementalPortAllocation
|
||||
@ -16,7 +15,6 @@ import net.corda.testing.node.User
|
||||
import net.corda.testing.node.internal.enclosedCordapp
|
||||
import org.h2.jdbc.JdbcSQLNonTransientException
|
||||
import org.junit.Test
|
||||
import java.net.InetAddress
|
||||
import java.sql.DriverManager
|
||||
import kotlin.test.assertFailsWith
|
||||
import kotlin.test.assertNull
|
||||
@ -46,88 +44,6 @@ class H2SecurityTests {
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `h2 server on the host name requires non-default database password`() {
|
||||
driver(DriverParameters(
|
||||
inMemoryDB = false,
|
||||
startNodesInProcess = isQuasarAgentSpecified(),
|
||||
notarySpecs = emptyList(),
|
||||
cordappsForAllNodes = emptyList()
|
||||
)) {
|
||||
assertFailsWith(CouldNotCreateDataSourceException::class) {
|
||||
startNode(customOverrides = mapOf(h2AddressKey to "${InetAddress.getLocalHost().hostName}:${getFreePort()}")).getOrThrow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `h2 server on the external host IP requires non-default database password`() {
|
||||
driver(DriverParameters(
|
||||
inMemoryDB = false,
|
||||
startNodesInProcess = isQuasarAgentSpecified(),
|
||||
notarySpecs = emptyList(),
|
||||
cordappsForAllNodes = emptyList()
|
||||
)) {
|
||||
assertFailsWith(CouldNotCreateDataSourceException::class) {
|
||||
startNode(customOverrides = mapOf(h2AddressKey to "${InetAddress.getLocalHost().hostAddress}:${getFreePort()}")).getOrThrow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `h2 server on host name requires non-blank database password`() {
|
||||
driver(DriverParameters(
|
||||
inMemoryDB = false,
|
||||
startNodesInProcess = isQuasarAgentSpecified(),
|
||||
notarySpecs = emptyList(),
|
||||
cordappsForAllNodes = emptyList()
|
||||
)) {
|
||||
assertFailsWith(CouldNotCreateDataSourceException::class) {
|
||||
startNode(customOverrides = mapOf(h2AddressKey to "${InetAddress.getLocalHost().hostName}:${getFreePort()}",
|
||||
dbPasswordKey to " ")).getOrThrow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `h2 server on external host IP requires non-blank database password`() {
|
||||
driver(DriverParameters(
|
||||
inMemoryDB = false,
|
||||
startNodesInProcess = isQuasarAgentSpecified(),
|
||||
notarySpecs = emptyList(),
|
||||
cordappsForAllNodes = emptyList()
|
||||
)) {
|
||||
assertFailsWith(CouldNotCreateDataSourceException::class) {
|
||||
startNode(customOverrides = mapOf(h2AddressKey to "${InetAddress.getLocalHost().hostAddress}:${getFreePort()}",
|
||||
dbPasswordKey to " ")).getOrThrow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `h2 server on localhost runs with the default database password`() {
|
||||
driver(DriverParameters(
|
||||
inMemoryDB = false,
|
||||
startNodesInProcess = false,
|
||||
notarySpecs = emptyList(),
|
||||
cordappsForAllNodes = emptyList()
|
||||
)) {
|
||||
startNode(customOverrides = mapOf(h2AddressKey to "localhost:${getFreePort()}")).getOrThrow()
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `h2 server to loopback IP runs with the default database password`() {
|
||||
driver(DriverParameters(
|
||||
inMemoryDB = false,
|
||||
startNodesInProcess = isQuasarAgentSpecified(),
|
||||
notarySpecs = emptyList(),
|
||||
cordappsForAllNodes = emptyList()
|
||||
)) {
|
||||
startNode(customOverrides = mapOf(h2AddressKey to "127.0.0.1:${getFreePort()}")).getOrThrow()
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `remote code execution via h2 server is disabled`() {
|
||||
driver(DriverParameters(
|
||||
|
@ -1,32 +0,0 @@
|
||||
package net.corda.node.services.config
|
||||
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import net.corda.testing.driver.internal.incrementalPortAllocation
|
||||
import net.corda.testing.driver.logFile
|
||||
import org.junit.Assert.assertTrue
|
||||
import org.junit.Test
|
||||
|
||||
class NodeConfigParsingTests {
|
||||
@Test(timeout = 300_000)
|
||||
fun `bad keys are ignored and warned for`() {
|
||||
val portAllocator = incrementalPortAllocation()
|
||||
driver(DriverParameters(
|
||||
environmentVariables = mapOf(
|
||||
"corda_bad_key" to "2077"),
|
||||
startNodesInProcess = false,
|
||||
portAllocation = portAllocator,
|
||||
notarySpecs = emptyList())) {
|
||||
|
||||
val hasWarning = startNode()
|
||||
.getOrThrow()
|
||||
.logFile()
|
||||
.readLines()
|
||||
.any {
|
||||
it.contains("(property or environment variable) cannot be mapped to an existing Corda")
|
||||
}
|
||||
assertTrue(hasWarning)
|
||||
}
|
||||
}
|
||||
}
|
@ -94,7 +94,8 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
internalDriver(
|
||||
portAllocation = portAllocation,
|
||||
compatibilityZone = compatibilityZone,
|
||||
notarySpecs = emptyList()
|
||||
notarySpecs = emptyList(),
|
||||
allowHibernateToManageAppSchema = false
|
||||
) {
|
||||
val alice = startNode(providedName = ALICE_NAME, devMode = false).getOrThrow() as NodeHandleInternal
|
||||
val nextParams = networkMapServer.networkParameters.copy(
|
||||
@ -148,7 +149,8 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
internalDriver(
|
||||
portAllocation = portAllocation,
|
||||
compatibilityZone = compatibilityZone,
|
||||
notarySpecs = emptyList()
|
||||
notarySpecs = emptyList(),
|
||||
allowHibernateToManageAppSchema = false
|
||||
) {
|
||||
|
||||
val notary: Party = TestIdentity.fresh("test notary").party
|
||||
@ -157,8 +159,9 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
epoch = 3,
|
||||
modifiedTime = Instant.ofEpochMilli(random63BitValue())).addNotary(notary)
|
||||
|
||||
val alice = startNodeAndRunFlagDay(paramsWithNewNotary)
|
||||
eventually { assertEquals(paramsWithNewNotary, alice.rpc.networkParameters) }
|
||||
startNodeAndRunFlagDay(paramsWithNewNotary).use { alice ->
|
||||
eventually { assertEquals(paramsWithNewNotary, alice.rpc.networkParameters) }
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
@ -168,7 +171,8 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
internalDriver(
|
||||
portAllocation = portAllocation,
|
||||
compatibilityZone = compatibilityZone,
|
||||
notarySpecs = emptyList()
|
||||
notarySpecs = emptyList(),
|
||||
allowHibernateToManageAppSchema = false
|
||||
) {
|
||||
|
||||
val notary: Party = TestIdentity.fresh("test notary").party
|
||||
@ -192,7 +196,8 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
internalDriver(
|
||||
portAllocation = portAllocation,
|
||||
compatibilityZone = compatibilityZone,
|
||||
notarySpecs = emptyList()
|
||||
notarySpecs = emptyList(),
|
||||
allowHibernateToManageAppSchema = false
|
||||
) {
|
||||
|
||||
val oldParams = networkMapServer.networkParameters
|
||||
@ -200,8 +205,9 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
epoch = 3,
|
||||
modifiedTime = Instant.ofEpochMilli(random63BitValue()),
|
||||
maxMessageSize = oldParams.maxMessageSize + 1)
|
||||
val alice = startNodeAndRunFlagDay(paramsWithUpdatedMaxMessageSize)
|
||||
eventually { assertThatThrownBy { alice.rpc.networkParameters }.hasMessageContaining("Connection failure detected") }
|
||||
startNodeAndRunFlagDay(paramsWithUpdatedMaxMessageSize).use { alice ->
|
||||
eventually { assertThatThrownBy { alice.rpc.networkParameters }.hasMessageContaining("Connection failure detected") }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -210,7 +216,8 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
internalDriver(
|
||||
portAllocation = portAllocation,
|
||||
compatibilityZone = compatibilityZone,
|
||||
notarySpecs = emptyList()
|
||||
notarySpecs = emptyList(),
|
||||
allowHibernateToManageAppSchema = false
|
||||
) {
|
||||
|
||||
val oldParams = networkMapServer.networkParameters
|
||||
@ -219,8 +226,9 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
epoch = 3,
|
||||
modifiedTime = Instant.ofEpochMilli(random63BitValue()),
|
||||
maxMessageSize = oldParams.maxMessageSize + 1).addNotary(notary)
|
||||
val alice = startNodeAndRunFlagDay(paramsWithUpdatedMaxMessageSizeAndNotary)
|
||||
eventually { assertThatThrownBy { alice.rpc.networkParameters }.hasMessageContaining("Connection failure detected") }
|
||||
startNodeAndRunFlagDay(paramsWithUpdatedMaxMessageSizeAndNotary).use { alice ->
|
||||
eventually { assertThatThrownBy { alice.rpc.networkParameters }.hasMessageContaining("Connection failure detected") }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -244,26 +252,28 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
internalDriver(
|
||||
portAllocation = portAllocation,
|
||||
compatibilityZone = compatibilityZone,
|
||||
notarySpecs = emptyList()
|
||||
notarySpecs = emptyList(),
|
||||
allowHibernateToManageAppSchema = false
|
||||
) {
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, devMode = false).getOrThrow()
|
||||
assertDownloadedNetworkParameters(aliceNode)
|
||||
aliceNode.onlySees(aliceNode.nodeInfo)
|
||||
startNode(providedName = ALICE_NAME, devMode = false).getOrThrow().use { aliceNode ->
|
||||
assertDownloadedNetworkParameters(aliceNode)
|
||||
aliceNode.onlySees(aliceNode.nodeInfo)
|
||||
|
||||
val bobNode = startNode(providedName = BOB_NAME, devMode = false).getOrThrow()
|
||||
// Wait for network map client to poll for the next update.
|
||||
Thread.sleep(cacheTimeout.toMillis() * 2)
|
||||
|
||||
// Wait for network map client to poll for the next update.
|
||||
Thread.sleep(cacheTimeout.toMillis() * 2)
|
||||
startNode(providedName = BOB_NAME, devMode = false).getOrThrow().use { bobNode ->
|
||||
bobNode.onlySees(aliceNode.nodeInfo, bobNode.nodeInfo)
|
||||
aliceNode.onlySees(aliceNode.nodeInfo, bobNode.nodeInfo)
|
||||
|
||||
bobNode.onlySees(aliceNode.nodeInfo, bobNode.nodeInfo)
|
||||
aliceNode.onlySees(aliceNode.nodeInfo, bobNode.nodeInfo)
|
||||
networkMapServer.removeNodeInfo(aliceNode.nodeInfo)
|
||||
|
||||
networkMapServer.removeNodeInfo(aliceNode.nodeInfo)
|
||||
// Wait for network map client to poll for the next update.
|
||||
Thread.sleep(cacheTimeout.toMillis() * 2)
|
||||
|
||||
// Wait for network map client to poll for the next update.
|
||||
Thread.sleep(cacheTimeout.toMillis() * 2)
|
||||
|
||||
bobNode.onlySees(bobNode.nodeInfo)
|
||||
bobNode.onlySees(bobNode.nodeInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -273,26 +283,28 @@ class NetworkMapTest(var initFunc: (URL, NetworkMapServer) -> CompatibilityZoneP
|
||||
portAllocation = portAllocation,
|
||||
compatibilityZone = compatibilityZone,
|
||||
notarySpecs = emptyList(),
|
||||
systemProperties = mapOf("net.corda.node.internal.nodeinfo.publish.interval" to 1.seconds.toString())
|
||||
systemProperties = mapOf("net.corda.node.internal.nodeinfo.publish.interval" to 1.seconds.toString()),
|
||||
allowHibernateToManageAppSchema = false
|
||||
) {
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, devMode = false).getOrThrow()
|
||||
val aliceNodeInfo = aliceNode.nodeInfo.serialize().hash
|
||||
assertThat(networkMapServer.networkMapHashes()).contains(aliceNodeInfo)
|
||||
networkMapServer.removeNodeInfo(aliceNode.nodeInfo)
|
||||
|
||||
var maxRemoveRetries = 5
|
||||
|
||||
// Try to remove multiple times in case the network map republishes just in between the removal and the check.
|
||||
while (aliceNodeInfo in networkMapServer.networkMapHashes()) {
|
||||
startNode(providedName = ALICE_NAME, devMode = false).getOrThrow().use { aliceNode ->
|
||||
val aliceNodeInfo = aliceNode.nodeInfo.serialize().hash
|
||||
assertThat(networkMapServer.networkMapHashes()).contains(aliceNodeInfo)
|
||||
networkMapServer.removeNodeInfo(aliceNode.nodeInfo)
|
||||
if (maxRemoveRetries-- == 0) {
|
||||
throw AssertionError("Could not remove Node info.")
|
||||
}
|
||||
}
|
||||
|
||||
// Wait until the node info is republished.
|
||||
Thread.sleep(2000)
|
||||
assertThat(networkMapServer.networkMapHashes()).contains(aliceNodeInfo)
|
||||
var maxRemoveRetries = 5
|
||||
|
||||
// Try to remove multiple times in case the network map republishes just in between the removal and the check.
|
||||
while (aliceNodeInfo in networkMapServer.networkMapHashes()) {
|
||||
networkMapServer.removeNodeInfo(aliceNode.nodeInfo)
|
||||
if (maxRemoveRetries-- == 0) {
|
||||
throw AssertionError("Could not remove Node info.")
|
||||
}
|
||||
}
|
||||
|
||||
// Wait until the node info is republished.
|
||||
Thread.sleep(2000)
|
||||
assertThat(networkMapServer.networkMapHashes()).contains(aliceNodeInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -58,7 +58,7 @@ class RpcExceptionHandlingTest {
|
||||
}
|
||||
}
|
||||
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()))) {
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()), allowHibernateToManageAppSchema = false)) {
|
||||
val (devModeNode, node) = listOf(startNode(params, BOB_NAME),
|
||||
startNode(ALICE_NAME, devMode = false, parameters = params))
|
||||
.transpose()
|
||||
@ -79,7 +79,7 @@ class RpcExceptionHandlingTest {
|
||||
rpc.startFlow(::FlowExceptionFlow, expectedMessage, expectedErrorId).returnValue.getOrThrow()
|
||||
}
|
||||
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()))) {
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()), allowHibernateToManageAppSchema = false)) {
|
||||
val (devModeNode, node) = listOf(startNode(params, BOB_NAME),
|
||||
startNode(ALICE_NAME, devMode = false, parameters = params))
|
||||
.transpose()
|
||||
@ -115,7 +115,7 @@ class RpcExceptionHandlingTest {
|
||||
nodeA.rpc.startFlow(::InitFlow, nodeB.nodeInfo.singleIdentity()).returnValue.getOrThrow()
|
||||
}
|
||||
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()))) {
|
||||
driver(DriverParameters(startNodesInProcess = true, notarySpecs = emptyList(), cordappsForAllNodes = listOf(enclosedCordapp()), allowHibernateToManageAppSchema = false)) {
|
||||
|
||||
assertThatThrownBy { scenario(ALICE_NAME, BOB_NAME,true) }.isInstanceOfSatisfying(CordaRuntimeException::class.java) { exception ->
|
||||
|
||||
|
@ -445,11 +445,11 @@ class VaultObserverExceptionTest {
|
||||
|
||||
val user = User("user", "foo", setOf(Permissions.all()))
|
||||
driver(DriverParameters(startNodesInProcess = true,
|
||||
cordappsForAllNodes = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
|
||||
findCordapp("com.r3.dbfailure.schemas")
|
||||
),inMemoryDB = false)
|
||||
cordappsForAllNodes = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
|
||||
findCordapp("com.r3.dbfailure.schemas")
|
||||
), inMemoryDB = false)
|
||||
) {
|
||||
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
|
||||
.map { startNode(providedName = it,
|
||||
@ -537,12 +537,12 @@ class VaultObserverExceptionTest {
|
||||
|
||||
val user = User("user", "foo", setOf(Permissions.all()))
|
||||
driver(DriverParameters(startNodesInProcess = true,
|
||||
cordappsForAllNodes = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
|
||||
findCordapp("com.r3.dbfailure.schemas")
|
||||
),
|
||||
inMemoryDB = false)
|
||||
cordappsForAllNodes = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
|
||||
findCordapp("com.r3.dbfailure.schemas")
|
||||
),
|
||||
inMemoryDB = false)
|
||||
) {
|
||||
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
|
||||
.map { startNode(providedName = it,
|
||||
@ -622,12 +622,12 @@ class VaultObserverExceptionTest {
|
||||
|
||||
val user = User("user", "foo", setOf(Permissions.all()))
|
||||
driver(DriverParameters(startNodesInProcess = true,
|
||||
cordappsForAllNodes = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
|
||||
findCordapp("com.r3.dbfailure.schemas")
|
||||
),
|
||||
inMemoryDB = false)
|
||||
cordappsForAllNodes = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
|
||||
findCordapp("com.r3.dbfailure.schemas")
|
||||
),
|
||||
inMemoryDB = false)
|
||||
) {
|
||||
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
|
||||
.map { startNode(providedName = it,
|
||||
@ -702,12 +702,12 @@ class VaultObserverExceptionTest {
|
||||
|
||||
val user = User("user", "foo", setOf(Permissions.all()))
|
||||
driver(DriverParameters(startNodesInProcess = true,
|
||||
cordappsForAllNodes = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
|
||||
findCordapp("com.r3.dbfailure.schemas")
|
||||
),
|
||||
inMemoryDB = false)
|
||||
cordappsForAllNodes = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
findCordapp("com.r3.dbfailure.schemas")
),
inMemoryDB = false)
) {
val (aliceNode, bobNode) = listOf(ALICE_NAME, BOB_NAME)
.map { startNode(providedName = it,
@@ -762,12 +762,12 @@ class VaultObserverExceptionTest {
fun `Accessing NodeVaultService rawUpdates from a flow is not allowed` () {
val user = User("user", "foo", setOf(Permissions.all()))
driver(DriverParameters(startNodesInProcess = true,
cordappsForAllNodes = listOf(
findCordapp("com.r3.dbfailure.contracts"),
findCordapp("com.r3.dbfailure.workflows"),
findCordapp("com.r3.dbfailure.schemas")
),
inMemoryDB = false)
cordappsForAllNodes = listOf(
findCordapp("com.r3.dbfailure.contracts"),
findCordapp("com.r3.dbfailure.workflows"),
findCordapp("com.r3.dbfailure.schemas")
),
inMemoryDB = false)
) {
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()

@@ -792,12 +792,12 @@ class VaultObserverExceptionTest {

val user = User("user", "foo", setOf(Permissions.all()))
driver(DriverParameters(startNodesInProcess = true,
cordappsForAllNodes = listOf(
findCordapp("com.r3.dbfailure.contracts"),
findCordapp("com.r3.dbfailure.workflows"),
findCordapp("com.r3.transactionfailure.workflows"),
findCordapp("com.r3.dbfailure.schemas")),
inMemoryDB = false)
cordappsForAllNodes = listOf(
findCordapp("com.r3.dbfailure.contracts"),
findCordapp("com.r3.dbfailure.workflows"),
findCordapp("com.r3.transactionfailure.workflows"),
findCordapp("com.r3.dbfailure.schemas")),
inMemoryDB = false)
) {
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(user)).getOrThrow()

@@ -823,12 +823,12 @@ class VaultObserverExceptionTest {

val user = User("user", "foo", setOf(Permissions.all()))
driver(DriverParameters(startNodesInProcess = true,
cordappsForAllNodes = listOf(
findCordapp("com.r3.dbfailure.contracts"),
findCordapp("com.r3.dbfailure.workflows"),
findCordapp("com.r3.transactionfailure.workflows"),
findCordapp("com.r3.dbfailure.schemas")),
inMemoryDB = false)
cordappsForAllNodes = listOf(
findCordapp("com.r3.dbfailure.contracts"),
findCordapp("com.r3.dbfailure.workflows"),
findCordapp("com.r3.transactionfailure.workflows"),
findCordapp("com.r3.dbfailure.schemas")),
inMemoryDB = false)
) {
// Subscribing with custom SafeSubscriber; the custom SafeSubscriber will not get replaced by a ResilientSubscriber
// meaning that it will behave as a SafeSubscriber; it will get unsubscribed upon throwing an error.

@@ -42,7 +42,6 @@ class P2PMessagingTest {
private fun startDriverWithDistributedService(dsl: DriverDSL.(List<InProcess>) -> Unit) {
driver(DriverParameters(
startNodesInProcess = true,
extraCordappPackagesToScan = listOf("net.corda.notary.raft"),
notarySpecs = listOf(NotarySpec(DISTRIBUTED_SERVICE_NAME, cluster = ClusterSpec.Raft(clusterSize = 2)))
)) {
dsl(defaultNotaryHandle.nodeHandles.getOrThrow().map { (it as InProcess) })

@@ -48,6 +48,14 @@ open class SharedNodeCmdLineOptions {
)
var devMode: Boolean? = null

@Option(
names = ["--allow-hibernate-to-manage-app-schema"],
description = ["Allows hibernate to create/modify app schema for CorDapps based on their mapped schema.",
"Use this for rapid app development or for compatibility with pre-4.6 CorDapps.",
"Only available in dev mode."]
)
var allowHibernateToManageAppSchema: Boolean = false

open fun parseConfiguration(configuration: Config): Valid<NodeConfiguration> {
val option = Configuration.Options(strict = unknownConfigKeysPolicy == UnknownConfigKeysPolicy.FAIL)
return configuration.parseAsNodeConfiguration(option)
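Note (not part of the diff): the new --allow-hibernate-to-manage-app-schema option only takes effect in dev mode; the AbstractNode hunk further down rejects it otherwise with a ConfigurationException. A minimal hedged sketch of the intended wiring, mirroring the NodeStartup.createNode change later in this merge (conf and versionInfo are assumed to be already parsed):

// Sketch only: the command-line flag is handed straight to the Node constructor.
val node = Node(conf, versionInfo, allowHibernateToManageAppSchema = cmdLineOptions.allowHibernateToManageAppSchema)
// Outside dev mode the node's init block throws ConfigurationException and asks for migration scripts instead.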
@@ -28,7 +28,7 @@ import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.identity.PartyAndCertificate
import net.corda.core.internal.AttachmentTrustCalculator
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.FlowStateMachineHandle
import net.corda.core.internal.NODE_INFO_DIRECTORY
import net.corda.core.internal.NamedCacheFactory
import net.corda.core.internal.NetworkParametersStorage
@@ -94,7 +94,6 @@ import net.corda.node.services.api.VaultServiceInternal
import net.corda.node.services.api.WritableTransactionStorage
import net.corda.node.services.attachments.NodeAttachmentTrustCalculator
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.config.configureWithDevSSLCertificate
import net.corda.node.services.config.rpc.NodeRpcOptions
import net.corda.node.services.config.shell.determineUnsafeUsers
import net.corda.node.services.config.shell.toShellConfig
@@ -149,8 +148,6 @@ import net.corda.nodeapi.internal.cordapp.CordappLoader
import net.corda.nodeapi.internal.crypto.CertificateType
import net.corda.nodeapi.internal.crypto.X509Utilities
import net.corda.nodeapi.internal.crypto.X509Utilities.CORDA_CLIENT_CA
import net.corda.nodeapi.internal.crypto.X509Utilities.CORDA_CLIENT_TLS
import net.corda.nodeapi.internal.crypto.X509Utilities.CORDA_ROOT_CA
import net.corda.nodeapi.internal.crypto.X509Utilities.DEFAULT_VALIDITY_WINDOW
import net.corda.nodeapi.internal.crypto.X509Utilities.DISTRIBUTED_NOTARY_COMPOSITE_KEY_ALIAS
import net.corda.nodeapi.internal.crypto.X509Utilities.DISTRIBUTED_NOTARY_KEY_ALIAS
@@ -176,18 +173,15 @@ import org.jolokia.jvmagent.JolokiaServer
import org.jolokia.jvmagent.JolokiaServerConfig
import org.slf4j.Logger
import rx.Scheduler
import java.io.IOException
import java.lang.reflect.InvocationTargetException
import java.nio.file.Path
import java.security.KeyPair
import java.security.KeyStoreException
import java.security.cert.X509Certificate
import java.sql.Connection
import java.sql.Savepoint
import java.time.Clock
import java.time.Duration
import java.time.format.DateTimeParseException
import java.util.Properties
import java.util.*
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.LinkedBlockingQueue
@@ -197,6 +191,8 @@ import java.util.concurrent.TimeUnit.MINUTES
import java.util.concurrent.TimeUnit.SECONDS
import java.util.function.Consumer
import javax.persistence.EntityManager
import javax.sql.DataSource
import kotlin.collections.ArrayList

/**
* A base node implementation that can be customised either for production (with real implementations that do real
@@ -214,9 +210,12 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
val serverThread: AffinityExecutor.ServiceAffinityExecutor,
val busyNodeLatch: ReusableLatch = ReusableLatch(),
djvmBootstrapSource: ApiSource = EmptyApi,
djvmCordaSource: UserSource? = null) : SingletonSerializeAsToken() {
djvmCordaSource: UserSource? = null,
protected val allowHibernateToManageAppSchema: Boolean = false,
private val allowAppSchemaUpgradeWithCheckpoints: Boolean = false) : SingletonSerializeAsToken() {

protected abstract val log: Logger

@Suppress("LeakingThis")
private var tokenizableServices: MutableList<SerializeAsToken>? = mutableListOf(platformClock, this)

@@ -226,6 +225,11 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,

protected val runOnStop = ArrayList<() -> Any?>()

protected open val runMigrationScripts: Boolean = configuredDbIsInMemory()

// if the configured DB is in memory, we will need to run db migrations, as the db does not persist between runs.
private fun configuredDbIsInMemory() = configuration.dataSourceProperties.getProperty("dataSource.url").startsWith("jdbc:h2:mem:")

init {
(serverThread as? ExecutorService)?.let {
runOnStop += {
@@ -237,6 +241,12 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
}

quasarExcludePackages(configuration)

if (allowHibernateToManageAppSchema && !configuration.devMode) {
throw ConfigurationException("Hibernate can only be used to manage app schema in development while using dev mode. " +
"Please remove the --allow-hibernate-to-manage-app-schema command line flag and provide schema migration scripts for your CorDapps."
)
}
}

private val notaryLoader = configuration.notary?.let {
@@ -252,7 +262,8 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
schemaService,
configuration.dataSourceProperties,
cacheFactory,
cordappLoader.appClassLoader)
cordappLoader.appClassLoader,
allowHibernateToManageAppSchema)

private val transactionSupport = CordaTransactionSupportImpl(database)

@@ -289,19 +300,8 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
val nodeProperties = NodePropertiesPersistentStore(StubbedNodeUniqueIdProvider::value, database, cacheFactory)
val flowLogicRefFactory = makeFlowLogicRefFactoryImpl()
// TODO Cancelling parameters updates - if we do that, how we ensure that no one uses cancelled parameters in the transactions?
val networkMapUpdater = NetworkMapUpdater(
networkMapCache,
NodeInfoWatcher(
configuration.baseDirectory,
@Suppress("LeakingThis")
rxIoScheduler,
Duration.ofMillis(configuration.additionalNodeInfoPollingFrequencyMsec)
),
networkMapClient,
configuration.baseDirectory,
configuration.extraNetworkMapKeys,
networkParametersStorage
).closeOnStop()
val networkMapUpdater = makeNetworkMapUpdater()

@Suppress("LeakingThis")
val transactionVerifierService = InMemoryTransactionVerifierService(
numberOfWorkers = transactionVerifierWorkerCount,
@@ -335,17 +335,8 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
val checkpointStorage = DBCheckpointStorage(DBCheckpointPerformanceRecorder(services.monitoringService.metrics), platformClock)
@Suppress("LeakingThis")
val smm = makeStateMachineManager()
val flowStarter = FlowStarterImpl(smm, flowLogicRefFactory)
private val schedulerService = NodeSchedulerService(
platformClock,
database,
flowStarter,
servicesForResolution,
flowLogicRefFactory,
nodeProperties,
configuration.drainingModePollPeriod,
unfinishedSchedules = busyNodeLatch
).tokenize().closeOnStop()
val flowStarter = FlowStarterImpl(smm, flowLogicRefFactory, DBCheckpointStorage.MAX_CLIENT_ID_LENGTH)
private val schedulerService = makeNodeSchedulerService()

private val cordappServices = MutableClassToInstanceMap.create<SerializeAsToken>()
private val shutdownExecutor = Executors.newSingleThreadExecutor()
@@ -418,18 +409,6 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
return proxies.fold(ops) { delegate, decorate -> decorate(delegate) }
}

private fun initKeyStores(): X509Certificate {
if (configuration.devMode) {
configuration.configureWithDevSSLCertificate(cryptoService)
// configureWithDevSSLCertificate is a devMode process that writes directly to keystore files, so
// we should re-synchronise BCCryptoService with the updated keystore file.
if (cryptoService is BCCryptoService) {
cryptoService.resyncKeystore()
}
}
return validateKeyStores()
}

private fun quasarExcludePackages(nodeConfiguration: NodeConfiguration) {
val quasarInstrumentor = Retransform.getInstrumentor()

@@ -441,7 +420,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
open fun generateAndSaveNodeInfo(): NodeInfo {
check(started == null) { "Node has already been started" }
log.info("Generating nodeInfo ...")
val trustRoot = initKeyStores()
val trustRoot = configuration.initKeyStores(cryptoService)
startDatabase()
val (identity, identityKeyPair) = obtainIdentity()
val nodeCa = configuration.signingCertificateStore.get()[CORDA_CLIENT_CA]
@@ -463,6 +442,53 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
}
}

open fun runDatabaseMigrationScripts(
updateCoreSchemas: Boolean,
updateAppSchemas: Boolean,
updateAppSchemasWithCheckpoints: Boolean
) {
check(started == null) { "Node has already been started" }
Node.printBasicNodeInfo("Running database schema migration scripts ...")
val props = configuration.dataSourceProperties
if (props.isEmpty) throw DatabaseConfigurationException("There must be a database configured.")
database.startHikariPool(props, metricRegistry) { dataSource, haveCheckpoints ->
SchemaMigration(dataSource, cordappLoader, configuration.baseDirectory, configuration.myLegalName)
.checkOrUpdate(schemaService.internalSchemas, updateCoreSchemas, haveCheckpoints, true)
.checkOrUpdate(schemaService.appSchemas, updateAppSchemas, !updateAppSchemasWithCheckpoints && haveCheckpoints, false)
}
// Now log the vendor string as this will also cause a connection to be tested eagerly.
logVendorString(database, log)
if (allowHibernateToManageAppSchema) {
Node.printBasicNodeInfo("Initialising CorDapps to get schemas created by hibernate")
val trustRoot = configuration.initKeyStores(cryptoService)
networkMapClient?.start(trustRoot)
val (netParams, signedNetParams) = NetworkParametersReader(trustRoot, networkMapClient, configuration.baseDirectory).read()
log.info("Loaded network parameters: $netParams")
check(netParams.minimumPlatformVersion <= versionInfo.platformVersion) {
"Node's platform version is lower than network's required minimumPlatformVersion"
}
networkMapCache.start(netParams.notaries)

database.transaction {
networkParametersStorage.setCurrentParameters(signedNetParams, trustRoot)
cordappProvider.start()
}
}
Node.printBasicNodeInfo("Database migration done.")
}

fun runSchemaSync() {
check(started == null) { "Node has already been started" }
Node.printBasicNodeInfo("Synchronising CorDapp schemas to the changelog ...")
val hikariProperties = configuration.dataSourceProperties
if (hikariProperties.isEmpty) throw DatabaseConfigurationException("There must be a database configured.")

val dataSource = DataSourceFactory.createDataSource(hikariProperties, metricRegistry = metricRegistry)
SchemaMigration(dataSource, cordappLoader, configuration.baseDirectory, configuration.myLegalName)
.synchroniseSchemas(schemaService.appSchemas, false)
Node.printBasicNodeInfo("CorDapp schemas synchronised")
}

@Suppress("ComplexMethod")
open fun start(): S {
check(started == null) { "Node has already been started" }
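Note (not part of the diff): runDatabaseMigrationScripts and runSchemaSync above are the programmatic entry points behind the new run-migration-scripts and sync-app-schemas sub-commands added later in this merge. A hedged sketch of a typical call, with the positional booleans spelled out as named arguments (node is assumed to be an initialised but not yet started Node):

// Update both core and CorDapp schemas, but refuse to touch app schemas while suspended flows (checkpoints) exist.
node.runDatabaseMigrationScripts(
    updateCoreSchemas = true,
    updateAppSchemas = true,
    updateAppSchemasWithCheckpoints = false
)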
@@ -473,7 +499,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
nodeLifecycleEventsDistributor.distributeEvent(NodeLifecycleEvent.BeforeNodeStart(nodeServicesContext))
log.info("Node starting up ...")

val trustRoot = initKeyStores()
val trustRoot = configuration.initKeyStores(cryptoService)
initialiseJolokia()

schemaService.mappedSchemasWarnings().forEach {
@@ -748,6 +774,31 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
// Extracted into a function to allow overriding in subclasses.
protected open fun makeFlowLogicRefFactoryImpl() = FlowLogicRefFactoryImpl(cordappLoader.appClassLoader)

protected open fun makeNetworkMapUpdater() = NetworkMapUpdater(
networkMapCache,
NodeInfoWatcher(
configuration.baseDirectory,
@Suppress("LeakingThis")
rxIoScheduler,
Duration.ofMillis(configuration.additionalNodeInfoPollingFrequencyMsec)
),
networkMapClient,
configuration.baseDirectory,
configuration.extraNetworkMapKeys,
networkParametersStorage
).closeOnStop()

protected open fun makeNodeSchedulerService() = NodeSchedulerService(
platformClock,
database,
flowStarter,
servicesForResolution,
flowLogicRefFactory,
nodeProperties,
configuration.drainingModePollPeriod,
unfinishedSchedules = busyNodeLatch
).tokenize().closeOnStop()

private fun makeCordappLoader(configuration: NodeConfiguration, versionInfo: VersionInfo): CordappLoader {
val generatedCordapps = mutableListOf(VirtualCordapp.generateCore(versionInfo))
notaryLoader?.builtInNotary?.let { notaryImpl ->
@@ -792,7 +843,7 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
).tokenize()
}

private fun createExternalOperationExecutor(numberOfThreads: Int): ExecutorService {
protected open fun createExternalOperationExecutor(numberOfThreads: Int): ExecutorService {
when (numberOfThreads) {
1 -> log.info("Flow external operation executor has $numberOfThreads thread")
else -> log.info("Flow external operation executor has a max of $numberOfThreads threads")
@@ -917,68 +968,24 @@ abstract class AbstractNode<S>(val configuration: NodeConfiguration,
@VisibleForTesting
protected open fun acceptableLiveFiberCountOnStop(): Int = 0

private fun getCertificateStores(): AllCertificateStores? {
return try {
// The following will throw IOException if key file not found or KeyStoreException if keystore password is incorrect.
val sslKeyStore = configuration.p2pSslOptions.keyStore.get()
val signingCertificateStore = configuration.signingCertificateStore.get()
val trustStore = configuration.p2pSslOptions.trustStore.get()
AllCertificateStores(trustStore, sslKeyStore, signingCertificateStore)
} catch (e: IOException) {
log.error("IO exception while trying to validate keystores and truststore", e)
null
}
}

private data class AllCertificateStores(val trustStore: CertificateStore, val sslKeyStore: CertificateStore, val identitiesKeyStore: CertificateStore)

private fun validateKeyStores(): X509Certificate {
// Step 1. Check trustStore, sslKeyStore and identitiesKeyStore exist.
val certStores = try {
requireNotNull(getCertificateStores()) {
"One or more keyStores (identity or TLS) or trustStore not found. " +
"Please either copy your existing keys and certificates from another node, " +
"or if you don't have one yet, fill out the config file and run corda.jar initial-registration."
}
} catch (e: KeyStoreException) {
throw IllegalArgumentException("At least one of the keystores or truststore passwords does not match configuration.")
}
// Step 2. Check that trustStore contains the correct key-alias entry.
require(CORDA_ROOT_CA in certStores.trustStore) {
"Alias for trustRoot key not found. Please ensure you have an updated trustStore file."
}
// Step 3. Check that tls keyStore contains the correct key-alias entry.
require(CORDA_CLIENT_TLS in certStores.sslKeyStore) {
"Alias for TLS key not found. Please ensure you have an updated TLS keyStore file."
}

// Step 4. Check that identity keyStores contain the correct key-alias entry for Node CA.
require(CORDA_CLIENT_CA in certStores.identitiesKeyStore) {
"Alias for Node CA key not found. Please ensure you have an updated identity keyStore file."
}

// Step 5. Check all cert paths chain to the trusted root.
val trustRoot = certStores.trustStore[CORDA_ROOT_CA]
val sslCertChainRoot = certStores.sslKeyStore.query { getCertificateChain(CORDA_CLIENT_TLS) }.last()
val nodeCaCertChainRoot = certStores.identitiesKeyStore.query { getCertificateChain(CORDA_CLIENT_CA) }.last()

require(sslCertChainRoot == trustRoot) { "TLS certificate must chain to the trusted root." }
require(nodeCaCertChainRoot == trustRoot) { "Client CA certificate must chain to the trusted root." }

return trustRoot
}

// Specific class so that MockNode can catch it.
class DatabaseConfigurationException(message: String) : CordaException(message)

protected open fun startDatabase() {
val props = configuration.dataSourceProperties
if (props.isEmpty) throw DatabaseConfigurationException("There must be a database configured.")
database.startHikariPool(props, configuration.database, schemaService.internalSchemas(), metricRegistry, this.cordappLoader, configuration.baseDirectory, configuration.myLegalName)
startHikariPool()
// Now log the vendor string as this will also cause a connection to be tested eagerly.
logVendorString(database, log)
}

protected open fun startHikariPool() =
database.startHikariPool(configuration.dataSourceProperties, metricRegistry) { dataSource, haveCheckpoints ->
SchemaMigration(dataSource, cordappLoader, configuration.baseDirectory, configuration.myLegalName)
.checkOrUpdate(schemaService.internalSchemas, runMigrationScripts, haveCheckpoints, true)
.checkOrUpdate(schemaService.appSchemas, runMigrationScripts, haveCheckpoints && !allowAppSchemaUpgradeWithCheckpoints, false)
}

/** Loads and starts a notary service if it is configured. */
private fun maybeStartNotaryService(myNotaryIdentity: PartyAndCertificate?): NotaryService? {
return notaryLoader?.let { loader ->
@@ -1306,13 +1313,22 @@ internal fun logVendorString(database: CordaPersistence, log: Logger) {
}

// TODO Move this into its own file
class FlowStarterImpl(private val smm: StateMachineManager, private val flowLogicRefFactory: FlowLogicRefFactory) : FlowStarter {
override fun <T> startFlow(event: ExternalEvent.ExternalStartFlowEvent<T>): CordaFuture<FlowStateMachine<T>> {
smm.deliverExternalEvent(event)
class FlowStarterImpl(
private val smm: StateMachineManager,
private val flowLogicRefFactory: FlowLogicRefFactory,
private val maxClientIdLength: Int
) : FlowStarter {
override fun <T> startFlow(event: ExternalEvent.ExternalStartFlowEvent<T>): CordaFuture<out FlowStateMachineHandle<T>> {
val clientId = event.context.clientId
if (clientId != null && clientId.length > maxClientIdLength) {
throw IllegalArgumentException("clientId cannot be longer than $maxClientIdLength characters")
} else {
smm.deliverExternalEvent(event)
}
return event.future
}

override fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext): CordaFuture<FlowStateMachine<T>> {
override fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext): CordaFuture<out FlowStateMachineHandle<T>> {
val startFlowEvent = object : ExternalEvent.ExternalStartFlowEvent<T>, DeduplicationHandler {
override fun insideDatabaseTransaction() {}

@@ -1329,12 +1345,12 @@ class FlowStarterImpl(private val smm: StateMachineManager, private val flowLogi
override val context: InvocationContext
get() = context

override fun wireUpFuture(flowFuture: CordaFuture<FlowStateMachine<T>>) {
override fun wireUpFuture(flowFuture: CordaFuture<out FlowStateMachineHandle<T>>) {
_future.captureLater(flowFuture)
}

private val _future = openFuture<FlowStateMachine<T>>()
override val future: CordaFuture<FlowStateMachine<T>>
private val _future = openFuture<FlowStateMachineHandle<T>>()
override val future: CordaFuture<FlowStateMachineHandle<T>>
get() = _future
}
return startFlow(startFlowEvent)
@@ -1343,7 +1359,7 @@ class FlowStarterImpl(private val smm: StateMachineManager, private val flowLogi
override fun <T> invokeFlowAsync(
logicType: Class<out FlowLogic<T>>,
context: InvocationContext,
vararg args: Any?): CordaFuture<FlowStateMachine<T>> {
vararg args: Any?): CordaFuture<out FlowStateMachineHandle<T>> {
val logicRef = flowLogicRefFactory.createForRPC(logicType, *args)
val logic: FlowLogic<T> = uncheckedCast(flowLogicRefFactory.toFlowLogic(logicRef))
return startFlow(logic, context)
@@ -1352,13 +1368,15 @@ class FlowStarterImpl(private val smm: StateMachineManager, private val flowLogi

class ConfigurationException(message: String) : CordaException(message)

@Suppress("LongParameterList")
fun createCordaPersistence(databaseConfig: DatabaseConfig,
wellKnownPartyFromX500Name: (CordaX500Name) -> Party?,
wellKnownPartyFromAnonymous: (AbstractParty) -> Party?,
schemaService: SchemaService,
hikariProperties: Properties,
cacheFactory: NamedCacheFactory,
customClassLoader: ClassLoader?): CordaPersistence {
customClassLoader: ClassLoader?,
allowHibernateToManageAppSchema: Boolean = false): CordaPersistence {
// Register the AbstractPartyDescriptor so Hibernate doesn't warn when encountering AbstractParty. Unfortunately
// Hibernate warns about not being able to find a descriptor if we don't provide one, but won't use it by default
// so we end up providing both descriptor and converter. We should re-examine this in later versions to see if
@@ -1369,25 +1387,31 @@ fun createCordaPersistence(databaseConfig: DatabaseConfig,

val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")
return CordaPersistence(
databaseConfig,
schemaService.schemas,
jdbcUrl,
cacheFactory,
attributeConverters, customClassLoader,
errorHandler = { e ->
// "corrupting" a DatabaseTransaction only inside a flow state machine execution
FlowStateMachineImpl.currentStateMachine()?.let {
// register only the very first exception thrown throughout a chain of logical transactions
setException(e)
}
})
databaseConfig.exportHibernateJMXStatistics,
schemaService.schemas,
jdbcUrl,
cacheFactory,
attributeConverters, customClassLoader,
errorHandler = { e ->
// "corrupting" a DatabaseTransaction only inside a flow state machine execution
FlowStateMachineImpl.currentStateMachine()?.let {
// register only the very first exception thrown throughout a chain of logical transactions
setException(e)
}
},
allowHibernateToManageAppSchema = allowHibernateToManageAppSchema)
}

fun CordaPersistence.startHikariPool(hikariProperties: Properties, databaseConfig: DatabaseConfig, schemas: Set<MappedSchema>, metricRegistry: MetricRegistry? = null, cordappLoader: CordappLoader? = null, currentDir: Path? = null, ourName: CordaX500Name) {
@Suppress("ThrowsCount")
fun CordaPersistence.startHikariPool(
hikariProperties: Properties,
metricRegistry: MetricRegistry? = null,
schemaMigration: (DataSource, Boolean) -> Unit) {
try {
val dataSource = DataSourceFactory.createDataSource(hikariProperties, metricRegistry = metricRegistry)
val schemaMigration = SchemaMigration(schemas, dataSource, databaseConfig, cordappLoader, currentDir, ourName)
schemaMigration.nodeStartup(dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L })
val haveCheckpoints = dataSource.connection.use { DBCheckpointStorage.getCheckpointCount(it) != 0L }

schemaMigration(dataSource, haveCheckpoints)
start(dataSource)
} catch (ex: Exception) {
when {
@@ -1411,6 +1435,14 @@ fun CordaPersistence.startHikariPool(hikariProperties: Properties, databaseConfi
}
}

fun SchemaMigration.checkOrUpdate(schemas: Set<MappedSchema>, update: Boolean, haveCheckpoints: Boolean, forceThrowOnMissingMigration: Boolean): SchemaMigration {
if (update)
this.runMigration(haveCheckpoints, schemas, forceThrowOnMissingMigration)
else
this.checkState(schemas, forceThrowOnMissingMigration)
return this
}
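Note (not part of the diff): checkOrUpdate above either runs or merely validates the Liquibase state for one schema set and returns the receiver so calls can be chained, which is how startHikariPool and runDatabaseMigrationScripts use it. A hedged sketch with the positional arguments named; dataSource, cordappLoader, configuration and schemaService are assumed to be in scope as in AbstractNode:

SchemaMigration(dataSource, cordappLoader, configuration.baseDirectory, configuration.myLegalName)
    // Core/node tables: update when migrations are enabled; always fail hard on a missing migration.
    .checkOrUpdate(schemaService.internalSchemas, update = runMigrationScripts, haveCheckpoints = haveCheckpoints, forceThrowOnMissingMigration = true)
    // CorDapp tables: same switch, but a missing app migration does not force a failure (Hibernate may manage it in dev mode).
    .checkOrUpdate(schemaService.appSchemas, update = runMigrationScripts, haveCheckpoints = haveCheckpoints && !allowAppSchemaUpgradeWithCheckpoints, forceThrowOnMissingMigration = false)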
fun clientSslOptionsCompatibleWith(nodeRpcOptions: NodeRpcOptions): ClientRpcSslOptions? {

if (!nodeRpcOptions.useSsl || nodeRpcOptions.sslConfig == null) {

@@ -3,7 +3,7 @@ package net.corda.node.internal
import net.corda.core.context.InvocationContext
import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StartableByService
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.FlowStateMachineHandle
import net.corda.core.internal.concurrent.doneFuture
import net.corda.core.messaging.FlowHandle
import net.corda.core.messaging.FlowHandleImpl
@@ -78,7 +78,7 @@ internal class AppServiceHubImpl<T : SerializeAsToken>(private val serviceHub: S
return FlowProgressHandleImpl(
id = stateMachine.id,
returnValue = stateMachine.resultFuture,
progress = stateMachine.logic.track()?.updates ?: Observable.empty()
progress = stateMachine.logic?.track()?.updates ?: Observable.empty()
)
}

@@ -95,7 +95,7 @@ internal class AppServiceHubImpl<T : SerializeAsToken>(private val serviceHub: S
}
}

private fun <T> startFlowChecked(flow: FlowLogic<T>): FlowStateMachine<T> {
private fun <T> startFlowChecked(flow: FlowLogic<T>): FlowStateMachineHandle<T> {
val logicType = flow.javaClass
require(logicType.isAnnotationPresent(StartableByService::class.java)) { "${logicType.name} was not designed for starting by a CordaService" }
// TODO check service permissions

@@ -19,7 +19,7 @@ import net.corda.core.identity.AbstractParty
import net.corda.core.identity.CordaX500Name
import net.corda.core.identity.Party
import net.corda.core.internal.AttachmentTrustInfo
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.FlowStateMachineHandle
import net.corda.core.internal.RPC_UPLOADER
import net.corda.core.internal.STRUCTURAL_STEP_PREFIX
import net.corda.core.internal.messaging.InternalCordaRPCOps
@@ -27,6 +27,8 @@ import net.corda.core.internal.sign
import net.corda.core.messaging.DataFeed
import net.corda.core.messaging.FlowHandle
import net.corda.core.messaging.FlowHandleImpl
import net.corda.core.messaging.FlowHandleWithClientId
import net.corda.core.messaging.FlowHandleWithClientIdImpl
import net.corda.core.messaging.FlowProgressHandle
import net.corda.core.messaging.FlowProgressHandleImpl
import net.corda.core.messaging.ParametersUpdateInfo
@@ -170,6 +172,14 @@ internal class CordaRPCOpsImpl(

override fun killFlow(id: StateMachineRunId): Boolean = smm.killFlow(id)

override fun <T> reattachFlowWithClientId(clientId: String): FlowHandleWithClientId<T>? {
return smm.reattachFlowWithClientId<T>(clientId)?.run {
FlowHandleWithClientIdImpl(id = id, returnValue = resultFuture, clientId = clientId)
}
}

override fun removeClientId(clientId: String): Boolean = smm.removeClientId(clientId)

override fun stateMachinesFeed(): DataFeed<List<StateMachineInfo>, StateMachineUpdate> {

val (allStateMachines, changes) = smm.track()
@@ -236,27 +246,38 @@ internal class CordaRPCOpsImpl(
}

override fun <T> startTrackedFlowDynamic(logicType: Class<out FlowLogic<T>>, vararg args: Any?): FlowProgressHandle<T> {
val stateMachine = startFlow(logicType, args)
val stateMachine = startFlow(logicType, context(), args)
return FlowProgressHandleImpl(
id = stateMachine.id,
returnValue = stateMachine.resultFuture,
progress = stateMachine.logic.track()?.updates?.filter { !it.startsWith(STRUCTURAL_STEP_PREFIX) } ?: Observable.empty(),
stepsTreeIndexFeed = stateMachine.logic.trackStepsTreeIndex(),
stepsTreeFeed = stateMachine.logic.trackStepsTree()
progress = stateMachine.logic?.track()?.updates?.filter { !it.startsWith(STRUCTURAL_STEP_PREFIX) } ?: Observable.empty(),
stepsTreeIndexFeed = stateMachine.logic?.trackStepsTreeIndex(),
stepsTreeFeed = stateMachine.logic?.trackStepsTree()
)
}

override fun <T> startFlowDynamic(logicType: Class<out FlowLogic<T>>, vararg args: Any?): FlowHandle<T> {
val stateMachine = startFlow(logicType, args)
val stateMachine = startFlow(logicType, context(), args)
return FlowHandleImpl(id = stateMachine.id, returnValue = stateMachine.resultFuture)
}

private fun <T> startFlow(logicType: Class<out FlowLogic<T>>, args: Array<out Any?>): FlowStateMachine<T> {
override fun <T> startFlowDynamicWithClientId(
clientId: String,
logicType: Class<out FlowLogic<T>>,
vararg args: Any?
): FlowHandleWithClientId<T> {
return startFlow(logicType, context().withClientId(clientId), args).run {
FlowHandleWithClientIdImpl(id = id, returnValue = resultFuture, clientId = clientId)
}
}

@Suppress("SpreadOperator")
private fun <T> startFlow(logicType: Class<out FlowLogic<T>>, context: InvocationContext, args: Array<out Any?>): FlowStateMachineHandle<T> {
if (!logicType.isAnnotationPresent(StartableByRPC::class.java)) throw NonRpcFlowException(logicType)
if (isFlowsDrainingModeEnabled()) {
throw RejectedCommandException("Node is draining before shutdown. Cannot start new flows through RPC.")
}
return flowStarter.invokeFlowAsync(logicType, context(), *args).getOrThrow()
return flowStarter.invokeFlowAsync(logicType, context, *args).getOrThrow()
}

override fun attachmentExists(id: SecureHash): Boolean {
@@ -464,4 +485,6 @@ internal class CordaRPCOpsImpl(
private inline fun <reified TARGET> Class<*>.checkIsA() {
require(TARGET::class.java.isAssignableFrom(this)) { "$name is not a ${TARGET::class.java.name}" }
}

private fun InvocationContext.withClientId(clientId: String) = copy(clientId = clientId)
}
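Note (not part of the diff): startFlowDynamicWithClientId and reattachFlowWithClientId above are the RPC surface for client-id flow starts; ids longer than DBCheckpointStorage.MAX_CLIENT_ID_LENGTH (512, see the DBCheckpointStorage hunk below) are rejected by FlowStarterImpl. A hedged usage sketch, where rpc is an assumed CordaRPCOps connection and MyFlow, MyResult and someArg are hypothetical placeholders:

// Start a flow under a caller-chosen client id.
val handle = rpc.startFlowDynamicWithClientId("order-12345", MyFlow::class.java, someArg)
// Later, presumably after a client restart, the same id re-attaches to the flow and its result
// instead of starting a second copy.
val reattached = rpc.reattachFlowWithClientId<MyResult>("order-12345")
// Once the result has been consumed, the client-id mapping can be released.
rpc.removeClientId("order-12345")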
@@ -125,7 +125,8 @@ open class Node(configuration: NodeConfiguration,
flowManager: FlowManager = NodeFlowManager(configuration.flowOverrides),
cacheFactoryPrototype: BindableNamedCacheFactory = DefaultNamedCacheFactory(),
djvmBootstrapSource: ApiSource = createBootstrapSource(configuration),
djvmCordaSource: UserSource? = createCordaSource(configuration)
djvmCordaSource: UserSource? = createCordaSource(configuration),
allowHibernateToManageAppSchema: Boolean = false
) : AbstractNode<NodeInfo>(
configuration,
createClock(configuration),
@@ -135,7 +136,8 @@ open class Node(configuration: NodeConfiguration,
// Under normal (non-test execution) it will always be "1"
AffinityExecutor.ServiceAffinityExecutor("Node thread-${sameVmNodeCounter.incrementAndGet()}", 1),
djvmBootstrapSource = djvmBootstrapSource,
djvmCordaSource = djvmCordaSource
djvmCordaSource = djvmCordaSource,
allowHibernateToManageAppSchema = allowHibernateToManageAppSchema
) {

override fun createStartedNode(nodeInfo: NodeInfo, rpcOps: CordaRPCOps, notaryService: NotaryService?): NodeInfo =
@@ -524,12 +526,7 @@ open class Node(configuration: NodeConfiguration,
}
val databaseName = databaseUrl.removePrefix(h2Prefix).substringBefore(';')
val baseDir = Paths.get(databaseName).parent.toString()
val server = org.h2.tools.Server.createTcpServer(
"-tcpPort", effectiveH2Settings.address.port.toString(),
"-tcpAllowOthers",
"-tcpDaemon",
"-baseDir", baseDir,
"-key", "node", databaseName)
val server = createH2Server(baseDir, databaseName, effectiveH2Settings.address.port)
// override interface that createTcpServer listens on (which is always 0.0.0.0)
System.setProperty("h2.bindAddress", effectiveH2Settings.address.host)
runOnStop += server::stop
@@ -551,6 +548,14 @@ open class Node(configuration: NodeConfiguration,
database.closeOnStop()
}

open fun createH2Server(baseDir: String, databaseName: String, port: Int): org.h2.tools.Server =
org.h2.tools.Server.createTcpServer(
"-tcpPort", port.toString(),
"-tcpAllowOthers",
"-tcpDaemon",
"-baseDir", baseDir,
"-key", "node", databaseName)

private val _startupComplete = openFuture<Unit>()
val startupComplete: CordaFuture<Unit> get() = _startupComplete

@@ -559,6 +564,16 @@ open class Node(configuration: NodeConfiguration,
return super.generateAndSaveNodeInfo()
}

override fun runDatabaseMigrationScripts(
updateCoreSchemas: Boolean,
updateAppSchemas: Boolean,
updateAppSchemasWithCheckpoints: Boolean) {
if (allowHibernateToManageAppSchema) {
initialiseSerialization()
}
super.runDatabaseMigrationScripts(updateCoreSchemas, updateAppSchemas, updateAppSchemasWithCheckpoints)
}

override fun start(): NodeInfo {
registerDefaultExceptionHandler()
initialiseSerialization()
@@ -0,0 +1,76 @@
package net.corda.node.internal

import net.corda.core.utilities.loggerFor
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.config.configureWithDevSSLCertificate
import net.corda.nodeapi.internal.config.CertificateStore
import net.corda.nodeapi.internal.crypto.X509Utilities
import net.corda.nodeapi.internal.cryptoservice.CryptoService
import net.corda.nodeapi.internal.cryptoservice.bouncycastle.BCCryptoService
import java.io.IOException
import java.security.KeyStoreException
import java.security.cert.X509Certificate

private data class AllCertificateStores(val trustStore: CertificateStore, val sslKeyStore: CertificateStore, val identitiesKeyStore: CertificateStore)


internal fun NodeConfiguration.initKeyStores(cryptoService: CryptoService): X509Certificate {
if (devMode) {
configureWithDevSSLCertificate(cryptoService)
// configureWithDevSSLCertificate is a devMode process that writes directly to keystore files, so
// we should re-synchronise BCCryptoService with the updated keystore file.
if (cryptoService is BCCryptoService) {
cryptoService.resyncKeystore()
}
}
return validateKeyStores()
}

private fun NodeConfiguration.validateKeyStores(): X509Certificate {
// Step 1. Check trustStore, sslKeyStore and identitiesKeyStore exist.
val certStores = try {
requireNotNull(getCertificateStores()) {
"One or more keyStores (identity or TLS) or trustStore not found. " +
"Please either copy your existing keys and certificates from another node, " +
"or if you don't have one yet, fill out the config file and run corda.jar initial-registration."
}
} catch (e: KeyStoreException) {
throw IllegalArgumentException("At least one of the keystores or truststore passwords does not match configuration.")
}
// Step 2. Check that trustStore contains the correct key-alias entry.
require(X509Utilities.CORDA_ROOT_CA in certStores.trustStore) {
"Alias for trustRoot key not found. Please ensure you have an updated trustStore file."
}
// Step 3. Check that tls keyStore contains the correct key-alias entry.
require(X509Utilities.CORDA_CLIENT_TLS in certStores.sslKeyStore) {
"Alias for TLS key not found. Please ensure you have an updated TLS keyStore file."
}

// Step 4. Check that identity keyStores contain the correct key-alias entry for Node CA.
require(X509Utilities.CORDA_CLIENT_CA in certStores.identitiesKeyStore) {
"Alias for Node CA key not found. Please ensure you have an updated identity keyStore file."
}

// Step 5. Check all cert paths chain to the trusted root.
val trustRoot = certStores.trustStore[X509Utilities.CORDA_ROOT_CA]
val sslCertChainRoot = certStores.sslKeyStore.query { getCertificateChain(X509Utilities.CORDA_CLIENT_TLS) }.last()
val nodeCaCertChainRoot = certStores.identitiesKeyStore.query { getCertificateChain(X509Utilities.CORDA_CLIENT_CA) }.last()

require(sslCertChainRoot == trustRoot) { "TLS certificate must chain to the trusted root." }
require(nodeCaCertChainRoot == trustRoot) { "Client CA certificate must chain to the trusted root." }

return trustRoot
}

private fun NodeConfiguration.getCertificateStores(): AllCertificateStores? {
return try {
// The following will throw IOException if key file not found or KeyStoreException if keystore password is incorrect.
val sslKeyStore = p2pSslOptions.keyStore.get()
val signingCertificateStore = signingCertificateStore.get()
val trustStore = p2pSslOptions.trustStore.get()
AllCertificateStores(trustStore, sslKeyStore, signingCertificateStore)
} catch (e: IOException) {
loggerFor<NodeConfiguration>().error("IO exception while trying to validate keystores and truststore", e)
null
}
}
@@ -76,10 +76,18 @@ open class NodeStartupCli : CordaCliWrapper("corda", "Runs a Corda Node") {
private val justGenerateRpcSslCertsCli by lazy { GenerateRpcSslCertsCli(startup) }
private val initialRegistrationCli by lazy { InitialRegistrationCli(startup) }
private val validateConfigurationCli by lazy { ValidateConfigurationCli() }
private val runMigrationScriptsCli by lazy { RunMigrationScriptsCli(startup) }
private val synchroniseAppSchemasCli by lazy { SynchroniseSchemasCli(startup) }

override fun initLogging(): Boolean = this.initLogging(cmdLineOptions.baseDirectory)

override fun additionalSubCommands() = setOf(networkCacheCli, justGenerateNodeInfoCli, justGenerateRpcSslCertsCli, initialRegistrationCli, validateConfigurationCli)
override fun additionalSubCommands() = setOf(networkCacheCli,
justGenerateNodeInfoCli,
justGenerateRpcSslCertsCli,
initialRegistrationCli,
validateConfigurationCli,
runMigrationScriptsCli,
synchroniseAppSchemasCli)

override fun call(): Int {
if (!validateBaseDirectory()) {
@@ -201,7 +209,7 @@ open class NodeStartup : NodeStartupLogging {

protected open fun preNetworkRegistration(conf: NodeConfiguration) = Unit

open fun createNode(conf: NodeConfiguration, versionInfo: VersionInfo): Node = Node(conf, versionInfo)
open fun createNode(conf: NodeConfiguration, versionInfo: VersionInfo): Node = Node(conf, versionInfo, allowHibernateToManageAppSchema = cmdLineOptions.allowHibernateToManageAppSchema)

fun startNode(node: Node, startTime: Long) {
if (node.configuration.devMode) {

@@ -0,0 +1,29 @@
package net.corda.node.internal.subcommands

import net.corda.node.internal.Node
import net.corda.node.internal.NodeCliCommand
import net.corda.node.internal.NodeStartup
import net.corda.node.internal.RunAfterNodeInitialisation
import picocli.CommandLine

class RunMigrationScriptsCli(startup: NodeStartup) : NodeCliCommand("run-migration-scripts", "Run the database migration scripts and create or update schemas", startup) {
@CommandLine.Option(names = ["--core-schemas"], description = ["Manage the core/node schemas"])
var updateCoreSchemas: Boolean = false

@CommandLine.Option(names = ["--app-schemas"], description = ["Manage the CorDapp schemas"])
var updateAppSchemas: Boolean = false

@CommandLine.Option(names = ["--update-app-schema-with-checkpoints"], description = ["Allow updating app schema even if there are suspended flows"])
var updateAppSchemaWithCheckpoints: Boolean = false


override fun runProgram(): Int {
require(updateAppSchemas || updateCoreSchemas) { "Nothing to do: at least one of --core-schemas or --app-schemas must be set" }
return startup.initialiseAndRun(cmdLineOptions, object : RunAfterNodeInitialisation {
override fun run(node: Node) {
node.runDatabaseMigrationScripts(updateCoreSchemas, updateAppSchemas, updateAppSchemaWithCheckpoints)
}
})
}
}
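Note (not part of the diff): this new sub-command is registered in NodeStartupCli above, and at least one of --core-schemas or --app-schemas must be supplied. A typical invocation would look something like `java -jar corda.jar run-migration-scripts --core-schemas --app-schemas` (shown for illustration only), which ends up calling node.runDatabaseMigrationScripts as sketched earlier.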
@@ -0,0 +1,16 @@
package net.corda.node.internal.subcommands

import net.corda.node.internal.Node
import net.corda.node.internal.NodeCliCommand
import net.corda.node.internal.NodeStartup
import net.corda.node.internal.RunAfterNodeInitialisation

class SynchroniseSchemasCli(startup: NodeStartup) : NodeCliCommand("sync-app-schemas", "Create changelog entries for liquibase files found in CorDapps", startup) {
override fun runProgram(): Int {
return startup.initialiseAndRun(cmdLineOptions, object : RunAfterNodeInitialisation {
override fun run(node: Node) {
node.runSchemaSync()
}
})
}
}
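Note (not part of the diff): sync-app-schemas is the companion command for nodes whose CorDapp tables already exist; node.runSchemaSync() (see the AbstractNode hunk earlier) records the CorDapps' Liquibase changesets in the changelog, presumably without re-running the migrations themselves, e.g. `java -jar corda.jar sync-app-schemas` (illustrative invocation).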
@@ -10,9 +10,11 @@ import net.corda.core.identity.CordaX500Name
import net.corda.core.schemas.MappedSchema
import net.corda.node.SimpleClock
import net.corda.node.services.identity.PersistentIdentityService
import net.corda.node.services.persistence.*
import net.corda.node.services.persistence.AbstractPartyToX500NameAsStringConverter
import net.corda.node.services.persistence.DBTransactionStorage
import net.corda.node.services.persistence.NodeAttachmentService
import net.corda.node.services.persistence.PublicKeyToTextConverter
import net.corda.nodeapi.internal.persistence.CordaPersistence
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.nodeapi.internal.persistence.SchemaMigration.Companion.NODE_X500_NAME
import java.io.PrintWriter
import java.sql.Connection
@@ -74,7 +76,6 @@ abstract class CordaMigration : CustomTaskChange {
cacheFactory: MigrationNamedCacheFactory,
identityService: PersistentIdentityService,
schema: Set<MappedSchema>): CordaPersistence {
val configDefaults = DatabaseConfig()
val attributeConverters = listOf(
PublicKeyToTextConverter(),
AbstractPartyToX500NameAsStringConverter(
@@ -83,7 +84,7 @@ abstract class CordaMigration : CustomTaskChange {
)
// Liquibase handles closing the database connection when migrations are finished. If the connection is closed here, then further
// migrations may fail.
return CordaPersistence(configDefaults, schema, jdbcUrl, cacheFactory, attributeConverters, closeConnection = false)
return CordaPersistence(false, schema, jdbcUrl, cacheFactory, attributeConverters, closeConnection = false)
}

override fun validate(database: Database?): ValidationErrors? {

@@ -4,6 +4,7 @@ import net.corda.core.flows.StateMachineRunId
import net.corda.core.serialization.SerializedBytes
import net.corda.node.services.statemachine.Checkpoint
import net.corda.node.services.statemachine.CheckpointState
import net.corda.node.services.statemachine.FlowResultMetadata
import net.corda.node.services.statemachine.FlowState
import java.util.stream.Stream

@@ -23,6 +24,16 @@ interface CheckpointStorage {
fun updateCheckpoint(id: StateMachineRunId, checkpoint: Checkpoint, serializedFlowState: SerializedBytes<FlowState>?,
serializedCheckpointState: SerializedBytes<CheckpointState>)

/**
* Update an existing checkpoints status ([Checkpoint.status]).
*/
fun updateStatus(runId: StateMachineRunId, flowStatus: Checkpoint.FlowStatus)

/**
* Update an existing checkpoints compatibility flag ([Checkpoint.compatible]).
*/
fun updateCompatible(runId: StateMachineRunId, compatible: Boolean)

/**
* Update all persisted checkpoints with status [Checkpoint.FlowStatus.RUNNABLE] or [Checkpoint.FlowStatus.HOSPITALIZED],
* changing the status to [Checkpoint.FlowStatus.PAUSED].
@@ -31,9 +42,12 @@ interface CheckpointStorage {

/**
* Remove existing checkpoint from the store.
*
* [mayHavePersistentResults] is used for optimization. If set to [false] it will not attempt to delete the database result or the database exception.
* Please note that if there is a doubt on whether a flow could be finished or not [mayHavePersistentResults] should be set to [true].
* @return whether the id matched a checkpoint that was removed.
*/
fun removeCheckpoint(id: StateMachineRunId): Boolean
fun removeCheckpoint(id: StateMachineRunId, mayHavePersistentResults: Boolean = true): Boolean

/**
* Load an existing checkpoint from the store.
@@ -66,5 +80,19 @@ interface CheckpointStorage {
*/
fun getPausedCheckpoints(): Stream<Pair<StateMachineRunId, Checkpoint.Serialized>>

fun updateStatus(runId: StateMachineRunId, flowStatus: Checkpoint.FlowStatus)
fun getFinishedFlowsResultsMetadata(): Stream<Pair<StateMachineRunId, FlowResultMetadata>>

/**
* Load a flow result from the store. If [throwIfMissing] is true then it throws an [IllegalStateException]
* if the flow result is missing in the database.
*/
fun getFlowResult(id: StateMachineRunId, throwIfMissing: Boolean = false): Any?

/**
* Load a flow exception from the store. If [throwIfMissing] is true then it throws an [IllegalStateException]
* if the flow exception is missing in the database.
*/
fun getFlowException(id: StateMachineRunId, throwIfMissing: Boolean = false): Any?

fun removeFlowException(id: StateMachineRunId): Boolean
}

@@ -215,13 +215,13 @@ interface FlowStarter {
* just synthesizes an [ExternalEvent.ExternalStartFlowEvent] and calls the method below.
* @param context indicates who started the flow, see: [InvocationContext].
*/
fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext): CordaFuture<FlowStateMachine<T>>
fun <T> startFlow(logic: FlowLogic<T>, context: InvocationContext): CordaFuture<out FlowStateMachineHandle<T>>

/**
* Starts a flow as described by an [ExternalEvent.ExternalStartFlowEvent]. If a transient error
* occurs during invocation, it will re-attempt to start the flow.
*/
fun <T> startFlow(event: ExternalEvent.ExternalStartFlowEvent<T>): CordaFuture<FlowStateMachine<T>>
fun <T> startFlow(event: ExternalEvent.ExternalStartFlowEvent<T>): CordaFuture<out FlowStateMachineHandle<T>>

/**
* Will check [logicType] and [args] against a whitelist and if acceptable then construct and initiate the flow.
@@ -232,9 +232,10 @@ interface FlowStarter {
* [logicType] or [args].
*/
fun <T> invokeFlowAsync(
logicType: Class<out FlowLogic<T>>,
context: InvocationContext,
vararg args: Any?): CordaFuture<FlowStateMachine<T>>
logicType: Class<out FlowLogic<T>>,
context: InvocationContext,
vararg args: Any?
): CordaFuture<out FlowStateMachineHandle<T>>
}

interface StartedNodeServices : ServiceHubInternal, FlowStarter

@@ -15,7 +15,6 @@ import net.corda.nodeapi.internal.config.MutualSslConfiguration
import net.corda.nodeapi.internal.config.SslConfiguration
import net.corda.nodeapi.internal.config.User
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.nodeapi.internal.persistence.SchemaInitializationType
import net.corda.tools.shell.SSHDConfiguration
import java.net.URL
import java.nio.file.Path
@@ -132,8 +131,6 @@ data class NodeConfigurationImpl(
fun messagingServerExternal(messagingServerAddress: NetworkHostAndPort?) = messagingServerAddress != null

fun database(devMode: Boolean) = DatabaseConfig(
initialiseSchema = devMode,
initialiseAppSchema = if(devMode) SchemaInitializationType.UPDATE else SchemaInitializationType.VALIDATE,
exportHibernateJMXStatistics = devMode
)
}

@@ -14,6 +14,7 @@ import net.corda.common.validation.internal.Validated.Companion.invalid
import net.corda.common.validation.internal.Validated.Companion.valid
import net.corda.core.context.AuthServiceId
import net.corda.core.internal.notary.NotaryServiceFlow
import net.corda.node.internal.ConfigurationException
import net.corda.node.services.config.AuthDataSourceType
import net.corda.node.services.config.CertChainPolicyConfig
import net.corda.node.services.config.CertChainPolicyType
@@ -44,7 +45,6 @@ import net.corda.nodeapi.BrokerRpcSslOptions
import net.corda.nodeapi.internal.config.User
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import net.corda.nodeapi.internal.persistence.TransactionIsolationLevel
import net.corda.nodeapi.internal.persistence.SchemaInitializationType
import net.corda.notary.experimental.bftsmart.BFTSmartConfig
import net.corda.notary.experimental.raft.RaftConfig
import net.corda.tools.shell.SSHDConfiguration
@@ -267,16 +267,32 @@ internal object SSHDConfigurationSpec : Configuration.Specification<SSHDConfigur
override fun parseValid(configuration: Config, options: Configuration.Options): Valid<SSHDConfiguration> = attempt<SSHDConfiguration, IllegalArgumentException> { SSHDConfiguration(configuration.withOptions(options)[port]) }
}

enum class SchemaInitializationType{
NONE,
VALIDATE,
UPDATE
}

internal object DatabaseConfigSpec : Configuration.Specification<DatabaseConfig>("DatabaseConfig") {
private val initialiseSchema by boolean().optional().withDefaultValue(DatabaseConfig.Defaults.initialiseSchema)
private val initialiseAppSchema by enum(SchemaInitializationType::class).optional().withDefaultValue(DatabaseConfig.Defaults.initialiseAppSchema)
private val transactionIsolationLevel by enum(TransactionIsolationLevel::class).optional().withDefaultValue(DatabaseConfig.Defaults.transactionIsolationLevel)
private val initialiseSchema by boolean().optional()
private val initialiseAppSchema by enum(SchemaInitializationType::class).optional()
private val transactionIsolationLevel by enum(TransactionIsolationLevel::class).optional()
private val exportHibernateJMXStatistics by boolean().optional().withDefaultValue(DatabaseConfig.Defaults.exportHibernateJMXStatistics)
private val mappedSchemaCacheSize by long().optional().withDefaultValue(DatabaseConfig.Defaults.mappedSchemaCacheSize)

override fun parseValid(configuration: Config, options: Configuration.Options): Valid<DatabaseConfig> {
if (initialiseSchema.isSpecifiedBy(configuration)){
throw ConfigurationException("Unsupported configuration database/initialiseSchema - this option has been removed, please use the run-migration-scripts sub-command or the database management tool to modify schemas")
}
if (initialiseAppSchema.isSpecifiedBy(configuration)){
throw ConfigurationException("Unsupported configuration database/initialiseAppSchema - this option has been removed, please use the run-migration-scripts sub-command or the database management tool to modify schemas")
}
if (transactionIsolationLevel.isSpecifiedBy(configuration)){
throw ConfigurationException("Unsupported configuration database/transactionIsolationLevel - this option has been removed and cannot be changed")
}
val config = configuration.withOptions(options)
return valid(DatabaseConfig(config[initialiseSchema], config[initialiseAppSchema], config[transactionIsolationLevel], config[exportHibernateJMXStatistics], config[mappedSchemaCacheSize]))

return valid(DatabaseConfig(config[exportHibernateJMXStatistics], config[mappedSchemaCacheSize]))
}
}
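Note (not part of the diff): with the DatabaseConfigSpec change above, a node.conf that still sets database.initialiseSchema, database.initialiseAppSchema or database.transactionIsolationLevel now fails to parse with the ConfigurationException messages shown; for example a legacy `database = { initialiseSchema = true }` block has to be removed and replaced by an explicit run-migration-scripts run.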
@ -258,7 +258,7 @@ class NodeSchedulerService(private val clock: CordaClock,
|
||||
return "${javaClass.simpleName}($scheduledState)"
|
||||
}
|
||||
|
||||
override fun wireUpFuture(flowFuture: CordaFuture<FlowStateMachine<Any?>>) {
|
||||
override fun wireUpFuture(flowFuture: CordaFuture<out FlowStateMachineHandle<Any?>>) {
|
||||
_future.captureLater(flowFuture)
|
||||
val future = _future.flatMap { it.resultFuture }
|
||||
future.then {
|
||||
@ -266,8 +266,8 @@ class NodeSchedulerService(private val clock: CordaClock,
|
||||
}
|
||||
}
|
||||
|
||||
private val _future = openFuture<FlowStateMachine<Any?>>()
|
||||
override val future: CordaFuture<FlowStateMachine<Any?>>
|
||||
private val _future = openFuture<FlowStateMachineHandle<Any?>>()
|
||||
override val future: CordaFuture<FlowStateMachineHandle<Any?>>
|
||||
get() = _future
|
||||
}
|
||||
|
||||
|
@ -13,6 +13,12 @@ internal fun InvocationContext.pushToLoggingContext() {
|
||||
origin.pushToLoggingContext()
|
||||
externalTrace?.pushToLoggingContext("external_")
|
||||
impersonatedActor?.pushToLoggingContext("impersonating_")
|
||||
|
||||
clientId?.let {
|
||||
MDC.getMDCAdapter().apply {
|
||||
put("client_id", it)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
internal fun Trace.pushToLoggingContext(prefix: String = "") {
|
||||
|
@ -6,8 +6,11 @@ import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.internal.PLATFORM_VERSION
|
||||
import net.corda.core.internal.VisibleForTesting
|
||||
import net.corda.core.internal.uncheckedCast
|
||||
import net.corda.core.flows.ResultSerializationException
|
||||
import net.corda.core.serialization.SerializationDefaults
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
import net.corda.core.serialization.deserialize
|
||||
import net.corda.core.serialization.internal.MissingSerializerException
|
||||
import net.corda.core.serialization.serialize
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.node.services.api.CheckpointStorage
|
||||
@ -15,6 +18,7 @@ import net.corda.node.services.statemachine.Checkpoint
|
||||
import net.corda.node.services.statemachine.Checkpoint.FlowStatus
|
||||
import net.corda.node.services.statemachine.CheckpointState
|
||||
import net.corda.node.services.statemachine.ErrorState
|
||||
import net.corda.node.services.statemachine.FlowResultMetadata
|
||||
import net.corda.node.services.statemachine.FlowState
|
||||
import net.corda.node.services.statemachine.SubFlowVersion
|
||||
import net.corda.nodeapi.internal.persistence.NODE_DATABASE_PREFIX
|
||||
@ -55,9 +59,28 @@ class DBCheckpointStorage(
|
||||
private const val MAX_EXC_TYPE_LENGTH = 256
|
||||
private const val MAX_FLOW_NAME_LENGTH = 128
|
||||
private const val MAX_PROGRESS_STEP_LENGTH = 256
|
||||
const val MAX_CLIENT_ID_LENGTH = 512
|
||||
|
||||
private val RUNNABLE_CHECKPOINTS = setOf(FlowStatus.RUNNABLE, FlowStatus.HOSPITALIZED)
|
||||
|
||||
// This is a dummy [DBFlowMetadata] object which help us whenever we want to persist a [DBFlowCheckpoint], but not persist its [DBFlowMetadata].
|
||||
// [DBFlowCheckpoint] needs to always reference a [DBFlowMetadata] ([DBFlowCheckpoint.flowMetadata] is not nullable).
|
||||
// However, since we do not -hibernate- cascade, it does not get persisted into the database.
|
||||
private val dummyDBFlowMetadata: DBFlowMetadata = DBFlowMetadata(
|
||||
flowId = "dummyFlowId",
|
||||
invocationId = "dummyInvocationId",
|
||||
flowName = "dummyFlowName",
|
||||
userSuppliedIdentifier = "dummyUserSuppliedIdentifier",
|
||||
startType = StartReason.INITIATED,
|
||||
initialParameters = ByteArray(0),
|
||||
launchingCordapp = "dummyLaunchingCordapp",
|
||||
platformVersion = -1,
|
||||
startedBy = "dummyStartedBy",
|
||||
invocationInstant = Instant.now(),
|
||||
startInstant = Instant.now(),
|
||||
finishInstant = null
|
||||
)
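// A minimal, hypothetical sketch (not Corda code, entity and field names invented) of why the
// dummy metadata above is needed: with a non-null association and no cascade configured,
// Hibernate requires some referenced instance to be set at persist time but will not insert it
// for us. No-arg constructor concerns are omitted for brevity.
// import javax.persistence.Entity
// import javax.persistence.Id
// import javax.persistence.ManyToOne
//
// @Entity
// class ParentRef(@Id val id: String)
//
// @Entity
// class ChildRow(
//     @Id val id: String,
//     // optional = false and no CascadeType: persisting ChildRow never persists ParentRef,
//     // so the caller must supply an already-persisted row or a placeholder such as dummyDBFlowMetadata.
//     @ManyToOne(optional = false)
//     val parent: ParentRef
// )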
/**
* This needs to run before Hibernate is initialised.
*

@ -137,7 +160,7 @@ class DBCheckpointStorage(
var checkpoint: ByteArray = EMPTY_BYTE_ARRAY,
@Type(type = "corda-blob")
@Column(name = "flow_state")
@Column(name = "flow_state", nullable = true)
var flowStack: ByteArray?,
@Type(type = "corda-wrapper-binary")

@ -184,28 +207,31 @@ class DBCheckpointStorage(
var flow_id: String,
@Type(type = "corda-blob")
@Column(name = "result_value", nullable = false)
var value: ByteArray = EMPTY_BYTE_ARRAY,
@Column(name = "result_value", nullable = true)
var value: ByteArray? = null,
@Column(name = "timestamp")
val persistedInstant: Instant
) {
@Suppress("ComplexMethod")
override fun equals(other: Any?): Boolean {
if (this === other) return true
if (javaClass != other?.javaClass) return false
other as DBFlowResult
if (flow_id != other.flow_id) return false
if (!value.contentEquals(other.value)) return false
val value = value
val otherValue = other.value
if (value != null) {
if (otherValue == null) return false
if (!value.contentEquals(otherValue)) return false
} else if (otherValue != null) return false
if (persistedInstant != other.persistedInstant) return false
return true
}
override fun hashCode(): Int {
var result = flow_id.hashCode()
result = 31 * result + value.contentHashCode()
result = 31 * result + (value?.contentHashCode() ?: 0)
result = 31 * result + persistedInstant.hashCode()
return result
}
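// Standalone sketch (plain Kotlin, not part of the entity) of the null-safe comparison that the
// equals/hashCode above now need, since result_value has become nullable.
fun byteArraysEqual(a: ByteArray?, b: ByteArray?): Boolean = when {
    a === b -> true
    a == null || b == null -> false
    else -> a.contentEquals(b)
}

fun byteArrayHash(a: ByteArray?): Int = a?.contentHashCode() ?: 0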
@ -298,7 +324,7 @@ class DBCheckpointStorage(
@Column(name = "invocation_time", nullable = false)
var invocationInstant: Instant,
@Column(name = "start_time", nullable = true)
@Column(name = "start_time", nullable = false)
var startInstant: Instant,
@Column(name = "finish_time", nullable = true)

@ -362,14 +388,22 @@ class DBCheckpointStorage(
now
)
val metadata = createDBFlowMetadata(flowId, checkpoint)
val metadata = createDBFlowMetadata(flowId, checkpoint, now)
val dbFlowException = if (checkpoint.status == FlowStatus.FAILED || checkpoint.status == FlowStatus.HOSPITALIZED) {
val errored = checkpoint.errorState as? ErrorState.Errored
errored?.let { createDBFlowException(flowId, it, now) }
?: throw IllegalStateException("Found '${checkpoint.status}' checkpoint whose error state is not ${ErrorState.Errored::class.java.simpleName}")
} else {
null
}
// Most fields are null as they cannot have been set when creating the initial checkpoint
val dbFlowCheckpoint = DBFlowCheckpoint(
flowId = flowId,
blob = blob,
result = null,
exceptionDetails = null,
exceptionDetails = dbFlowException,
flowMetadata = metadata,
status = checkpoint.status,
compatible = checkpoint.compatible,

@ -381,17 +415,28 @@ class DBCheckpointStorage(
currentDBSession().save(dbFlowCheckpoint)
currentDBSession().save(blob)
currentDBSession().save(metadata)
dbFlowException?.let { currentDBSession().save(it) }
}
@Suppress("ComplexMethod")
override fun updateCheckpoint(
id: StateMachineRunId, checkpoint: Checkpoint, serializedFlowState: SerializedBytes<FlowState>?,
id: StateMachineRunId,
checkpoint: Checkpoint,
serializedFlowState: SerializedBytes<FlowState>?,
serializedCheckpointState: SerializedBytes<CheckpointState>
) {
val now = clock.instant()
val flowId = id.uuid.toString()
// Do not update in DB [Checkpoint.checkpointState] or [Checkpoint.flowState] if flow failed or got hospitalized
val blob = if (checkpoint.status == FlowStatus.FAILED || checkpoint.status == FlowStatus.HOSPITALIZED) {
val blob = if (checkpoint.status == FlowStatus.HOSPITALIZED) {
// Do not update 'checkpointState' or 'flowState' if flow hospitalized
null
} else if (checkpoint.status == FlowStatus.FAILED) {
// We need to update only the 'flowState' to null, and we don't want to update the checkpoint state
// because we want to retain the last clean checkpoint state, therefore just use a query for that update.
val sqlQuery = "Update ${NODE_DATABASE_PREFIX}checkpoint_blobs set flow_state = null where flow_id = '$flowId'"
val query = currentDBSession().createNativeQuery(sqlQuery)
query.executeUpdate()
null
} else {
checkpointPerformanceRecorder.record(serializedCheckpointState, serializedFlowState)

@ -403,18 +448,31 @@ class DBCheckpointStorage(
)
}
//This code needs to be added back in when we want to persist the result. For now this requires the result to be @CordaSerializable.
//val result = updateDBFlowResult(entity, checkpoint, now)
val exceptionDetails = updateDBFlowException(flowId, checkpoint, now)
val dbFlowResult = if (checkpoint.status == FlowStatus.COMPLETED) {
try {
createDBFlowResult(flowId, checkpoint.result, now)
} catch (e: MissingSerializerException) {
throw ResultSerializationException(e)
}
} else {
null
}
val metadata = createDBFlowMetadata(flowId, checkpoint)
val dbFlowException = if (checkpoint.status == FlowStatus.FAILED || checkpoint.status == FlowStatus.HOSPITALIZED) {
val errored = checkpoint.errorState as? ErrorState.Errored
errored?.let { createDBFlowException(flowId, it, now) }
?: throw IllegalStateException("Found '${checkpoint.status}' checkpoint whose error state is not ${ErrorState.Errored::class.java.simpleName}")
} else {
null
}
// Updates to children entities ([DBFlowCheckpointBlob], [DBFlowResult], [DBFlowException], [DBFlowMetadata]) are not cascaded to children tables.
val dbFlowCheckpoint = DBFlowCheckpoint(
flowId = flowId,
blob = blob,
result = null,
exceptionDetails = exceptionDetails,
flowMetadata = metadata,
result = dbFlowResult,
exceptionDetails = dbFlowException,
flowMetadata = dummyDBFlowMetadata, // [DBFlowMetadata] will only update its 'finish_time' when a checkpoint finishes
status = checkpoint.status,
compatible = checkpoint.compatible,
progressStep = checkpoint.progressStep?.take(MAX_PROGRESS_STEP_LENGTH),

@ -424,9 +482,10 @@ class DBCheckpointStorage(
currentDBSession().update(dbFlowCheckpoint)
blob?.let { currentDBSession().update(it) }
dbFlowResult?.let { currentDBSession().save(it) }
dbFlowException?.let { currentDBSession().save(it) }
if (checkpoint.isFinished()) {
metadata.finishInstant = now
currentDBSession().update(metadata)
setDBFlowMetadataFinishTime(flowId, now)
}
}
@ -439,17 +498,18 @@ class DBCheckpointStorage(
query.executeUpdate()
}
// DBFlowResult and DBFlowException to be integrated with rest of schema
@Suppress("MagicNumber")
override fun removeCheckpoint(id: StateMachineRunId): Boolean {
override fun removeCheckpoint(id: StateMachineRunId, mayHavePersistentResults: Boolean): Boolean {
var deletedRows = 0
val flowId = id.uuid.toString()
deletedRows += deleteRow(DBFlowMetadata::class.java, DBFlowMetadata::flowId.name, flowId)
deletedRows += deleteRow(DBFlowCheckpointBlob::class.java, DBFlowCheckpointBlob::flowId.name, flowId)
deletedRows += deleteRow(DBFlowCheckpoint::class.java, DBFlowCheckpoint::flowId.name, flowId)
// resultId?.let { deletedRows += deleteRow(DBFlowResult::class.java, DBFlowResult::flow_id.name, it.toString()) }
// exceptionId?.let { deletedRows += deleteRow(DBFlowException::class.java, DBFlowException::flow_id.name, it.toString()) }
return deletedRows == 3
deletedRows += deleteRow(DBFlowCheckpointBlob::class.java, DBFlowCheckpointBlob::flowId.name, flowId)
if (mayHavePersistentResults) {
deletedRows += deleteRow(DBFlowResult::class.java, DBFlowResult::flow_id.name, flowId)
deletedRows += deleteRow(DBFlowException::class.java, DBFlowException::flow_id.name, flowId)
}
deletedRows += deleteRow(DBFlowMetadata::class.java, DBFlowMetadata::flowId.name, flowId)
return deletedRows >= 2
}
private fun <T> deleteRow(clazz: Class<T>, pk: String, value: String): Int {

@ -487,6 +547,14 @@ class DBCheckpointStorage(
return currentDBSession().find(DBFlowCheckpoint::class.java, id.uuid.toString())
}
private fun getDBFlowResult(id: StateMachineRunId): DBFlowResult? {
return currentDBSession().find(DBFlowResult::class.java, id.uuid.toString())
}
private fun getDBFlowException(id: StateMachineRunId): DBFlowException? {
return currentDBSession().find(DBFlowException::class.java, id.uuid.toString())
}
override fun getPausedCheckpoints(): Stream<Pair<StateMachineRunId, Checkpoint.Serialized>> {
val session = currentDBSession()
val jpqlQuery = """select new ${DBPausedFields::class.java.name}(checkpoint.id, blob.checkpoint, checkpoint.status,

@ -499,12 +567,53 @@ class DBCheckpointStorage(
}
}
override fun getFinishedFlowsResultsMetadata(): Stream<Pair<StateMachineRunId, FlowResultMetadata>> {
val session = currentDBSession()
val jpqlQuery =
"""select new ${DBFlowResultMetadataFields::class.java.name}(checkpoint.id, checkpoint.status, metadata.userSuppliedIdentifier)
from ${DBFlowCheckpoint::class.java.name} checkpoint
join ${DBFlowMetadata::class.java.name} metadata on metadata.id = checkpoint.flowMetadata
where checkpoint.status = ${FlowStatus.COMPLETED.ordinal} or checkpoint.status = ${FlowStatus.FAILED.ordinal}""".trimIndent()
val query = session.createQuery(jpqlQuery, DBFlowResultMetadataFields::class.java)
return query.resultList.stream().map {
StateMachineRunId(UUID.fromString(it.id)) to FlowResultMetadata(it.status, it.clientId)
}
}
override fun getFlowResult(id: StateMachineRunId, throwIfMissing: Boolean): Any? {
val dbFlowResult = getDBFlowResult(id)
if (throwIfMissing && dbFlowResult == null) {
throw IllegalStateException("Flow's $id result was not found in the database. Something is very wrong.")
}
val serializedFlowResult = dbFlowResult?.value?.let { SerializedBytes<Any>(it) }
return serializedFlowResult?.deserialize(context = SerializationDefaults.STORAGE_CONTEXT)
}
override fun getFlowException(id: StateMachineRunId, throwIfMissing: Boolean): Any? {
val dbFlowException = getDBFlowException(id)
if (throwIfMissing && dbFlowException == null) {
throw IllegalStateException("Flow's $id exception was not found in the database. Something is very wrong.")
}
val serializedFlowException = dbFlowException?.value?.let { SerializedBytes<Any>(it) }
return serializedFlowException?.deserialize(context = SerializationDefaults.STORAGE_CONTEXT)
}
override fun removeFlowException(id: StateMachineRunId): Boolean {
val flowId = id.uuid.toString()
return deleteRow(DBFlowException::class.java, DBFlowException::flow_id.name, flowId) == 1
}
override fun updateStatus(runId: StateMachineRunId, flowStatus: FlowStatus) {
val update = "Update ${NODE_DATABASE_PREFIX}checkpoints set status = ${flowStatus.ordinal} where flow_id = '${runId.uuid}'"
currentDBSession().createNativeQuery(update).executeUpdate()
}
private fun createDBFlowMetadata(flowId: String, checkpoint: Checkpoint): DBFlowMetadata {
override fun updateCompatible(runId: StateMachineRunId, compatible: Boolean) {
val update = "Update ${NODE_DATABASE_PREFIX}checkpoints set compatible = $compatible where flow_id = '${runId.uuid}'"
currentDBSession().createNativeQuery(update).executeUpdate()
}
private fun createDBFlowMetadata(flowId: String, checkpoint: Checkpoint, now: Instant): DBFlowMetadata {
val context = checkpoint.checkpointState.invocationContext
val flowInfo = checkpoint.checkpointState.subFlowStack.first()
return DBFlowMetadata(

@ -513,15 +622,14 @@ class DBCheckpointStorage(
// Truncate the flow name to fit into the database column
// Flow names are unlikely to be this long
flowName = flowInfo.flowClass.name.take(MAX_FLOW_NAME_LENGTH),
// will come from the context
userSuppliedIdentifier = null,
userSuppliedIdentifier = context.clientId,
startType = context.getStartedType(),
initialParameters = context.getFlowParameters().storageSerialize().bytes,
launchingCordapp = (flowInfo.subFlowVersion as? SubFlowVersion.CorDappFlow)?.corDappName ?: "Core flow",
platformVersion = PLATFORM_VERSION,
startedBy = context.principal().name,
invocationInstant = context.trace.invocationId.timestamp,
startInstant = clock.instant(),
startInstant = now,
finishInstant = null
)
}

@ -541,70 +649,14 @@ class DBCheckpointStorage(
)
}
/**
* Creates, updates or deletes the result related to the current flow/checkpoint.
*
* This is needed because updates are not cascading via Hibernate, therefore operations must be handled manually.
*
* A [DBFlowResult] is created if [DBFlowCheckpoint.result] does not exist and the [Checkpoint] has a result..
* The existing [DBFlowResult] is updated if [DBFlowCheckpoint.result] exists and the [Checkpoint] has a result.
* The existing [DBFlowResult] is deleted if [DBFlowCheckpoint.result] exists and the [Checkpoint] has no result.
* Nothing happens if both [DBFlowCheckpoint] and [Checkpoint] do not have a result.
*/
private fun updateDBFlowResult(flowId: String, entity: DBFlowCheckpoint, checkpoint: Checkpoint, now: Instant): DBFlowResult? {
val result = checkpoint.result?.let { createDBFlowResult(flowId, it, now) }
if (entity.result != null) {
if (result != null) {
result.flow_id = entity.result!!.flow_id
currentDBSession().update(result)
} else {
currentDBSession().delete(entity.result)
}
} else if (result != null) {
currentDBSession().save(result)
}
return result
}
private fun createDBFlowResult(flowId: String, result: Any, now: Instant): DBFlowResult {
private fun createDBFlowResult(flowId: String, result: Any?, now: Instant): DBFlowResult {
return DBFlowResult(
flow_id = flowId,
value = result.storageSerialize().bytes,
value = result?.storageSerialize()?.bytes,
persistedInstant = now
)
}
/**
* Creates, updates or deletes the error related to the current flow/checkpoint.
*
* This is needed because updates are not cascading via Hibernate, therefore operations must be handled manually.
*
* A [DBFlowException] is created if [DBFlowCheckpoint.exceptionDetails] does not exist and the [Checkpoint] has an error attached to it.
* The existing [DBFlowException] is updated if [DBFlowCheckpoint.exceptionDetails] exists and the [Checkpoint] has an error.
* The existing [DBFlowException] is deleted if [DBFlowCheckpoint.exceptionDetails] exists and the [Checkpoint] has no error.
* Nothing happens if both [DBFlowCheckpoint] and [Checkpoint] are related to no errors.
*/
// DBFlowException to be integrated with rest of schema
// Add a flag notifying if an exception is already saved in the database for below logic (are we going to do this after all?)
private fun updateDBFlowException(flowId: String, checkpoint: Checkpoint, now: Instant): DBFlowException? {
val exceptionDetails = (checkpoint.errorState as? ErrorState.Errored)?.let { createDBFlowException(flowId, it, now) }
// if (checkpoint.dbExoSkeleton.dbFlowExceptionId != null) {
// if (exceptionDetails != null) {
// exceptionDetails.flow_id = checkpoint.dbExoSkeleton.dbFlowExceptionId!!
// currentDBSession().update(exceptionDetails)
// } else {
// val session = currentDBSession()
// val entity = session.get(DBFlowException::class.java, checkpoint.dbExoSkeleton.dbFlowExceptionId)
// session.delete(entity)
// return null
// }
// } else if (exceptionDetails != null) {
// currentDBSession().save(exceptionDetails)
// checkpoint.dbExoSkeleton.dbFlowExceptionId = exceptionDetails.flow_id
// }
return exceptionDetails
}
private fun createDBFlowException(flowId: String, errorState: ErrorState.Errored, now: Instant): DBFlowException {
return errorState.errors.last().exception.let {
DBFlowException(

@ -612,12 +664,20 @@ class DBCheckpointStorage(
type = it::class.java.name.truncate(MAX_EXC_TYPE_LENGTH, true),
message = it.message?.truncate(MAX_EXC_MSG_LENGTH, false),
stackTrace = it.stackTraceToString(),
value = null, // TODO to be populated upon implementing https://r3-cev.atlassian.net/browse/CORDA-3681
value = it.storageSerialize().bytes,
persistedInstant = now
)
}
}
private fun setDBFlowMetadataFinishTime(flowId: String, now: Instant) {
val session = currentDBSession()
val sqlQuery = "Update ${NODE_DATABASE_PREFIX}flow_metadata set finish_time = '$now' " +
"where flow_id = '$flowId'"
val query = session.createNativeQuery(sqlQuery)
query.executeUpdate()
}
private fun InvocationContext.getStartedType(): StartReason {
return when (origin) {
is InvocationOrigin.RPC, is InvocationOrigin.Shell -> StartReason.RPC

@ -627,10 +687,14 @@ class DBCheckpointStorage(
}
}
@Suppress("MagicNumber")
private fun InvocationContext.getFlowParameters(): List<Any?> {
// Only RPC flows have parameters which are found in index 1
return if (arguments.isNotEmpty()) {
uncheckedCast<Any?, Array<Any?>>(arguments[1]).toList()
// Only RPC flows have parameters which are found in index 1 or index 2 (if called with client id)
return if (arguments!!.isNotEmpty()) {
arguments!!.run {
check(size == 2 || size == 3) { "Unexpected argument number provided in rpc call" }
uncheckedCast<Any?, Array<Any?>>(last()).toList()
}
} else {
emptyList()
}
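// Hypothetical illustration (names invented, layout inferred from the comment above) of the RPC
// argument shape this relies on: the flow parameters are always the last element, with an
// optional client id in between when the flow was started with one.
// listOf(flowConstructor, flowArgsArray)            -> size == 2
// listOf(flowConstructor, clientId, flowArgsArray)  -> size == 3
fun flowParametersOf(arguments: List<Any?>): List<Any?> {
    check(arguments.size == 2 || arguments.size == 3) { "Unexpected argument number provided in rpc call" }
    @Suppress("UNCHECKED_CAST")
    return (arguments.last() as Array<Any?>).toList()
}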
@ -644,7 +708,7 @@ class DBCheckpointStorage(
// Always load as a [Clean] checkpoint to represent that the checkpoint is the last _good_ checkpoint
errorState = ErrorState.Clean,
// A checkpoint with a result should not normally be loaded (it should be [null] most of the time)
result = result?.let { SerializedBytes<Any>(it.value) },
result = result?.let { dbFlowResult -> dbFlowResult.value?.let { SerializedBytes<Any>(it) } },
status = status,
progressStep = progressStep,
flowIoRequest = ioRequestType,

@ -675,6 +739,12 @@ class DBCheckpointStorage(
}
}
private class DBFlowResultMetadataFields(
val id: String,
val status: FlowStatus,
val clientId: String?
)
private fun <T : Any> T.storageSerialize(): SerializedBytes<T> {
return serialize(context = SerializationDefaults.STORAGE_CONTEXT)
}

@ -95,7 +95,9 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
}
}
private companion object {
internal companion object {
const val TRANSACTION_ALREADY_IN_PROGRESS_WARNING = "trackTransaction is called with an already existing, open DB transaction. As a result, there might be transactions missing from the returned data feed, because of race conditions."
// Rough estimate for the average of a public key and the transaction metadata - hard to get exact figures here,
// as public keys can vary in size a lot, and if someone else is holding a reference to the key, it won't add
// to the memory pressure at all here.

@ -111,7 +113,7 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
}
}
fun createTransactionsMap(cacheFactory: NamedCacheFactory, clock: CordaClock)
private fun createTransactionsMap(cacheFactory: NamedCacheFactory, clock: CordaClock)
: AppendOnlyPersistentMapBase<SecureHash, TxCacheValue, DBTransaction, String> {
return WeightBasedAppendOnlyPersistentMap<SecureHash, TxCacheValue, DBTransaction, String>(
cacheFactory = cacheFactory,

@ -221,12 +223,22 @@ class DBTransactionStorage(private val database: CordaPersistence, cacheFactory:
}
override fun trackTransaction(id: SecureHash): CordaFuture<SignedTransaction> {
val (transaction, warning) = trackTransactionInternal(id)
warning?.also { log.warn(it) }
return transaction
}
if (contextTransactionOrNull != null) {
log.warn("trackTransaction is called with an already existing, open DB transaction. As a result, there might be transactions missing from the returned data feed, because of race conditions.")
/**
* @return a pair of the signed transaction, and a string containing any warning.
*/
internal fun trackTransactionInternal(id: SecureHash): Pair<CordaFuture<SignedTransaction>, String?> {
val warning: String? = if (contextTransactionOrNull != null) {
TRANSACTION_ALREADY_IN_PROGRESS_WARNING
} else {
null
}
return trackTransactionWithNoWarning(id)
return Pair(trackTransactionWithNoWarning(id), warning)
}
override fun trackTransactionWithNoWarning(id: SecureHash): CordaFuture<SignedTransaction> {

@ -62,13 +62,12 @@ class NodeSchemaService(private val extraSchemas: Set<MappedSchema> = emptySet()
NodeInfoSchemaV1,
NodeCoreV1)
fun internalSchemas() = requiredSchemas + extraSchemas.filter { schema ->
// when mapped schemas from the finance module are present, they are considered as internal ones
schema::class.qualifiedName == "net.corda.finance.schemas.CashSchemaV1" ||
schema::class.qualifiedName == "net.corda.finance.schemas.CommercialPaperSchemaV1" ||
val internalSchemas = requiredSchemas + extraSchemas.filter { schema ->
schema::class.qualifiedName?.startsWith("net.corda.notary.") ?: false
}
val appSchemas = extraSchemas - internalSchemas
override val schemas: Set<MappedSchema> = requiredSchemas + extraSchemas
// Currently returns all schemas supported by the state, with no filtering or enrichment.

@ -58,9 +58,16 @@ sealed class Action {
data class PersistCheckpoint(val id: StateMachineRunId, val checkpoint: Checkpoint, val isCheckpointUpdate: Boolean) : Action()
/**
* Remove the checkpoint corresponding to [id].
* Update only the [status] of the checkpoint with [id].
*/
data class RemoveCheckpoint(val id: StateMachineRunId) : Action()
data class UpdateFlowStatus(val id: StateMachineRunId, val status: Checkpoint.FlowStatus): Action()
/**
* Remove the checkpoint corresponding to [id]. [mayHavePersistentResults] denotes that at the time of injecting a [RemoveCheckpoint]
* the flow could have persisted its database result or exception.
* For more information see [CheckpointStorage.removeCheckpoint].
*/
data class RemoveCheckpoint(val id: StateMachineRunId, val mayHavePersistentResults: Boolean = false) : Action()
/**
* Persist the deduplication facts of [deduplicationHandlers].

@ -106,6 +113,11 @@ sealed class Action {
val lastState: StateMachineState
) : Action()
/**
* Move the flow corresponding to [flowId] to paused.
*/
data class MoveFlowToPaused(val currentState: StateMachineState) : Action()
/**
* Schedule [event] to self.
*/
@ -67,6 +67,8 @@ internal class ActionExecutorImpl(
is Action.RetryFlowFromSafePoint -> executeRetryFlowFromSafePoint(action)
is Action.ScheduleFlowTimeout -> scheduleFlowTimeout(action)
is Action.CancelFlowTimeout -> cancelFlowTimeout(action)
is Action.MoveFlowToPaused -> executeMoveFlowToPaused(action)
is Action.UpdateFlowStatus -> executeUpdateFlowStatus(action)
}
}
private fun executeReleaseSoftLocks(action: Action.ReleaseSoftLocks) {

@ -83,7 +85,7 @@ internal class ActionExecutorImpl(
val checkpoint = action.checkpoint
val flowState = checkpoint.flowState
val serializedFlowState = when(flowState) {
FlowState.Completed -> null
FlowState.Finished -> null
// upon implementing CORDA-3816: If we have errored or hospitalized then we don't need to serialize the flowState as it will not get saved in the DB
else -> flowState.checkpointSerialize(checkpointSerializationContext)
}

@ -92,13 +94,18 @@ internal class ActionExecutorImpl(
if (action.isCheckpointUpdate) {
checkpointStorage.updateCheckpoint(action.id, checkpoint, serializedFlowState, serializedCheckpointState)
} else {
if (flowState is FlowState.Completed) {
throw IllegalStateException("A new checkpoint cannot be created with a Completed FlowState.")
if (flowState is FlowState.Finished) {
throw IllegalStateException("A new checkpoint cannot be created with a finished flow state.")
}
checkpointStorage.addCheckpoint(action.id, checkpoint, serializedFlowState!!, serializedCheckpointState)
}
}
@Suspendable
private fun executeUpdateFlowStatus(action: Action.UpdateFlowStatus) {
checkpointStorage.updateStatus(action.id, action.status)
}
@Suspendable
private fun executePersistDeduplicationIds(action: Action.PersistDeduplicationFacts) {
for (handle in action.deduplicationHandlers) {

@ -151,7 +158,7 @@ internal class ActionExecutorImpl(
@Suspendable
private fun executeRemoveCheckpoint(action: Action.RemoveCheckpoint) {
checkpointStorage.removeCheckpoint(action.id)
checkpointStorage.removeCheckpoint(action.id, action.mayHavePersistentResults)
}
@Suspendable

@ -191,6 +198,11 @@ internal class ActionExecutorImpl(
stateMachineManager.removeFlow(action.flowId, action.removalReason, action.lastState)
}
@Suspendable
private fun executeMoveFlowToPaused(action: Action.MoveFlowToPaused) {
stateMachineManager.moveFlowToPaused(action.currentState)
}
@Suspendable
@Throws(SQLException::class)
private fun executeCreateTransaction() {

@ -139,7 +139,7 @@ sealed class Event {
data class AsyncOperationCompletion(val returnValue: Any?) : Event()
/**
* Signals the faiure of a [FlowAsyncOperation].
* Signals the failure of a [FlowAsyncOperation].
*
* Scheduling is triggered by the service that completes the future returned by the async operation.
*

@ -179,6 +179,20 @@ sealed class Event {
override fun toString() = "WakeUpSleepyFlow"
}
/**
* Pause the flow.
*/
object Pause: Event() {
override fun toString() = "Pause"
}
/**
* Terminate the specified [sessions], removing them from in-memory datastructures.
*
* @param sessions The sessions to terminate
*/
data class TerminateSessions(val sessions: Set<SessionId>) : Event()
/**
* Indicates that an event was generated by an external event and that external event needs to be replayed if we retry the flow,
* even if it has not yet been processed and placed on the pending de-duplication handlers list.

@ -19,20 +19,25 @@ import net.corda.core.utilities.contextLogger
import net.corda.node.services.api.CheckpointStorage
import net.corda.node.services.api.ServiceHubInternal
import net.corda.node.services.messaging.DeduplicationHandler
import net.corda.node.services.statemachine.FlowStateMachineImpl.Companion.currentStateMachine
import net.corda.node.services.statemachine.transitions.StateMachine
import net.corda.node.utilities.isEnabledTimedFlow
import net.corda.nodeapi.internal.persistence.CordaPersistence
import org.apache.activemq.artemis.utils.ReusableLatch
import java.security.SecureRandom
import java.util.concurrent.Semaphore
class Flow<A>(val fiber: FlowStateMachineImpl<A>, val resultFuture: OpenFuture<Any?>)
class NonResidentFlow(val runId: StateMachineRunId, val checkpoint: Checkpoint) {
val externalEvents = mutableListOf<Event.DeliverSessionMessage>()
data class NonResidentFlow(
val runId: StateMachineRunId,
var checkpoint: Checkpoint,
val resultFuture: OpenFuture<Any?> = openFuture(),
val resumable: Boolean = true
) {
val events = mutableListOf<ExternalEvent>()
fun addExternalEvent(message: Event.DeliverSessionMessage) {
externalEvents.add(message)
fun addExternalEvent(message: ExternalEvent) {
events.add(message)
}
}
@ -65,31 +70,49 @@ class FlowCreator(
}
else -> nonResidentFlow.checkpoint
}
return createFlowFromCheckpoint(nonResidentFlow.runId, checkpoint)
return createFlowFromCheckpoint(nonResidentFlow.runId, checkpoint, resultFuture = nonResidentFlow.resultFuture)
}
@Suppress("LongParameterList")
fun createFlowFromCheckpoint(
runId: StateMachineRunId,
oldCheckpoint: Checkpoint,
reloadCheckpointAfterSuspendCount: Int? = null
reloadCheckpointAfterSuspendCount: Int? = null,
lock: Semaphore = Semaphore(1),
resultFuture: OpenFuture<Any?> = openFuture(),
firstRestore: Boolean = true
): Flow<*>? {
val checkpoint = oldCheckpoint.copy(status = Checkpoint.FlowStatus.RUNNABLE)
val fiber = checkpoint.getFiberFromCheckpoint(runId) ?: return null
val resultFuture = openFuture<Any?>()
val fiber = oldCheckpoint.getFiberFromCheckpoint(runId, firstRestore)
var checkpoint = oldCheckpoint
if (fiber == null) {
updateCompatibleInDb(runId, false)
return null
} else if (!oldCheckpoint.compatible) {
updateCompatibleInDb(runId, true)
checkpoint = checkpoint.copy(compatible = true)
}
checkpoint = checkpoint.copy(status = Checkpoint.FlowStatus.RUNNABLE)
fiber.logic.stateMachine = fiber
verifyFlowLogicIsSuspendable(fiber.logic)
val state = createStateMachineState(
fiber.transientValues = createTransientValues(runId, resultFuture)
fiber.transientState = createStateMachineState(
checkpoint = checkpoint,
fiber = fiber,
anyCheckpointPersisted = true,
reloadCheckpointAfterSuspendCount = reloadCheckpointAfterSuspendCount
?: if (reloadCheckpointAfterSuspend) checkpoint.checkpointState.numberOfSuspends else null
?: if (reloadCheckpointAfterSuspend) checkpoint.checkpointState.numberOfSuspends else null,
lock = lock
)
fiber.transientValues = createTransientValues(runId, resultFuture)
fiber.transientState = state
return Flow(fiber, resultFuture)
}
private fun updateCompatibleInDb(runId: StateMachineRunId, compatible: Boolean) {
database.transaction {
checkpointStorage.updateCompatible(runId, compatible)
}
}
@Suppress("LongParameterList")
fun <A> createFlowFromLogic(
flowId: StateMachineRunId,

@ -125,6 +148,7 @@ class FlowCreator(
fiber = flowStateMachineImpl,
anyCheckpointPersisted = existingCheckpoint != null,
reloadCheckpointAfterSuspendCount = if (reloadCheckpointAfterSuspend) 0 else null,
lock = Semaphore(1),
deduplicationHandler = deduplicationHandler,
senderUUID = senderUUID
)

@ -132,36 +156,45 @@ class FlowCreator(
return Flow(flowStateMachineImpl, resultFuture)
}
private fun Checkpoint.getFiberFromCheckpoint(runId: StateMachineRunId): FlowStateMachineImpl<*>? {
return when (this.flowState) {
is FlowState.Unstarted -> {
val logic = tryCheckpointDeserialize(this.flowState.frozenFlowLogic, runId) ?: return null
FlowStateMachineImpl(runId, logic, scheduler)
}
is FlowState.Started -> tryCheckpointDeserialize(this.flowState.frozenFiber, runId) ?: return null
// Places calling this function rely on it to return null if the flow cannot be created from the checkpoint.
else -> null
}
}
@Suppress("TooGenericExceptionCaught")
private inline fun <reified T : Any> tryCheckpointDeserialize(bytes: SerializedBytes<T>, flowId: StateMachineRunId): T? {
return try {
bytes.checkpointDeserialize(context = checkpointSerializationContext)
private fun Checkpoint.getFiberFromCheckpoint(runId: StateMachineRunId, firstRestore: Boolean): FlowStateMachineImpl<*>? {
try {
return when(flowState) {
is FlowState.Unstarted -> {
val logic = deserializeFlowState(flowState.frozenFlowLogic)
FlowStateMachineImpl(runId, logic, scheduler)
}
is FlowState.Started -> deserializeFlowState(flowState.frozenFiber)
// Places calling this function rely on it to return null if the flow cannot be created from the checkpoint.
else -> return null
}
} catch (e: Exception) {
if (reloadCheckpointAfterSuspend && currentStateMachine() != null) {
if (reloadCheckpointAfterSuspend && FlowStateMachineImpl.currentStateMachine() != null) {
logger.error(
"Unable to deserialize checkpoint for flow $flowId. [reloadCheckpointAfterSuspend] is turned on, throwing exception",
e
"Unable to deserialize checkpoint for flow $runId. [reloadCheckpointAfterSuspend] is turned on, throwing exception",
e
)
throw ReloadFlowFromCheckpointException(e)
} else {
logger.error("Unable to deserialize checkpoint for flow $flowId. Something is very wrong and this flow will be ignored.", e)
null
logSerializationError(firstRestore, runId, e)
return null
}
}
}
private inline fun <reified T : Any> deserializeFlowState(bytes: SerializedBytes<T>): T {
return bytes.checkpointDeserialize(context = checkpointSerializationContext)
}
private fun logSerializationError(firstRestore: Boolean, flowId: StateMachineRunId, exception: Exception) {
if (firstRestore) {
logger.warn("Flow with id $flowId could not be restored from its checkpoint. Normally this means that a CorDapp has been" +
" upgraded without draining the node. To run this flow restart the node after downgrading the CorDapp.", exception)
} else {
logger.error("Unable to deserialize fiber for flow $flowId. Something is very wrong and this flow will be ignored.", exception)
}
}
private fun verifyFlowLogicIsSuspendable(logic: FlowLogic<Any?>) {
// Quasar requires (in Java 8) that at least the call method be annotated suspendable. Unfortunately, it's
// easy to forget to add this when creating a new flow, so we check here to give the user a better error.

@ -196,6 +229,7 @@ class FlowCreator(
fiber: FlowStateMachineImpl<*>,
anyCheckpointPersisted: Boolean,
reloadCheckpointAfterSuspendCount: Int?,
lock: Semaphore,
deduplicationHandler: DeduplicationHandler? = null,
senderUUID: String? = null
): StateMachineState {

@ -211,7 +245,8 @@ class FlowCreator(
isKilled = false,
flowLogic = fiber.logic,
senderUUID = senderUUID,
reloadCheckpointAfterSuspendCount = reloadCheckpointAfterSuspendCount
reloadCheckpointAfterSuspendCount = reloadCheckpointAfterSuspendCount,
lock = lock
)
}
}
}

@ -146,6 +146,8 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
override val context: InvocationContext get() = transientState.checkpoint.checkpointState.invocationContext
override val ourIdentity: Party get() = transientState.checkpoint.checkpointState.ourIdentity
override val isKilled: Boolean get() = transientState.isKilled
override val clientId: String? get() = transientState.checkpoint.checkpointState.invocationContext.clientId
/**
* What sender identifier to put on messages sent by this flow. This will either be the identifier for the current
* state machine manager / messaging client, or null to indicate this flow is restored from a checkpoint and

@ -155,6 +157,16 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
internal val softLockedStates = mutableSetOf<StateRef>()
internal inline fun <RESULT> withFlowLock(block: FlowStateMachineImpl<R>.() -> RESULT): RESULT {
transientState.lock.acquire()
return try {
block(this)
} finally {
transientState.lock.release()
}
}
/**
* Processes an event by creating the associated transition and executing it using the given executor.
* Try to avoid using this directly, instead use [processEventsUntilFlowIsResumed] or [processEventImmediately]

@ -162,20 +174,23 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
*/
@Suspendable
private fun processEvent(transitionExecutor: TransitionExecutor, event: Event): FlowContinuation {
setLoggingContext()
val stateMachine = transientValues.stateMachine
val oldState = transientState
val actionExecutor = transientValues.actionExecutor
val transition = stateMachine.transition(event, oldState)
val (continuation, newState) = transitionExecutor.executeTransition(this, oldState, event, transition, actionExecutor)
// Ensure that the next state that is being written to the transient state maintains the [isKilled] flag
// This condition can be met if a flow is killed during [TransitionExecutor.executeTransition]
if (oldState.isKilled && !newState.isKilled) {
newState.isKilled = true
return withFlowLock {
setLoggingContext()
val stateMachine = transientValues.stateMachine
val oldState = transientState
val actionExecutor = transientValues.actionExecutor
val transition = stateMachine.transition(event, oldState)
val (continuation, newState) = transitionExecutor.executeTransition(
this,
oldState,
event,
transition,
actionExecutor
)
transientState = newState
setLoggingContext()
continuation
}
transientState = newState
setLoggingContext()
return continuation
}
/**

@ -212,6 +227,9 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
FlowContinuation.Abort -> abortFiber()
}
}
} catch(t: Throwable) {
logUnexpectedExceptionInFlowEventLoop(isDbTransactionOpenOnExit, t)
throw t
} finally {
checkDbTransaction(isDbTransactionOpenOnExit)
openThreadLocalWormhole()

@ -282,6 +300,14 @@ class FlowStateMachineImpl<R>(override val id: StateMachineRunId,
}
}
private fun logUnexpectedExceptionInFlowEventLoop(isDbTransactionOpenOnExit: Boolean, throwable: Throwable) {
if (isDbTransactionOpenOnExit && contextTransactionOrNull == null) {
logger.error("Unexpected error thrown from flow event loop, transaction context missing", throwable)
} else if (!isDbTransactionOpenOnExit && contextTransactionOrNull != null) {
logger.error("Unexpected error thrown from flow event loop, transaction is marked as not present, but is not null", throwable)
}
}
fun setLoggingContext() {
context.pushToLoggingContext()
MDC.put("flow-id", id.uuid.toString())
@ -3,6 +3,7 @@ package net.corda.node.services.statemachine
import co.paralleluniverse.fibers.Fiber
import co.paralleluniverse.fibers.FiberExecutorScheduler
import co.paralleluniverse.fibers.instrument.JavaAgent
import co.paralleluniverse.strands.channels.Channel
import com.codahale.metrics.Gauge
import com.google.common.util.concurrent.ThreadFactoryBuilder
import net.corda.core.concurrent.CordaFuture

@ -13,12 +14,16 @@ import net.corda.core.flows.FlowLogic
import net.corda.core.flows.StateMachineRunId
import net.corda.core.identity.Party
import net.corda.core.internal.FlowStateMachine
import net.corda.core.internal.FlowStateMachineHandle
import net.corda.core.internal.VisibleForTesting
import net.corda.core.internal.bufferUntilSubscribed
import net.corda.core.internal.castIfPossible
import net.corda.core.internal.concurrent.OpenFuture
import net.corda.core.internal.concurrent.doneFuture
import net.corda.core.internal.concurrent.map
import net.corda.core.internal.concurrent.mapError
import net.corda.core.internal.concurrent.openFuture
import net.corda.core.internal.mapNotNull
import net.corda.core.internal.uncheckedCast
import net.corda.core.messaging.DataFeed
import net.corda.core.serialization.deserialize
import net.corda.core.serialization.internal.CheckpointSerializationContext

@ -72,12 +77,29 @@ internal class SingleThreadedStateMachineManager(
) : StateMachineManager, StateMachineManagerInternal {
companion object {
private val logger = contextLogger()
private val VALID_KILL_FLOW_STATUSES = setOf(
Checkpoint.FlowStatus.RUNNABLE,
Checkpoint.FlowStatus.FAILED,
Checkpoint.FlowStatus.COMPLETED,
Checkpoint.FlowStatus.HOSPITALIZED,
Checkpoint.FlowStatus.PAUSED
)
@VisibleForTesting
var beforeClientIDCheck: (() -> Unit)? = null
@VisibleForTesting
var onClientIDNotFound: (() -> Unit)? = null
@VisibleForTesting
var onCallingStartFlowInternal: (() -> Unit)? = null
@VisibleForTesting
var onStartFlowInternalThrewAndAboutToRemove: (() -> Unit)? = null
}
private val innerState = StateMachineInnerStateImpl()
private val scheduler = FiberExecutorScheduler("Same thread scheduler", executor)
private val scheduledFutureExecutor = Executors.newSingleThreadScheduledExecutor(
ThreadFactoryBuilder().setNameFormat("flow-scheduled-future-thread").setDaemon(true).build()
ThreadFactoryBuilder().setNameFormat("flow-scheduled-future-thread").setDaemon(true).build()
)
// How many Fibers are running (this includes suspended flows). If zero and stopping is true, then we are halted.
private val liveFibers = ReusableLatch()

@ -89,7 +111,7 @@ internal class SingleThreadedStateMachineManager(
private val flowTimeoutScheduler = FlowTimeoutScheduler(innerState, scheduledFutureExecutor, serviceHub)
private val ourSenderUUID = serviceHub.networkService.ourSenderUUID
private var checkpointSerializationContext: CheckpointSerializationContext? = null
private lateinit var checkpointSerializationContext: CheckpointSerializationContext
private lateinit var flowCreator: FlowCreator
override val flowHospital: StaffedFlowHospital = makeFlowHospital()

@ -102,6 +124,26 @@ internal class SingleThreadedStateMachineManager(
private val totalStartedFlows = metrics.counter("Flows.Started")
private val totalFinishedFlows = metrics.counter("Flows.Finished")
private inline fun <R> Flow<R>.withFlowLock(
validStatuses: Set<Checkpoint.FlowStatus>,
block: FlowStateMachineImpl<R>.() -> Boolean
): Boolean {
if (!fiber.hasValidStatus(validStatuses)) return false
return fiber.withFlowLock {
// Get the flow again, in case another thread removed it from the map
innerState.withLock {
flows[id]?.run {
if (!fiber.hasValidStatus(validStatuses)) return false
block(uncheckedCast(this.fiber))
}
} ?: false
}
}
private fun FlowStateMachineImpl<*>.hasValidStatus(validStatuses: Set<Checkpoint.FlowStatus>): Boolean {
return transientState.checkpoint.status in validStatuses
}
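// Standalone sketch (plain Kotlin, not Corda API) of the check/lock/re-check pattern used by
// withFlowLock above: a cheap status pre-check avoids taking the flow lock at all, and the
// check is repeated under the lock because another thread may have mutated the map in between.
fun <K, V> withEntryLock(
    map: MutableMap<K, V>,
    key: K,
    lock: java.util.concurrent.Semaphore,
    block: (V) -> Boolean
): Boolean {
    if (key !in map) return false              // cheap pre-check without the lock
    lock.acquire()
    return try {
        val value = map[key] ?: return false   // re-check: the entry may have been removed meanwhile
        block(value)
    } finally {
        lock.release()
    }
}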
/**
* An observable that emits triples of the changing flow, the type of change, and a process-specific ID number
* which may change across restarts.

@ -110,6 +152,7 @@ internal class SingleThreadedStateMachineManager(
*/
override val changes: Observable<StateMachineManager.Change> = innerState.changesPublisher
@Suppress("ComplexMethod")
override fun start(tokenizableServices: List<Any>, startMode: StateMachineManager.StartMode): CordaFuture<Unit> {
checkQuasarJavaAgentPresence()
val checkpointSerializationContext = CheckpointSerializationDefaults.CHECKPOINT_CONTEXT.withTokenContext(

@ -139,12 +182,11 @@ internal class SingleThreadedStateMachineManager(
flowTimeoutScheduler::resetCustomTimeout
)
val fibers = restoreFlowsFromCheckpoints()
val (fibers, pausedFlows) = restoreFlowsFromCheckpoints()
metrics.register("Flows.InFlight", Gauge<Int> { innerState.flows.size })
setFlowDefaultUncaughtExceptionHandler()
val pausedFlows = restoreNonResidentFlowsFromPausedCheckpoints()
innerState.withLock {
this.pausedFlows.putAll(pausedFlows)
for ((id, flow) in pausedFlows) {

@ -154,6 +196,33 @@ internal class SingleThreadedStateMachineManager(
}
}
}
// - Incompatible checkpoints need to be handled upon implementing CORDA-3897
for (flow in fibers.values) {
flow.fiber.clientId?.let {
innerState.clientIdsToFlowIds[it] = FlowWithClientIdStatus.Active(doneFuture(flow.fiber))
}
}
for (pausedFlow in pausedFlows) {
pausedFlow.value.checkpoint.checkpointState.invocationContext.clientId?.let {
innerState.clientIdsToFlowIds[it] = FlowWithClientIdStatus.Active(
doneClientIdFuture(pausedFlow.key, pausedFlow.value.resultFuture, it)
)
}
}
val finishedFlowsResults = checkpointStorage.getFinishedFlowsResultsMetadata().toList()
for ((id, finishedFlowResult) in finishedFlowsResults) {
finishedFlowResult.clientId?.let {
if (finishedFlowResult.status == Checkpoint.FlowStatus.COMPLETED) {
innerState.clientIdsToFlowIds[it] = FlowWithClientIdStatus.Removed(id, true)
} else {
innerState.clientIdsToFlowIds[it] = FlowWithClientIdStatus.Removed(id, false)
}
} ?: logger.error("Found finished flow $id without a client id. Something is very wrong and this flow will be ignored.")
}
return serviceHub.networkMapCache.nodeReady.map {
logger.info("Node ready, info: ${serviceHub.myInfo}")
resumeRestoredFlows(fibers)

@ -221,52 +290,88 @@ internal class SingleThreadedStateMachineManager(
}
}
@Suppress("ComplexMethod")
private fun <A> startFlow(
flowId: StateMachineRunId,
flowLogic: FlowLogic<A>,
context: InvocationContext,
ourIdentity: Party?,
deduplicationHandler: DeduplicationHandler?
): CordaFuture<FlowStateMachine<A>> {
return startFlowInternal(
): CordaFuture<out FlowStateMachineHandle<A>> {
beforeClientIDCheck?.invoke()
var newFuture: OpenFuture<FlowStateMachineHandle<A>>? = null
val clientId = context.clientId
if (clientId != null) {
var existingStatus: FlowWithClientIdStatus? = null
innerState.withLock {
clientIdsToFlowIds.compute(clientId) { _, status ->
if (status != null) {
existingStatus = status
status
} else {
newFuture = openFuture()
FlowWithClientIdStatus.Active(newFuture!!)
}
}
}
// Flow -started with client id- already exists, return the existing flow's future and don't start a new flow.
existingStatus?.let {
val existingFuture = activeOrRemovedClientIdFuture(it, clientId)
return@startFlow uncheckedCast(existingFuture)
}
onClientIDNotFound?.invoke()
}
return try {
startFlowInternal(
flowId,
invocationContext = context,
flowLogic = flowLogic,
flowStart = FlowStart.Explicit,
ourIdentity = ourIdentity ?: ourFirstIdentity,
deduplicationHandler = deduplicationHandler
)
).also {
newFuture?.captureLater(uncheckedCast(it))
}
} catch (t: Throwable) {
onStartFlowInternalThrewAndAboutToRemove?.invoke()
innerState.withLock {
clientIdsToFlowIds.remove(clientId)
newFuture?.setException(t)
}
// Throwing the exception here is the same as returning an exceptionally completed future since the caller calls
// getOrThrow() on the returned future at [CordaRPCOpsImpl.startFlow].
throw t
}
}
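// Standalone sketch (plain Kotlin with CompletableFuture, not Corda API) of the idempotent
// client-id registration performed above: the first caller for a client id installs a future
// and starts the work, later callers for the same id get the original future back, and a
// failed start removes the registration again, mirroring the catch block above.
// import java.util.concurrent.CompletableFuture
// import java.util.concurrent.ConcurrentHashMap
val startedByClientId = java.util.concurrent.ConcurrentHashMap<String, java.util.concurrent.CompletableFuture<String>>()

fun startOnce(clientId: String, start: () -> java.util.concurrent.CompletableFuture<String>): java.util.concurrent.CompletableFuture<String> {
    val newFuture = java.util.concurrent.CompletableFuture<String>()
    val existing = startedByClientId.putIfAbsent(clientId, newFuture)
    if (existing != null) return existing          // same client id: reuse the original flow's future
    return try {
        start().whenComplete { value, error ->
            if (error != null) newFuture.completeExceptionally(error) else newFuture.complete(value)
        }
        newFuture
    } catch (t: Throwable) {
        startedByClientId.remove(clientId)         // failed to start: allow a later retry with the same id
        newFuture.completeExceptionally(t)
        throw t
    }
}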
override fun killFlow(id: StateMachineRunId): Boolean {
val killFlowResult = innerState.withLock {
val flow = flows[id]
if (flow != null) {
val flow = innerState.withLock { flows[id] }
val killFlowResult = if (flow != null) {
flow.withFlowLock(VALID_KILL_FLOW_STATUSES) {
logger.info("Killing flow $id known to this node.")
// The checkpoint and soft locks are removed here instead of relying on the processing of the next event after setting
// the killed flag. This is to ensure a flow can be removed from the database, even if it is stuck in an infinite loop.
database.transaction {
checkpointStorage.removeCheckpoint(id)
checkpointStorage.removeCheckpoint(id, mayHavePersistentResults = true)
serviceHub.vaultService.softLockRelease(id.uuid)
}
// the same code is NOT done in remove flow when an error occurs
// what is the point of this latch?
unfinishedFibers.countDown()
val state = flow.fiber.transientState
state.isKilled = true
flow.fiber.scheduleEvent(Event.DoRemainingWork)
flow.fiber.transientState = flow.fiber.transientState.copy(isKilled = true)
scheduleEvent(Event.DoRemainingWork)
true
} else {
// It may be that the id refers to a checkpoint that couldn't be deserialised into a flow, so we delete it if it exists.
database.transaction { checkpointStorage.removeCheckpoint(id) }
}
}
return if (killFlowResult) {
true
} else {
flowHospital.dropSessionInit(id)
// It may be that the id refers to a checkpoint that couldn't be deserialised into a flow, so we delete it if it exists.
database.transaction { checkpointStorage.removeCheckpoint(id, mayHavePersistentResults = true) }
}
return killFlowResult || flowHospital.dropSessionInit(id)
}
private fun markAllFlowsAsPaused() {

@ -342,30 +447,41 @@ internal class SingleThreadedStateMachineManager(
liveFibers.countUp()
}
private fun restoreFlowsFromCheckpoints(): List<Flow<*>> {
return checkpointStorage.getCheckpointsToRun().use {
it.mapNotNull { (id, serializedCheckpoint) ->
// If a flow is added before start() then don't attempt to restore it
innerState.withLock { if (id in flows) return@mapNotNull null }
val checkpoint = tryDeserializeCheckpoint(serializedCheckpoint, id) ?: return@mapNotNull null
flowCreator.createFlowFromCheckpoint(id, checkpoint)
}.toList()
@Suppress("ComplexMethod")
private fun restoreFlowsFromCheckpoints(): Pair<MutableMap<StateMachineRunId, Flow<*>>, MutableMap<StateMachineRunId, NonResidentFlow>> {
val flows = mutableMapOf<StateMachineRunId, Flow<*>>()
val pausedFlows = mutableMapOf<StateMachineRunId, NonResidentFlow>()
checkpointStorage.getCheckpointsToRun().forEach Checkpoints@{(id, serializedCheckpoint) ->
// If a flow is added before start() then don't attempt to restore it
innerState.withLock { if (id in flows) return@Checkpoints }
val checkpoint = tryDeserializeCheckpoint(serializedCheckpoint, id)?.also {
if (it.status == Checkpoint.FlowStatus.HOSPITALIZED) {
if (checkpointStorage.removeFlowException(id)) {
checkpointStorage.updateStatus(id, Checkpoint.FlowStatus.RUNNABLE)
} else {
logger.error("Unable to remove database exception for flow $id. Something is very wrong. The flow will not be loaded and run.")
return@Checkpoints
}
}
} ?: return@Checkpoints
val flow = flowCreator.createFlowFromCheckpoint(id, checkpoint)
if (flow == null) {
// Set the flowState to paused so we don't waste memory storing it anymore.
pausedFlows[id] = NonResidentFlow(id, checkpoint.copy(flowState = FlowState.Paused), resumable = false)
} else {
flows[id] = flow
}
}
checkpointStorage.getPausedCheckpoints().forEach Checkpoints@{ (id, serializedCheckpoint) ->
val checkpoint = tryDeserializeCheckpoint(serializedCheckpoint, id) ?: return@Checkpoints
pausedFlows[id] = NonResidentFlow(id, checkpoint)
}
return Pair(flows, pausedFlows)
}
|
||||
|
||||
private fun restoreNonResidentFlowsFromPausedCheckpoints(): Map<StateMachineRunId, NonResidentFlow> {
|
||||
return checkpointStorage.getPausedCheckpoints().use {
|
||||
it.mapNotNull { (id, serializedCheckpoint) ->
|
||||
// If a flow is added before start() then don't attempt to restore it
|
||||
val checkpoint = tryDeserializeCheckpoint(serializedCheckpoint, id) ?: return@mapNotNull null
|
||||
id to NonResidentFlow(id, checkpoint)
|
||||
}.toList().toMap()
|
||||
}
|
||||
}
|
||||
|
||||
private fun resumeRestoredFlows(flows: List<Flow<*>>) {
|
||||
for (flow in flows) {
|
||||
addAndStartFlow(flow.fiber.id, flow)
|
||||
private fun resumeRestoredFlows(flows: Map<StateMachineRunId, Flow<*>>) {
|
||||
for ((id, flow) in flows.entries) {
|
||||
addAndStartFlow(id, flow)
|
||||
}
|
||||
}
|
||||
|
||||
@ -382,15 +498,33 @@ internal class SingleThreadedStateMachineManager(
|
||||
val flow = if (currentState.isAnyCheckpointPersisted) {
|
||||
// We intentionally grab the checkpoint from storage rather than relying on the one referenced by currentState. This is so that
|
||||
// we mirror exactly what happens when restarting the node.
|
||||
val serializedCheckpoint = database.transaction { checkpointStorage.getCheckpoint(flowId) }
|
||||
if (serializedCheckpoint == null) {
|
||||
logger.error("Unable to find database checkpoint for flow $flowId. Something is very wrong. The flow will not retry.")
|
||||
return
|
||||
}
|
||||
val checkpoint = database.transaction {
|
||||
val serializedCheckpoint = checkpointStorage.getCheckpoint(flowId)
|
||||
if (serializedCheckpoint == null) {
|
||||
logger.error("Unable to find database checkpoint for flow $flowId. Something is very wrong. The flow will not retry.")
|
||||
return@transaction null
|
||||
}
|
||||
|
||||
tryDeserializeCheckpoint(serializedCheckpoint, flowId)?.also {
|
||||
if (it.status == Checkpoint.FlowStatus.HOSPITALIZED) {
|
||||
if (checkpointStorage.removeFlowException(flowId)) {
|
||||
checkpointStorage.updateStatus(flowId, Checkpoint.FlowStatus.RUNNABLE)
|
||||
} else {
|
||||
logger.error("Unable to remove database exception for flow $flowId. Something is very wrong. The flow will not be loaded and run.")
|
||||
return@transaction null
|
||||
}
|
||||
}
|
||||
} ?: return@transaction null
|
||||
} ?: return
|
||||
|
||||
val checkpoint = tryDeserializeCheckpoint(serializedCheckpoint, flowId) ?: return
|
||||
// Resurrect flow
|
||||
flowCreator.createFlowFromCheckpoint(flowId, checkpoint, currentState.reloadCheckpointAfterSuspendCount) ?: return
|
||||
flowCreator.createFlowFromCheckpoint(
|
||||
flowId,
|
||||
checkpoint,
|
||||
currentState.reloadCheckpointAfterSuspendCount,
|
||||
currentState.lock,
|
||||
firstRestore = false
|
||||
) ?: return
|
||||
} else {
|
||||
// Just flow initiation message
|
||||
null
|
||||
@ -407,17 +541,56 @@ internal class SingleThreadedStateMachineManager(
|
||||
injectOldProgressTracker(currentState.flowLogic.progressTracker, flow.fiber.logic)
|
||||
addAndStartFlow(flowId, flow)
|
||||
}
|
||||
// Deliver all the external events from the old flow instance.
|
||||
val unprocessedExternalEvents = mutableListOf<ExternalEvent>()
|
||||
do {
|
||||
val event = oldFlowLeftOver.tryReceive()
|
||||
if (event is Event.GeneratedByExternalEvent) {
|
||||
unprocessedExternalEvents += event.deduplicationHandler.externalCause
|
||||
}
|
||||
} while (event != null)
|
||||
val externalEvents = currentState.pendingDeduplicationHandlers.map { it.externalCause } + unprocessedExternalEvents
|
||||
for (externalEvent in externalEvents) {
|
||||
deliverExternalEvent(externalEvent)
|
||||
extractAndScheduleEventsForRetry(oldFlowLeftOver, currentState)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract all the [ExternalEvent] from this flows event queue and queue them (in the correct order) in the PausedFlow.
|
||||
* This differs from [extractAndScheduleEventsForRetry] which also extracts (and schedules) [Event.Pause]. This means that if there are
|
||||
* more events in the flows eventQueue then the flow won't pause again (after it is retried). These events are then scheduled (along
|
||||
* with any [ExistingSessionMessage] which arrive in the interim) when the flow is retried.
|
||||
*/
|
||||
private fun extractAndQueueExternalEventsForPausedFlow(
|
||||
currentEventQueue: Channel<Event>,
|
||||
currentPendingDeduplicationHandlers: List<DeduplicationHandler>,
|
||||
pausedFlow: NonResidentFlow
|
||||
) {
|
||||
pausedFlow.events += currentPendingDeduplicationHandlers.map{it.externalCause}
|
||||
do {
|
||||
val event = currentEventQueue.tryReceive()
|
||||
if (event is Event.GeneratedByExternalEvent) {
|
||||
pausedFlow.events.add(event.deduplicationHandler.externalCause)
|
||||
}
|
||||
} while (event != null)
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Extract all the incomplete deduplication handlers as well as the [ExternalEvent] and [Event.Pause] events from this flows event queue
|
||||
* [oldEventQueue]. Then schedule them (in the same order) for the new flow. This means that if a retried flow has a pause event
|
||||
* scheduled then the retried flow will eventually pause. The new flow will not retry again if future retry events have been scheduled.
|
||||
* When this method is called this flow must have been replaced by the new flow in [StateMachineInnerState.flows]. This method differs
|
||||
* from [extractAndQueueExternalEventsForPausedFlow] where (only) [externalEvents] are extracted and scheduled straight away.
|
||||
*/
|
||||
private fun extractAndScheduleEventsForRetry(oldEventQueue: Channel<Event>, currentState: StateMachineState) {
|
||||
val flow = innerState.withLock {
|
||||
flows[currentState.flowLogic.runId]
|
||||
}
|
||||
val events = mutableListOf<Event>()
|
||||
do {
|
||||
val event = oldEventQueue.tryReceive()
|
||||
if (event is Event.Pause || event is Event.GeneratedByExternalEvent) events.add(event)
|
||||
} while (event != null)
|
||||
|
||||
for (externalEvent in currentState.pendingDeduplicationHandlers) {
|
||||
deliverExternalEvent(externalEvent.externalCause)
|
||||
}
|
||||
for (event in events) {
|
||||
if (event is Event.GeneratedByExternalEvent) {
|
||||
deliverExternalEvent(event.deduplicationHandler.externalCause)
|
||||
} else {
|
||||
flow?.fiber?.scheduleEvent(event)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -456,7 +629,7 @@ internal class SingleThreadedStateMachineManager(
|
||||
val sender = serviceHub.networkMapCache.getPeerByLegalName(peer)
|
||||
if (sender != null) {
|
||||
when (sessionMessage) {
|
||||
is ExistingSessionMessage -> onExistingSessionMessage(sessionMessage, event.deduplicationHandler, sender)
|
||||
is ExistingSessionMessage -> onExistingSessionMessage(sessionMessage, sender, event)
|
||||
is InitialSessionMessage -> onSessionInit(sessionMessage, sender, event)
|
||||
}
|
||||
} else {
|
||||
@ -466,8 +639,13 @@ internal class SingleThreadedStateMachineManager(
|
||||
}
|
||||
}
|
||||
|
||||
private fun onExistingSessionMessage(sessionMessage: ExistingSessionMessage, deduplicationHandler: DeduplicationHandler, sender: Party) {
|
||||
private fun onExistingSessionMessage(
|
||||
sessionMessage: ExistingSessionMessage,
|
||||
sender: Party,
|
||||
externalEvent: ExternalEvent.ExternalMessageEvent
|
||||
) {
|
||||
try {
|
||||
val deduplicationHandler = externalEvent.deduplicationHandler
|
||||
val recipientId = sessionMessage.recipientSessionId
|
||||
val flowId = sessionToFlow[recipientId]
|
||||
if (flowId == null) {
|
||||
@ -486,7 +664,7 @@ internal class SingleThreadedStateMachineManager(
|
||||
innerState.withLock {
|
||||
flows[flowId]?.run { fiber.scheduleEvent(event) }
|
||||
// If flow is not running add it to the list of external events to be processed if/when the flow resumes.
|
||||
?: pausedFlows[flowId]?.run { addExternalEvent(event) }
|
||||
?: pausedFlows[flowId]?.run { addExternalEvent(externalEvent) }
|
||||
?: logger.info("Cannot find fiber corresponding to flow ID $flowId")
|
||||
}
|
||||
}
|
||||
@ -578,6 +756,7 @@ internal class SingleThreadedStateMachineManager(
|
||||
ourIdentity: Party,
|
||||
deduplicationHandler: DeduplicationHandler?
|
||||
): CordaFuture<FlowStateMachine<A>> {
|
||||
onCallingStartFlowInternal?.invoke()
|
||||
|
||||
val existingFlow = innerState.withLock { flows[flowId] }
|
||||
val existingCheckpoint = if (existingFlow != null && existingFlow.fiber.transientState.isAnyCheckpointPersisted) {
|
||||
@ -586,24 +765,25 @@ internal class SingleThreadedStateMachineManager(
|
||||
// CORDA-3359 - Do not start/retry a flow that failed after deleting its checkpoint (the whole of the flow might replay)
|
||||
val existingCheckpoint = database.transaction { checkpointStorage.getCheckpoint(flowId) }
|
||||
existingCheckpoint?.let { serializedCheckpoint ->
|
||||
val checkpoint = tryDeserializeCheckpoint(serializedCheckpoint, flowId)
|
||||
if (checkpoint == null) {
|
||||
return openFuture<FlowStateMachine<A>>().mapError {
|
||||
IllegalStateException(
|
||||
"Unable to deserialize database checkpoint for flow $flowId. " +
|
||||
"Something is very wrong. The flow will not retry."
|
||||
)
|
||||
}
|
||||
} else {
|
||||
checkpoint
|
||||
}
|
||||
tryDeserializeCheckpoint(serializedCheckpoint, flowId) ?: throw IllegalStateException(
|
||||
"Unable to deserialize database checkpoint for flow $flowId. Something is very wrong. The flow will not retry."
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// This is a brand new flow
|
||||
null
|
||||
}
|
||||
|
||||
val flow = flowCreator.createFlowFromLogic(flowId, invocationContext, flowLogic, flowStart, ourIdentity, existingCheckpoint, deduplicationHandler, ourSenderUUID)
|
||||
val flow = flowCreator.createFlowFromLogic(
|
||||
flowId,
|
||||
invocationContext,
|
||||
flowLogic,
|
||||
flowStart,
|
||||
ourIdentity,
|
||||
existingCheckpoint,
|
||||
deduplicationHandler,
|
||||
ourSenderUUID
|
||||
)
|
||||
val startedFuture = openFuture<Unit>()
|
||||
innerState.withLock {
|
||||
startedFutures[flowId] = startedFuture
|
||||
@ -621,9 +801,29 @@ internal class SingleThreadedStateMachineManager(
|
||||
flowTimeoutScheduler.cancel(flowId)
|
||||
}
|
||||
|
||||
override fun moveFlowToPaused(currentState: StateMachineState) {
|
||||
currentState.cancelFutureIfRunning()
|
||||
flowTimeoutScheduler.cancel(currentState.flowLogic.runId)
|
||||
innerState.withLock {
|
||||
val id = currentState.flowLogic.runId
|
||||
val flow = flows.remove(id)
|
||||
if (flow != null) {
|
||||
decrementLiveFibers()
|
||||
//Setting flowState = FlowState.Paused means we don't hold the frozen fiber in memory.
|
||||
val checkpoint = currentState.checkpoint.copy(status = Checkpoint.FlowStatus.PAUSED, flowState = FlowState.Paused)
|
||||
val pausedFlow = NonResidentFlow(id, checkpoint, flow.resultFuture)
|
||||
val eventQueue = flow.fiber.transientValues.eventQueue
|
||||
extractAndQueueExternalEventsForPausedFlow(eventQueue, currentState.pendingDeduplicationHandlers, pausedFlow)
|
||||
pausedFlows.put(id, pausedFlow)
|
||||
} else {
|
||||
logger.warn("Flow $id already removed before pausing")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private fun tryDeserializeCheckpoint(serializedCheckpoint: Checkpoint.Serialized, flowId: StateMachineRunId): Checkpoint? {
|
||||
return try {
|
||||
serializedCheckpoint.deserialize(checkpointSerializationContext!!)
|
||||
serializedCheckpoint.deserialize(checkpointSerializationContext)
|
||||
} catch (e: Exception) {
|
||||
if (reloadCheckpointAfterSuspend && currentStateMachine() != null) {
|
||||
logger.error(
|
||||
@ -671,7 +871,7 @@ internal class SingleThreadedStateMachineManager(
|
||||
is FlowState.Started -> {
|
||||
Fiber.unparkDeserialized(flow.fiber, scheduler)
|
||||
}
|
||||
is FlowState.Completed -> throw IllegalStateException("Cannot start (or resume) a completed flow.")
|
||||
is FlowState.Finished -> throw IllegalStateException("Cannot start (or resume) a finished flow.")
|
||||
}
|
||||
}
|
||||
|
||||
@ -725,6 +925,7 @@ internal class SingleThreadedStateMachineManager(
|
||||
require(lastState.isRemoved) { "Flow must be in removable state before removal" }
|
||||
require(lastState.checkpoint.checkpointState.subFlowStack.size == 1) { "Checkpointed stack must be empty" }
|
||||
require(flow.fiber.id !in sessionToFlow.values) { "Flow fibre must not be needed by an existing session" }
|
||||
flow.fiber.clientId?.let { setClientIdAsSucceeded(it, flow.fiber.id) }
|
||||
flow.resultFuture.set(removalReason.flowReturnValue)
|
||||
lastState.flowLogic.progressTracker?.currentStep = ProgressTracker.DONE
|
||||
changesPublisher.onNext(StateMachineManager.Change.Removed(lastState.flowLogic, Try.Success(removalReason.flowReturnValue)))
|
||||
@ -736,13 +937,19 @@ internal class SingleThreadedStateMachineManager(
|
||||
lastState: StateMachineState
|
||||
) {
|
||||
drainFlowEventQueue(flow)
|
||||
// Complete the started future, needed when the flow fails during flow init (before completing an [UnstartedFlowTransition])
|
||||
startedFutures.remove(flow.fiber.id)?.set(Unit)
|
||||
flow.fiber.clientId?.let {
|
||||
if (flow.fiber.isKilled) {
|
||||
clientIdsToFlowIds.remove(it)
|
||||
} else {
|
||||
setClientIdAsFailed(it, flow.fiber.id) }
|
||||
}
|
||||
val flowError = removalReason.flowErrors[0] // TODO what to do with several?
|
||||
val exception = flowError.exception
|
||||
(exception as? FlowException)?.originalErrorId = flowError.errorId
|
||||
flow.resultFuture.setException(exception)
|
||||
lastState.flowLogic.progressTracker?.endWithError(exception)
|
||||
// Complete the started future, needed when the flow fails during flow init (before completing an [UnstartedFlowTransition])
|
||||
startedFutures.remove(flow.fiber.id)?.set(Unit)
|
||||
changesPublisher.onNext(StateMachineManager.Change.Removed(lastState.flowLogic, Try.Failure<Nothing>(exception)))
|
||||
}
|
||||
|
||||
@ -778,4 +985,117 @@ internal class SingleThreadedStateMachineManager(
|
||||
future = null
|
||||
}
|
||||
}
|
||||
|
||||
private fun StateMachineInnerState.setClientIdAsSucceeded(clientId: String, id: StateMachineRunId) {
|
||||
setClientIdAsRemoved(clientId, id, true)
|
||||
}
|
||||
|
||||
private fun StateMachineInnerState.setClientIdAsFailed(clientId: String, id: StateMachineRunId) {
|
||||
setClientIdAsRemoved(clientId, id, false)
|
||||
}
|
||||
|
||||
private fun StateMachineInnerState.setClientIdAsRemoved(
|
||||
clientId: String,
|
||||
id: StateMachineRunId,
|
||||
succeeded: Boolean
|
||||
) {
|
||||
clientIdsToFlowIds.compute(clientId) { _, existingStatus ->
|
||||
require(existingStatus != null && existingStatus is FlowWithClientIdStatus.Active)
|
||||
FlowWithClientIdStatus.Removed(id, succeeded)
|
||||
}
|
||||
}
|
||||
|
||||
private fun activeOrRemovedClientIdFuture(existingStatus: FlowWithClientIdStatus, clientId: String) = when (existingStatus) {
|
||||
is FlowWithClientIdStatus.Active -> existingStatus.flowStateMachineFuture
|
||||
is FlowWithClientIdStatus.Removed -> {
|
||||
val flowId = existingStatus.flowId
|
||||
val resultFuture = if (existingStatus.succeeded) {
|
||||
val flowResult = database.transaction { checkpointStorage.getFlowResult(existingStatus.flowId, throwIfMissing = true) }
|
||||
doneFuture(flowResult)
|
||||
} else {
|
||||
val flowException =
|
||||
database.transaction { checkpointStorage.getFlowException(existingStatus.flowId, throwIfMissing = true) }
|
||||
openFuture<Any?>().apply { setException(flowException as Throwable) }
|
||||
}
|
||||
|
||||
doneClientIdFuture(flowId, resultFuture, clientId)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The flow out of which a [doneFuture] will be produced should be a started flow,
|
||||
* i.e. it should not exist in [mutex.content.startedFutures].
|
||||
*/
|
||||
private fun doneClientIdFuture(
|
||||
id: StateMachineRunId,
|
||||
resultFuture: CordaFuture<Any?>,
|
||||
clientId: String
|
||||
): CordaFuture<FlowStateMachineHandle<out Any?>> =
|
||||
doneFuture(object : FlowStateMachineHandle<Any?> {
|
||||
override val logic: Nothing? = null
|
||||
override val id: StateMachineRunId = id
|
||||
override val resultFuture: CordaFuture<Any?> = resultFuture
|
||||
override val clientId: String? = clientId
|
||||
}
|
||||
)
|
||||
|
||||
override fun <T> reattachFlowWithClientId(clientId: String): FlowStateMachineHandle<T>? {
|
||||
return innerState.withLock {
|
||||
clientIdsToFlowIds[clientId]?.let {
|
||||
val existingFuture = activeOrRemovedClientIdFutureForReattach(it, clientId)
|
||||
existingFuture?.let { uncheckedCast(existingFuture.get()) }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("NestedBlockDepth")
|
||||
private fun activeOrRemovedClientIdFutureForReattach(
|
||||
existingStatus: FlowWithClientIdStatus,
|
||||
clientId: String
|
||||
): CordaFuture<out FlowStateMachineHandle<out Any?>>? {
|
||||
return when (existingStatus) {
|
||||
is FlowWithClientIdStatus.Active -> existingStatus.flowStateMachineFuture
|
||||
is FlowWithClientIdStatus.Removed -> {
|
||||
val flowId = existingStatus.flowId
|
||||
val resultFuture = if (existingStatus.succeeded) {
|
||||
try {
|
||||
val flowResult =
|
||||
database.transaction { checkpointStorage.getFlowResult(existingStatus.flowId, throwIfMissing = true) }
|
||||
doneFuture(flowResult)
|
||||
} catch (e: IllegalStateException) {
|
||||
null
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
val flowException =
|
||||
database.transaction { checkpointStorage.getFlowException(existingStatus.flowId, throwIfMissing = true) }
|
||||
openFuture<Any?>().apply { setException(flowException as Throwable) }
|
||||
} catch (e: IllegalStateException) {
|
||||
null
|
||||
}
|
||||
}
|
||||
|
||||
resultFuture?.let { doneClientIdFuture(flowId, it, clientId) }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override fun removeClientId(clientId: String): Boolean {
|
||||
var removedFlowId: StateMachineRunId? = null
|
||||
innerState.withLock {
|
||||
clientIdsToFlowIds.computeIfPresent(clientId) { _, existingStatus ->
|
||||
if (existingStatus is FlowWithClientIdStatus.Removed) {
|
||||
removedFlowId = existingStatus.flowId
|
||||
null
|
||||
} else {
|
||||
existingStatus
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
removedFlowId?.let {
|
||||
return database.transaction { checkpointStorage.removeCheckpoint(it, mayHavePersistentResults = true) }
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -104,6 +104,16 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging,
|
||||
*/
|
||||
private val flowsInHospital = ConcurrentHashMap<StateMachineRunId, FlowFiber>()
|
||||
|
||||
/**
|
||||
* Returns true if the flow is currently being treated in the hospital.
|
||||
* The differs to flows with a medical history (which can accessed via [StaffedFlowHospital.contains]).
|
||||
*/
|
||||
@VisibleForTesting
|
||||
internal fun flowInHospital(runId: StateMachineRunId): Boolean {
|
||||
// The .keys avoids https://youtrack.jetbrains.com/issue/KT-18053
|
||||
return runId in flowsInHospital.keys
|
||||
}
|
||||
|
||||
private val mutex = ThreadBox(object {
|
||||
/**
|
||||
* Contains medical history of every flow (a patient) that has entered the hospital. A flow can leave the hospital,
|
||||
|
@ -17,6 +17,7 @@ internal interface StateMachineInnerState {
|
||||
val changesPublisher: PublishSubject<Change>
|
||||
/** Flows scheduled to be retried if not finished within the specified timeout period. */
|
||||
val timedFlows: MutableMap<StateMachineRunId, ScheduledTimeout>
|
||||
val clientIdsToFlowIds: MutableMap<String, FlowWithClientIdStatus>
|
||||
|
||||
fun <R> withMutex(block: StateMachineInnerState.() -> R): R
|
||||
}
|
||||
@ -30,6 +31,7 @@ internal class StateMachineInnerStateImpl : StateMachineInnerState {
|
||||
override val pausedFlows = HashMap<StateMachineRunId, NonResidentFlow>()
|
||||
override val startedFutures = HashMap<StateMachineRunId, OpenFuture<Unit>>()
|
||||
override val timedFlows = HashMap<StateMachineRunId, ScheduledTimeout>()
|
||||
override val clientIdsToFlowIds = HashMap<String, FlowWithClientIdStatus>()
|
||||
|
||||
override fun <R> withMutex(block: StateMachineInnerState.() -> R): R = lock.withLock { block(this) }
|
||||
}
|
||||
|
@ -5,7 +5,9 @@ import net.corda.core.context.InvocationContext
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.internal.FlowStateMachine
|
||||
import net.corda.core.internal.FlowStateMachineHandle
|
||||
import net.corda.core.messaging.DataFeed
|
||||
import net.corda.core.messaging.FlowHandleWithClientId
|
||||
import net.corda.core.utilities.Try
|
||||
import net.corda.node.services.messaging.DeduplicationHandler
|
||||
import net.corda.node.services.messaging.ReceivedMessage
|
||||
@ -97,6 +99,27 @@ interface StateMachineManager {
|
||||
* Returns a snapshot of all [FlowStateMachineImpl]s currently managed.
|
||||
*/
|
||||
fun snapshot(): Set<FlowStateMachineImpl<*>>
|
||||
|
||||
/**
|
||||
* Reattach to an existing flow that was started with [startFlowDynamicWithClientId] and has a [clientId].
|
||||
*
|
||||
* If there is a flow matching the [clientId] then its result or exception is returned.
|
||||
*
|
||||
* When there is no flow matching the [clientId] then [null] is returned directly (not a future/[FlowHandleWithClientId]).
|
||||
*
|
||||
* Calling [reattachFlowWithClientId] after [removeClientId] with the same [clientId] will cause the function to return [null] as
|
||||
* the result/exception of the flow will no longer be available.
|
||||
*
|
||||
* @param clientId The client id relating to an existing flow
|
||||
*/
|
||||
fun <T> reattachFlowWithClientId(clientId: String): FlowStateMachineHandle<T>?
|
||||
|
||||
/**
|
||||
* Removes a flow's [clientId] to result/ exception mapping.
|
||||
*
|
||||
* @return whether the mapping was removed.
|
||||
*/
|
||||
fun removeClientId(clientId: String): Boolean
|
||||
}
|
||||
|
||||
// These must be idempotent! A later failure in the state transition may error the flow state, and a replay may call
|
||||
@ -106,6 +129,7 @@ internal interface StateMachineManagerInternal {
|
||||
fun addSessionBinding(flowId: StateMachineRunId, sessionId: SessionId)
|
||||
fun removeSessionBindings(sessionIds: Set<SessionId>)
|
||||
fun removeFlow(flowId: StateMachineRunId, removalReason: FlowRemovalReason, lastState: StateMachineState)
|
||||
fun moveFlowToPaused(currentState: StateMachineState)
|
||||
fun retryFlowFromSafePoint(currentState: StateMachineState)
|
||||
fun scheduleFlowTimeout(flowId: StateMachineRunId)
|
||||
fun cancelFlowTimeout(flowId: StateMachineRunId)
|
||||
@ -137,11 +161,11 @@ interface ExternalEvent {
|
||||
/**
|
||||
* A callback for the state machine to pass back the [CordaFuture] associated with the flow start to the submitter.
|
||||
*/
|
||||
fun wireUpFuture(flowFuture: CordaFuture<FlowStateMachine<T>>)
|
||||
fun wireUpFuture(flowFuture: CordaFuture<out FlowStateMachineHandle<T>>)
|
||||
|
||||
/**
|
||||
* The future representing the flow start, passed back from the state machine to the submitter of this event.
|
||||
*/
|
||||
val future: CordaFuture<FlowStateMachine<T>>
|
||||
val future: CordaFuture<out FlowStateMachineHandle<T>>
|
||||
}
|
||||
}
|
||||
|
@ -4,13 +4,16 @@ import com.esotericsoftware.kryo.Kryo
|
||||
import com.esotericsoftware.kryo.KryoSerializable
|
||||
import com.esotericsoftware.kryo.io.Input
|
||||
import com.esotericsoftware.kryo.io.Output
|
||||
import net.corda.core.concurrent.CordaFuture
|
||||
import net.corda.core.context.InvocationContext
|
||||
import net.corda.core.crypto.SecureHash
|
||||
import net.corda.core.flows.Destination
|
||||
import net.corda.core.flows.FlowInfo
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.FlowIORequest
|
||||
import net.corda.core.internal.FlowStateMachineHandle
|
||||
import net.corda.core.serialization.CordaSerializable
|
||||
import net.corda.core.serialization.SerializationDefaults
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
@ -22,6 +25,7 @@ import net.corda.node.services.messaging.DeduplicationHandler
|
||||
import java.lang.IllegalStateException
|
||||
import java.time.Instant
|
||||
import java.util.concurrent.Future
|
||||
import java.util.concurrent.Semaphore
|
||||
|
||||
/**
|
||||
* The state of the state machine, capturing the state of a flow. It consists of two parts, an *immutable* part that is
|
||||
@ -41,9 +45,12 @@ import java.util.concurrent.Future
|
||||
* @param isRemoved true if the flow has been removed from the state machine manager. This is used to avoid any further
|
||||
* work.
|
||||
* @param isKilled true if the flow has been marked as killed. This is used to cause a flow to move to a killed flow transition no matter
|
||||
* what event it is set to process next. [isKilled] is a `var` and set as [Volatile] to prevent concurrency errors that can occur if a flow
|
||||
* is killed during the middle of a state transition.
|
||||
* what event it is set to process next.
|
||||
* @param senderUUID the identifier of the sending state machine or null if this flow is resumed from a checkpoint so that it does not participate in de-duplication high-water-marking.
|
||||
* @param reloadCheckpointAfterSuspendCount The number of times a flow has been reloaded (not retried). This is [null] when
|
||||
* [NodeConfiguration.reloadCheckpointAfterSuspendCount] is not enabled.
|
||||
* @param lock The flow's lock, used to prevent the flow performing a transition while being interacted with from external threads, and
|
||||
* vise-versa.
|
||||
*/
|
||||
// TODO perhaps add a read-only environment to the state machine for things that don't change over time?
|
||||
// TODO evaluate persistent datastructure libraries to replace the inefficient copying we currently do.
|
||||
@ -57,10 +64,10 @@ data class StateMachineState(
|
||||
val isAnyCheckpointPersisted: Boolean,
|
||||
val isStartIdempotent: Boolean,
|
||||
val isRemoved: Boolean,
|
||||
@Volatile
|
||||
var isKilled: Boolean,
|
||||
val isKilled: Boolean,
|
||||
val senderUUID: String?,
|
||||
val reloadCheckpointAfterSuspendCount: Int?
|
||||
val reloadCheckpointAfterSuspendCount: Int?,
|
||||
val lock: Semaphore
|
||||
) : KryoSerializable {
|
||||
override fun write(kryo: Kryo?, output: Output?) {
|
||||
throw IllegalStateException("${StateMachineState::class.qualifiedName} should never be serialized")
|
||||
@ -124,8 +131,8 @@ data class Checkpoint(
|
||||
listOf(topLevelSubFlow),
|
||||
numberOfSuspends = 0
|
||||
),
|
||||
errorState = ErrorState.Clean,
|
||||
flowState = FlowState.Unstarted(flowStart, frozenFlowLogic)
|
||||
flowState = FlowState.Unstarted(flowStart, frozenFlowLogic),
|
||||
errorState = ErrorState.Clean
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -203,7 +210,7 @@ data class Checkpoint(
|
||||
fun deserialize(checkpointSerializationContext: CheckpointSerializationContext): Checkpoint {
|
||||
val flowState = when(status) {
|
||||
FlowStatus.PAUSED -> FlowState.Paused
|
||||
FlowStatus.COMPLETED -> FlowState.Completed
|
||||
FlowStatus.COMPLETED, FlowStatus.FAILED -> FlowState.Finished
|
||||
else -> serializedFlowState!!.checkpointDeserialize(checkpointSerializationContext)
|
||||
}
|
||||
return Checkpoint(
|
||||
@ -346,9 +353,9 @@ sealed class FlowState {
|
||||
object Paused: FlowState()
|
||||
|
||||
/**
|
||||
* The flow has completed. It does not have a running fiber that needs to be serialized and checkpointed.
|
||||
* The flow has finished. It does not have a running fiber that needs to be serialized and checkpointed.
|
||||
*/
|
||||
object Completed : FlowState()
|
||||
object Finished : FlowState()
|
||||
|
||||
}
|
||||
|
||||
@ -408,3 +415,13 @@ sealed class SubFlowVersion {
|
||||
data class CoreFlow(override val platformVersion: Int) : SubFlowVersion()
|
||||
data class CorDappFlow(override val platformVersion: Int, val corDappName: String, val corDappHash: SecureHash) : SubFlowVersion()
|
||||
}
|
||||
|
||||
sealed class FlowWithClientIdStatus {
|
||||
data class Active(val flowStateMachineFuture: CordaFuture<out FlowStateMachineHandle<out Any?>>) : FlowWithClientIdStatus()
|
||||
data class Removed(val flowId: StateMachineRunId, val succeeded: Boolean) : FlowWithClientIdStatus()
|
||||
}
|
||||
|
||||
data class FlowResultMetadata(
|
||||
val status: Checkpoint.FlowStatus,
|
||||
val clientId: String?
|
||||
)
|
@ -1,6 +1,7 @@
|
||||
package net.corda.node.services.statemachine
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import net.corda.core.flows.ResultSerializationException
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.node.services.statemachine.transitions.FlowContinuation
|
||||
import net.corda.node.services.statemachine.transitions.TransitionResult
|
||||
@ -73,22 +74,12 @@ class TransitionExecutorImpl(
|
||||
log.info("Error while executing $action, with event $event, erroring state", exception)
|
||||
}
|
||||
|
||||
// distinguish between a DatabaseTransactionException and an actual StateTransitionException
|
||||
val stateTransitionOrDatabaseTransactionException =
|
||||
if (exception is DatabaseTransactionException) {
|
||||
// if the exception is a DatabaseTransactionException then it is not really a StateTransitionException
|
||||
// it is actually an exception that previously broke a DatabaseTransaction and was suppressed by user code
|
||||
// it was rethrown on [DatabaseTransaction.commit]. Unwrap the original exception and pass it to flow hospital
|
||||
exception.cause
|
||||
} else {
|
||||
// Wrap the exception with [StateTransitionException] for handling by the flow hospital
|
||||
StateTransitionException(action, event, exception)
|
||||
}
|
||||
val flowError = createError(exception, action, event)
|
||||
|
||||
val newState = previousState.copy(
|
||||
checkpoint = previousState.checkpoint.copy(
|
||||
errorState = previousState.checkpoint.errorState.addErrors(
|
||||
listOf(FlowError(secureRandom.nextLong(), stateTransitionOrDatabaseTransactionException))
|
||||
listOf(flowError)
|
||||
)
|
||||
),
|
||||
isFlowResumed = false
|
||||
@ -121,4 +112,23 @@ class TransitionExecutorImpl(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private fun createError(e: Exception, action: Action, event: Event): FlowError {
|
||||
// distinguish between a DatabaseTransactionException and an actual StateTransitionException
|
||||
val stateTransitionOrOtherException: Throwable =
|
||||
if (e is DatabaseTransactionException) {
|
||||
// if the exception is a DatabaseTransactionException then it is not really a StateTransitionException
|
||||
// it is actually an exception that previously broke a DatabaseTransaction and was suppressed by user code
|
||||
// it was rethrown on [DatabaseTransaction.commit]. Unwrap the original exception and pass it to flow hospital
|
||||
e.cause
|
||||
} else if (e is ResultSerializationException) {
|
||||
// We must not wrap a [ResultSerializationException] with a [StateTransitionException],
|
||||
// because we will propagate the exception to rpc clients and [StateTransitionException] cannot be propagated to rpc clients.
|
||||
e
|
||||
} else {
|
||||
// Wrap the exception with [StateTransitionException] for handling by the flow hospital
|
||||
StateTransitionException(action, event, e)
|
||||
}
|
||||
return FlowError(secureRandom.nextLong(), stateTransitionOrOtherException)
|
||||
}
|
||||
}
|
@ -25,12 +25,13 @@ class DoRemainingWorkTransition(
|
||||
}
|
||||
|
||||
// If the flow is clean check the FlowState
|
||||
@Suppress("ThrowsCount")
|
||||
private fun cleanTransition(): TransitionResult {
|
||||
val flowState = startingState.checkpoint.flowState
|
||||
return when (flowState) {
|
||||
is FlowState.Unstarted -> UnstartedFlowTransition(context, startingState, flowState).transition()
|
||||
is FlowState.Started -> StartedFlowTransition(context, startingState, flowState).transition()
|
||||
is FlowState.Completed -> throw IllegalStateException("Cannot transition a state with completed flow state.")
|
||||
is FlowState.Finished -> throw IllegalStateException("Cannot transition a state with finished flow state.")
|
||||
is FlowState.Paused -> throw IllegalStateException("Cannot transition a state with paused flow state.")
|
||||
}
|
||||
}
|
||||
|
@ -61,9 +61,15 @@ class ErrorFlowTransition(
|
||||
if (!currentState.isRemoved) {
|
||||
val newCheckpoint = startingState.checkpoint.copy(status = Checkpoint.FlowStatus.FAILED)
|
||||
|
||||
val removeOrPersistCheckpoint = if (currentState.checkpoint.checkpointState.invocationContext.clientId == null) {
|
||||
Action.RemoveCheckpoint(context.id)
|
||||
} else {
|
||||
Action.PersistCheckpoint(context.id, newCheckpoint.copy(flowState = FlowState.Finished), isCheckpointUpdate = currentState.isAnyCheckpointPersisted)
|
||||
}
|
||||
|
||||
actions.addAll(arrayOf(
|
||||
Action.CreateTransaction,
|
||||
Action.PersistCheckpoint(context.id, newCheckpoint, isCheckpointUpdate = currentState.isAnyCheckpointPersisted),
|
||||
removeOrPersistCheckpoint,
|
||||
Action.PersistDeduplicationFacts(currentState.pendingDeduplicationHandlers),
|
||||
Action.ReleaseSoftLocks(context.id.uuid),
|
||||
Action.CommitTransaction,
|
||||
|
@ -44,7 +44,7 @@ class KilledFlowTransition(
|
||||
}
|
||||
// The checkpoint and soft locks are also removed directly in [StateMachineManager.killFlow]
|
||||
if (startingState.isAnyCheckpointPersisted) {
|
||||
actions.add(Action.RemoveCheckpoint(context.id))
|
||||
actions.add(Action.RemoveCheckpoint(context.id, mayHavePersistentResults = true))
|
||||
}
|
||||
actions.addAll(
|
||||
arrayOf(
|
||||
|
@ -41,47 +41,18 @@ class StartedFlowTransition(
|
||||
continuation = FlowContinuation.Throw(errorsToThrow[0])
|
||||
)
|
||||
}
|
||||
val sessionsToBeTerminated = findSessionsToBeTerminated(startingState)
|
||||
// if there are sessions to be closed, we close them as part of this transition and normal processing will continue on the next transition.
|
||||
return if (sessionsToBeTerminated.isNotEmpty()) {
|
||||
terminateSessions(sessionsToBeTerminated)
|
||||
} else {
|
||||
when (flowIORequest) {
|
||||
is FlowIORequest.Send -> sendTransition(flowIORequest)
|
||||
is FlowIORequest.Receive -> receiveTransition(flowIORequest)
|
||||
is FlowIORequest.SendAndReceive -> sendAndReceiveTransition(flowIORequest)
|
||||
is FlowIORequest.CloseSessions -> closeSessionTransition(flowIORequest)
|
||||
is FlowIORequest.WaitForLedgerCommit -> waitForLedgerCommitTransition(flowIORequest)
|
||||
is FlowIORequest.Sleep -> sleepTransition(flowIORequest)
|
||||
is FlowIORequest.GetFlowInfo -> getFlowInfoTransition(flowIORequest)
|
||||
is FlowIORequest.WaitForSessionConfirmations -> waitForSessionConfirmationsTransition()
|
||||
is FlowIORequest.ExecuteAsyncOperation<*> -> executeAsyncOperation(flowIORequest)
|
||||
FlowIORequest.ForceCheckpoint -> executeForceCheckpoint()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private fun findSessionsToBeTerminated(startingState: StateMachineState): SessionMap {
|
||||
return startingState.checkpoint.checkpointState.sessionsToBeClosed.mapNotNull { sessionId ->
|
||||
val sessionState = startingState.checkpoint.checkpointState.sessions[sessionId]!! as SessionState.Initiated
|
||||
if (sessionState.receivedMessages.isNotEmpty() && sessionState.receivedMessages.first() is EndSessionMessage) {
|
||||
sessionId to sessionState
|
||||
} else {
|
||||
null
|
||||
}
|
||||
}.toMap()
|
||||
}
|
||||
|
||||
private fun terminateSessions(sessionsToBeTerminated: SessionMap): TransitionResult {
|
||||
return builder {
|
||||
val sessionsToRemove = sessionsToBeTerminated.keys
|
||||
val newCheckpoint = currentState.checkpoint.removeSessions(sessionsToRemove)
|
||||
.removeSessionsToBeClosed(sessionsToRemove)
|
||||
currentState = currentState.copy(checkpoint = newCheckpoint)
|
||||
actions.add(Action.RemoveSessionBindings(sessionsToRemove))
|
||||
actions.add(Action.ScheduleEvent(Event.DoRemainingWork))
|
||||
FlowContinuation.ProcessEvents
|
||||
}
|
||||
return when (flowIORequest) {
|
||||
is FlowIORequest.Send -> sendTransition(flowIORequest)
|
||||
is FlowIORequest.Receive -> receiveTransition(flowIORequest)
|
||||
is FlowIORequest.SendAndReceive -> sendAndReceiveTransition(flowIORequest)
|
||||
is FlowIORequest.CloseSessions -> closeSessionTransition(flowIORequest)
|
||||
is FlowIORequest.WaitForLedgerCommit -> waitForLedgerCommitTransition(flowIORequest)
|
||||
is FlowIORequest.Sleep -> sleepTransition(flowIORequest)
|
||||
is FlowIORequest.GetFlowInfo -> getFlowInfoTransition(flowIORequest)
|
||||
is FlowIORequest.WaitForSessionConfirmations -> waitForSessionConfirmationsTransition()
|
||||
is FlowIORequest.ExecuteAsyncOperation<*> -> executeAsyncOperation(flowIORequest)
|
||||
FlowIORequest.ForceCheckpoint -> executeForceCheckpoint()
|
||||
}.let { scheduleTerminateSessionsIfRequired(it) }
|
||||
}
|
||||
|
||||
private fun waitForSessionConfirmationsTransition(): TransitionResult {
|
||||
@ -158,6 +129,7 @@ class StartedFlowTransition(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("TooGenericExceptionCaught")
|
||||
private fun sendAndReceiveTransition(flowIORequest: FlowIORequest.SendAndReceive): TransitionResult {
|
||||
val sessionIdToMessage = LinkedHashMap<SessionId, SerializedBytes<Any>>()
|
||||
val sessionIdToSession = LinkedHashMap<SessionId, FlowSessionImpl>()
|
||||
@ -171,18 +143,23 @@ class StartedFlowTransition(
|
||||
if (isErrored()) {
|
||||
FlowContinuation.ProcessEvents
|
||||
} else {
|
||||
val receivedMap = receiveFromSessionsTransition(sessionIdToSession)
|
||||
if (receivedMap == null) {
|
||||
// We don't yet have the messages, change the suspension to be on Receive
|
||||
val newIoRequest = FlowIORequest.Receive(flowIORequest.sessionToMessage.keys.toNonEmptySet())
|
||||
currentState = currentState.copy(
|
||||
try {
|
||||
val receivedMap = receiveFromSessionsTransition(sessionIdToSession)
|
||||
if (receivedMap == null) {
|
||||
// We don't yet have the messages, change the suspension to be on Receive
|
||||
val newIoRequest = FlowIORequest.Receive(flowIORequest.sessionToMessage.keys.toNonEmptySet())
|
||||
currentState = currentState.copy(
|
||||
checkpoint = currentState.checkpoint.copy(
|
||||
flowState = FlowState.Started(newIoRequest, started.frozenFiber)
|
||||
flowState = FlowState.Started(newIoRequest, started.frozenFiber)
|
||||
)
|
||||
)
|
||||
FlowContinuation.ProcessEvents
|
||||
} else {
|
||||
resumeFlowLogic(receivedMap)
|
||||
)
|
||||
FlowContinuation.ProcessEvents
|
||||
} else {
|
||||
resumeFlowLogic(receivedMap)
|
||||
}
|
||||
} catch (t: Throwable) {
|
||||
// E.g. A session end message received while expecting a data session message
|
||||
resumeFlowLogic(t)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -216,6 +193,7 @@ class StartedFlowTransition(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("TooGenericExceptionCaught")
|
||||
private fun receiveTransition(flowIORequest: FlowIORequest.Receive): TransitionResult {
|
||||
return builder {
|
||||
val sessionIdToSession = LinkedHashMap<SessionId, FlowSessionImpl>()
|
||||
@ -224,11 +202,16 @@ class StartedFlowTransition(
|
||||
}
|
||||
// send initialises to uninitialised sessions
|
||||
sendInitialSessionMessagesIfNeeded(sessionIdToSession.keys)
|
||||
val receivedMap = receiveFromSessionsTransition(sessionIdToSession)
|
||||
if (receivedMap == null) {
|
||||
FlowContinuation.ProcessEvents
|
||||
} else {
|
||||
resumeFlowLogic(receivedMap)
|
||||
try {
|
||||
val receivedMap = receiveFromSessionsTransition(sessionIdToSession)
|
||||
if (receivedMap == null) {
|
||||
FlowContinuation.ProcessEvents
|
||||
} else {
|
||||
resumeFlowLogic(receivedMap)
|
||||
}
|
||||
} catch (t: Throwable) {
|
||||
// E.g. A session end message received while expecting a data session message
|
||||
resumeFlowLogic(t)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -253,6 +236,8 @@ class StartedFlowTransition(
|
||||
val messages: Map<SessionId, SerializedBytes<Any>>,
|
||||
val newSessionMap: SessionMap
|
||||
)
|
||||
|
||||
@Suppress("ComplexMethod", "NestedBlockDepth")
|
||||
private fun pollSessionMessages(sessions: SessionMap, sessionIds: Set<SessionId>): PollResult? {
|
||||
val newSessionMessages = LinkedHashMap(sessions)
|
||||
val resultMessages = LinkedHashMap<SessionId, SerializedBytes<Any>>()
|
||||
@ -267,7 +252,11 @@ class StartedFlowTransition(
|
||||
} else {
|
||||
newSessionMessages[sessionId] = sessionState.copy(receivedMessages = messages.subList(1, messages.size).toList())
|
||||
// at this point, we've already checked for errors and session ends, so it's guaranteed that the first message will be a data message.
|
||||
resultMessages[sessionId] = (messages[0] as DataSessionMessage).payload
|
||||
resultMessages[sessionId] = if (messages[0] is EndSessionMessage) {
|
||||
throw UnexpectedFlowEndException("Received session end message instead of a data session message. Mismatched send and receive?")
|
||||
} else {
|
||||
(messages[0] as DataSessionMessage).payload
|
||||
}
|
||||
}
|
||||
}
|
||||
else -> {
|
||||
@ -537,4 +526,25 @@ class StartedFlowTransition(
|
||||
private fun executeForceCheckpoint(): TransitionResult {
|
||||
return builder { resumeFlowLogic(Unit) }
|
||||
}
|
||||
|
||||
private fun scheduleTerminateSessionsIfRequired(transition: TransitionResult): TransitionResult {
|
||||
// If there are sessions to be closed, close them on a following transition
|
||||
val sessionsToBeTerminated = findSessionsToBeTerminated(transition.newState)
|
||||
return if (sessionsToBeTerminated.isNotEmpty()) {
|
||||
transition.copy(actions = transition.actions + Action.ScheduleEvent(Event.TerminateSessions(sessionsToBeTerminated.keys)))
|
||||
} else {
|
||||
transition
|
||||
}
|
||||
}
|
||||
|
||||
private fun findSessionsToBeTerminated(startingState: StateMachineState): SessionMap {
|
||||
return startingState.checkpoint.checkpointState.sessionsToBeClosed.mapNotNull { sessionId ->
|
||||
val sessionState = startingState.checkpoint.checkpointState.sessions[sessionId]!! as SessionState.Initiated
|
||||
if (sessionState.receivedMessages.isNotEmpty() && sessionState.receivedMessages.first() is EndSessionMessage) {
|
||||
sessionId to sessionState
|
||||
} else {
|
||||
null
|
||||
}
|
||||
}.toMap()
|
||||
}
|
||||
}
|
||||
|
@ -37,31 +37,38 @@ class TopLevelTransition(
|
||||
val event: Event
|
||||
) : Transition {
|
||||
|
||||
@Suppress("ComplexMethod")
|
||||
@Suppress("ComplexMethod", "TooGenericExceptionCaught")
|
||||
override fun transition(): TransitionResult {
|
||||
return try {
|
||||
if (startingState.isKilled) {
|
||||
return KilledFlowTransition(context, startingState, event).transition()
|
||||
}
|
||||
|
||||
if (startingState.isKilled) {
|
||||
return KilledFlowTransition(context, startingState, event).transition()
|
||||
}
|
||||
|
||||
return when (event) {
|
||||
is Event.DoRemainingWork -> DoRemainingWorkTransition(context, startingState).transition()
|
||||
is Event.DeliverSessionMessage -> DeliverSessionMessageTransition(context, startingState, event).transition()
|
||||
is Event.Error -> errorTransition(event)
|
||||
is Event.TransactionCommitted -> transactionCommittedTransition(event)
|
||||
is Event.SoftShutdown -> softShutdownTransition()
|
||||
is Event.StartErrorPropagation -> startErrorPropagationTransition()
|
||||
is Event.EnterSubFlow -> enterSubFlowTransition(event)
|
||||
is Event.LeaveSubFlow -> leaveSubFlowTransition()
|
||||
is Event.Suspend -> suspendTransition(event)
|
||||
is Event.FlowFinish -> flowFinishTransition(event)
|
||||
is Event.InitiateFlow -> initiateFlowTransition(event)
|
||||
is Event.AsyncOperationCompletion -> asyncOperationCompletionTransition(event)
|
||||
is Event.AsyncOperationThrows -> asyncOperationThrowsTransition(event)
|
||||
is Event.RetryFlowFromSafePoint -> retryFlowFromSafePointTransition()
|
||||
is Event.ReloadFlowFromCheckpointAfterSuspend -> reloadFlowFromCheckpointAfterSuspendTransition()
|
||||
is Event.OvernightObservation -> overnightObservationTransition()
|
||||
is Event.WakeUpFromSleep -> wakeUpFromSleepTransition()
|
||||
when (event) {
|
||||
is Event.DoRemainingWork -> DoRemainingWorkTransition(context, startingState).transition()
|
||||
is Event.DeliverSessionMessage -> DeliverSessionMessageTransition(context, startingState, event).transition()
|
||||
is Event.Error -> errorTransition(event)
|
||||
is Event.TransactionCommitted -> transactionCommittedTransition(event)
|
||||
is Event.SoftShutdown -> softShutdownTransition()
|
||||
is Event.StartErrorPropagation -> startErrorPropagationTransition()
|
||||
is Event.EnterSubFlow -> enterSubFlowTransition(event)
|
||||
is Event.LeaveSubFlow -> leaveSubFlowTransition()
|
||||
is Event.Suspend -> suspendTransition(event)
|
||||
is Event.FlowFinish -> flowFinishTransition(event)
|
||||
is Event.InitiateFlow -> initiateFlowTransition(event)
|
||||
is Event.AsyncOperationCompletion -> asyncOperationCompletionTransition(event)
|
||||
is Event.AsyncOperationThrows -> asyncOperationThrowsTransition(event)
|
||||
is Event.RetryFlowFromSafePoint -> retryFlowFromSafePointTransition()
|
||||
is Event.ReloadFlowFromCheckpointAfterSuspend -> reloadFlowFromCheckpointAfterSuspendTransition()
|
||||
is Event.OvernightObservation -> overnightObservationTransition()
|
||||
is Event.WakeUpFromSleep -> wakeUpFromSleepTransition()
|
||||
is Event.Pause -> pausedFlowTransition()
|
||||
is Event.TerminateSessions -> terminateSessionsTransition(event)
|
||||
}
|
||||
} catch (t: Throwable) {
|
||||
// All errors coming from the transition should be sent back to the flow
|
||||
// Letting the flow re-enter standard error handling
|
||||
builder { resumeFlowLogic(t) }
|
||||
}
|
||||
}
|
||||
|
||||
@ -178,7 +185,7 @@ class TopLevelTransition(
|
||||
private fun suspendTransition(event: Event.Suspend): TransitionResult {
|
||||
return builder {
|
||||
val newCheckpoint = currentState.checkpoint.run {
|
||||
val newCheckpointState = if (checkpointState.invocationContext.arguments.isNotEmpty()) {
|
||||
val newCheckpointState = if (checkpointState.invocationContext.arguments!!.isNotEmpty()) {
|
||||
checkpointState.copy(
|
||||
invocationContext = checkpointState.invocationContext.copy(arguments = emptyList()),
|
||||
numberOfSuspends = checkpointState.numberOfSuspends + 1
|
||||
@ -232,7 +239,7 @@ class TopLevelTransition(
|
||||
checkpointState = checkpoint.checkpointState.copy(
|
||||
numberOfSuspends = checkpoint.checkpointState.numberOfSuspends + 1
|
||||
),
|
||||
flowState = FlowState.Completed,
|
||||
flowState = FlowState.Finished,
|
||||
result = event.returnValue,
|
||||
status = Checkpoint.FlowStatus.COMPLETED
|
||||
),
|
||||
@ -240,10 +247,22 @@ class TopLevelTransition(
|
||||
isFlowResumed = false,
|
||||
isRemoved = true
|
||||
)
|
||||
val allSourceSessionIds = checkpoint.checkpointState.sessions.keys
|
||||
|
||||
if (currentState.isAnyCheckpointPersisted) {
|
||||
actions.add(Action.RemoveCheckpoint(context.id))
|
||||
if (currentState.checkpoint.checkpointState.invocationContext.clientId == null) {
|
||||
actions.add(Action.RemoveCheckpoint(context.id))
|
||||
} else {
|
||||
actions.add(
|
||||
Action.PersistCheckpoint(
|
||||
context.id,
|
||||
currentState.checkpoint,
|
||||
isCheckpointUpdate = currentState.isAnyCheckpointPersisted
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
val allSourceSessionIds = currentState.checkpoint.checkpointState.sessions.keys
|
||||
actions.addAll(arrayOf(
|
||||
Action.PersistDeduplicationFacts(pendingDeduplicationHandlers),
|
||||
Action.ReleaseSoftLocks(event.softLocksId),
|
||||
@ -366,4 +385,32 @@ class TopLevelTransition(
|
||||
resumeFlowLogic(Unit)
|
||||
}
|
||||
}
|
||||
|
||||
private fun pausedFlowTransition(): TransitionResult {
|
||||
return builder {
|
||||
if (!startingState.isFlowResumed) {
|
||||
actions.add(Action.CreateTransaction)
|
||||
}
|
||||
actions.addAll(
|
||||
arrayOf(
|
||||
Action.UpdateFlowStatus(context.id, Checkpoint.FlowStatus.PAUSED),
|
||||
Action.CommitTransaction,
|
||||
Action.MoveFlowToPaused(currentState)
|
||||
)
|
||||
)
|
||||
FlowContinuation.Abort
|
||||
}
|
||||
}
|
||||
|
||||
private fun terminateSessionsTransition(event: Event.TerminateSessions): TransitionResult {
|
||||
return builder {
|
||||
val sessions = event.sessions
|
||||
val newCheckpoint = currentState.checkpoint
|
||||
.removeSessions(sessions)
|
||||
.removeSessionsToBeClosed(sessions)
|
||||
currentState = currentState.copy(checkpoint = newCheckpoint)
|
||||
actions.add(Action.RemoveSessionBindings(sessions))
|
||||
FlowContinuation.ProcessEvents
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,6 @@
|
||||
additionalP2PAddresses = []
|
||||
crlCheckSoftFail = true
|
||||
database = {
|
||||
transactionIsolationLevel = "REPEATABLE_READ"
|
||||
exportHibernateJMXStatistics = "false"
|
||||
}
|
||||
dataSourceProperties = {
|
||||
|
@ -12,12 +12,18 @@
|
||||
<addPrimaryKey columnNames="flow_id" constraintName="node_checkpoints_pk" tableName="node_checkpoints"/>
|
||||
</changeSet>
|
||||
|
||||
<!-- TODO: add indexes for the rest of the tables as well (Results + Exceptions) -->
|
||||
<!-- TODO: add indexes for Exceptions table as well -->
|
||||
<!-- TODO: the following only add indexes so maybe also align name of file? -->
|
||||
<changeSet author="R3.Corda" id="add_new_checkpoint_schema_indexes">
|
||||
<createIndex indexName="node_checkpoint_blobs_idx" tableName="node_checkpoint_blobs" clustered="false" unique="true">
|
||||
<column name="flow_id"/>
|
||||
</createIndex>
|
||||
<createIndex indexName="node_flow_results_idx" tableName="node_flow_results" clustered="false" unique="true">
|
||||
<column name="flow_id"/>
|
||||
</createIndex>
|
||||
<createIndex indexName="node_flow_exceptions_idx" tableName="node_flow_exceptions" clustered="false" unique="true">
|
||||
<column name="flow_id"/>
|
||||
</createIndex>
|
||||
<createIndex indexName="node_flow_metadata_idx" tableName="node_flow_metadata" clustered="false" unique="true">
|
||||
<column name="flow_id"/>
|
||||
</createIndex>
|
||||
|
@ -49,14 +49,13 @@
|
||||
</createTable>
|
||||
</changeSet>
|
||||
|
||||
|
||||
<changeSet author="R3.Corda" id="add_new_flow_result_table-postgres" dbms="postgresql">
|
||||
<createTable tableName="node_flow_results">
|
||||
<column name="flow_id" type="NVARCHAR(64)">
|
||||
<constraints nullable="false"/>
|
||||
</column>
|
||||
<column name="result_value" type="varbinary(33554432)">
|
||||
<constraints nullable="false"/>
|
||||
<constraints nullable="true"/>
|
||||
</column>
|
||||
<column name="timestamp" type="java.sql.Types.TIMESTAMP">
|
||||
<constraints nullable="false"/>
|
||||
|
@ -49,14 +49,13 @@
|
||||
</createTable>
|
||||
</changeSet>
|
||||
|
||||
|
||||
<changeSet author="R3.Corda" id="add_new_flow_result_table" dbms="!postgresql">
|
||||
<createTable tableName="node_flow_results">
|
||||
<column name="flow_id" type="NVARCHAR(64)">
|
||||
<constraints nullable="false"/>
|
||||
</column>
|
||||
<column name="result_value" type="blob">
|
||||
<constraints nullable="false"/>
|
||||
<constraints nullable="true"/>
|
||||
</column>
|
||||
<column name="timestamp" type="java.sql.Types.TIMESTAMP">
|
||||
<constraints nullable="false"/>
|
||||
|
@ -6,6 +6,8 @@ import net.corda.core.context.InvocationOrigin
|
||||
import net.corda.core.contracts.ContractState
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.StartableByService
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.internal.packageName
|
||||
import net.corda.core.node.AppServiceHub
|
||||
import net.corda.core.node.ServiceHub
|
||||
import net.corda.core.node.services.CordaService
|
||||
@ -16,12 +18,20 @@ import net.corda.core.utilities.OpaqueBytes
|
||||
import net.corda.core.utilities.ProgressTracker
|
||||
import net.corda.finance.DOLLARS
|
||||
import net.corda.finance.flows.CashIssueFlow
|
||||
import net.corda.finance.schemas.CashSchemaV1
|
||||
import net.corda.node.internal.cordapp.DummyRPCFlow
|
||||
import net.corda.testing.core.BOC_NAME
|
||||
import net.corda.testing.core.DUMMY_NOTARY_NAME
|
||||
import net.corda.testing.core.TestIdentity
|
||||
import net.corda.testing.internal.vault.DummyLinearStateSchemaV1
|
||||
import net.corda.testing.node.MockNetwork
|
||||
import net.corda.testing.node.MockNetworkParameters
|
||||
import net.corda.testing.node.MockServices
|
||||
import net.corda.testing.node.StartedMockNode
|
||||
import net.corda.testing.node.internal.FINANCE_CONTRACTS_CORDAPP
|
||||
import net.corda.testing.node.internal.enclosedCordapp
|
||||
import net.corda.testing.node.makeTestIdentityService
|
||||
import org.assertj.core.api.Assertions
|
||||
import org.junit.After
|
||||
import org.junit.Before
|
||||
import org.junit.Test
|
||||
@ -100,6 +110,22 @@ class CordaServiceTest {
|
||||
nodeA.services.cordaService(EntityManagerService::class.java)
|
||||
}
|
||||
|
||||
@Test(timeout=300_000)
|
||||
fun `MockServices when initialized with package name not on classpath throws ClassNotFoundException`() {
|
||||
val cordappPackages = listOf(
|
||||
"com.r3.corda.sdk.tokens.money",
|
||||
"net.corda.finance.contracts",
|
||||
CashSchemaV1::class.packageName,
|
||||
DummyLinearStateSchemaV1::class.packageName)
|
||||
val bankOfCorda = TestIdentity(BOC_NAME)
|
||||
val dummyCashIssuer = TestIdentity(CordaX500Name("Snake Oil Issuer", "London", "GB"), 10)
|
||||
val dummyNotary = TestIdentity(DUMMY_NOTARY_NAME, 20)
|
||||
val identityService = makeTestIdentityService(dummyNotary.identity)
|
||||
|
||||
Assertions.assertThatThrownBy { MockServices(cordappPackages, dummyNotary, identityService, dummyCashIssuer.keyPair, bankOfCorda.keyPair) }
|
||||
.isInstanceOf(ClassNotFoundException::class.java).hasMessage("Could not create jar file as the given package is not found on the classpath: com.r3.corda.sdk.tokens.money")
|
||||
}
|
||||
|
||||
@StartableByService
|
||||
class DummyServiceFlow : FlowLogic<InvocationContext>() {
|
||||
companion object {
|
||||
|
@ -0,0 +1,173 @@
|
||||
package net.corda.node.internal

import com.nhaarman.mockito_kotlin.atLeast
import com.nhaarman.mockito_kotlin.mock
import com.nhaarman.mockito_kotlin.verify
import com.nhaarman.mockito_kotlin.whenever
import net.corda.core.serialization.SerializeAsToken
import net.corda.core.utilities.NetworkHostAndPort
import net.corda.node.VersionInfo
import net.corda.node.services.config.NodeConfiguration
import net.corda.node.services.config.NodeH2Settings
import net.corda.node.services.events.NodeSchedulerService
import net.corda.node.services.messaging.MessagingService
import net.corda.node.services.network.NetworkMapUpdater
import net.corda.node.services.statemachine.StateMachineManager
import net.corda.nodeapi.internal.cryptoservice.CryptoService
import net.corda.nodeapi.internal.persistence.CouldNotCreateDataSourceException
import net.corda.nodeapi.internal.persistence.DatabaseConfig
import org.assertj.core.api.Assertions.assertThat
import org.h2.tools.Server
import org.junit.Test
import java.net.InetAddress
import java.sql.Connection
import java.sql.DatabaseMetaData
import java.util.*
import java.util.concurrent.ExecutorService
import javax.sql.DataSource
import kotlin.test.assertFailsWith

class NodeH2SecurityTests {

    @Test(timeout=300_000)
    fun `h2 server on the host name requires non-default database password`() {
        hikaryProperties.setProperty("dataSource.url", "jdbc:h2:file:my_file")
        hikaryProperties.setProperty("dataSource.password", "")
        address = NetworkHostAndPort(InetAddress.getLocalHost().hostName, 1080)
        val node = MockNode()

        val exception = assertFailsWith(CouldNotCreateDataSourceException::class) {
            node.startDb()
        }
        assertThat(exception.message).contains("Database password is required for H2 server listening on ")
    }

    @Test(timeout=300_000)
    fun `h2 server on the host IP requires non-default database password`() {
        hikaryProperties.setProperty("dataSource.url", "jdbc:h2:file:my_file")
        hikaryProperties.setProperty("dataSource.password", "")
        address = NetworkHostAndPort(InetAddress.getLocalHost().hostAddress, 1080)
        val node = MockNode()

        val exception = assertFailsWith(CouldNotCreateDataSourceException::class) {
            node.startDb()
        }
        assertThat(exception.message).contains("Database password is required for H2 server listening on")
    }

    @Test(timeout=300_000)
    fun `h2 server on the host name requires non-blank database password`() {
        hikaryProperties.setProperty("dataSource.url", "jdbc:h2:file:my_file")
        hikaryProperties.setProperty("dataSource.password", " ")
        address = NetworkHostAndPort(InetAddress.getLocalHost().hostName, 1080)
        val node = MockNode()

        val exception = assertFailsWith(CouldNotCreateDataSourceException::class) {
            node.startDb()
        }
        assertThat(exception.message).contains("Database password is required for H2 server listening on")
    }

    @Test(timeout=300_000)
    fun `h2 server on the host IP requires non-blank database password`() {
        hikaryProperties.setProperty("dataSource.url", "jdbc:h2:file:my_file")
        hikaryProperties.setProperty("dataSource.password", " ")
        address = NetworkHostAndPort(InetAddress.getLocalHost().hostAddress, 1080)
        val node = MockNode()

        val exception = assertFailsWith(CouldNotCreateDataSourceException::class) {
            node.startDb()
        }

        assertThat(exception.message).contains("Database password is required for H2 server listening on")
    }

    @Test(timeout=300_000)
    fun `h2 server on localhost runs with the default database password`() {
        hikaryProperties.setProperty("dataSource.url", "jdbc:h2:file:dir/file;")
        hikaryProperties.setProperty("dataSource.password", "")
        address = NetworkHostAndPort("localhost", 80)

        val node = MockNode()
        node.startDb()

        verify(dataSource, atLeast(1)).connection
    }

    @Test(timeout=300_000)
    fun `h2 server to loopback IP runs with the default database password`() {
        hikaryProperties.setProperty("dataSource.url", "jdbc:h2:file:dir/file;")
        hikaryProperties.setProperty("dataSource.password", "")
        address = NetworkHostAndPort("127.0.0.1", 80)

        val node = MockNode()
        node.startDb()

        verify(dataSource, atLeast(1)).connection
    }

    @Test(timeout=300_000)
    fun `h2 server set allowedClasses system properties`() {
        System.setProperty("h2.allowedClasses", "*")
        hikaryProperties.setProperty("dataSource.url", "jdbc:h2:file:dir/file;")
        hikaryProperties.setProperty("dataSource.password", "")
        address = NetworkHostAndPort("127.0.0.1", 80)

        val node = MockNode()
        node.startDb()

        val allowClasses = System.getProperty("h2.allowedClasses").split(",")
        assertThat(allowClasses).contains("org.h2.mvstore.db.MVTableEngine",
                "org.locationtech.jts.geom.Geometry",
                "org.h2.server.TcpServer")
        assertThat(allowClasses).doesNotContain("*")
    }

    private val config = mock<NodeConfiguration>()
    private val hikaryProperties = Properties()
    private val database = DatabaseConfig()
    private var address: NetworkHostAndPort? = null
    val dataSource = mock<DataSource>()

    init {
        whenever(config.database).thenReturn(database)
        whenever(config.dataSourceProperties).thenReturn(hikaryProperties)
        whenever(config.baseDirectory).thenReturn(mock())
        whenever(config.effectiveH2Settings).thenAnswer { NodeH2Settings(address) }
    }

    private inner class MockNode: Node(config, VersionInfo.UNKNOWN, false) {
        fun startDb() = startDatabase()

        override fun makeMessagingService(): MessagingService {
            val service = mock<MessagingService>(extraInterfaces = arrayOf(SerializeAsToken::class))
            whenever(service.activeChange).thenReturn(mock())
            return service
        }

        override fun makeStateMachineManager(): StateMachineManager = mock()

        override fun createExternalOperationExecutor(numberOfThreads: Int): ExecutorService = mock()

        override fun makeCryptoService(): CryptoService = mock()

        override fun makeNetworkMapUpdater(): NetworkMapUpdater = mock()

        override fun makeNodeSchedulerService(): NodeSchedulerService = mock()

        override fun startHikariPool() {
            val connection = mock<Connection>()
            val metaData = mock<DatabaseMetaData>()
            whenever(dataSource.connection).thenReturn(connection)
            whenever(connection.metaData).thenReturn(metaData)
            database.start(dataSource)
        }

        override fun createH2Server(baseDir: String, databaseName: String, port: Int): Server {
            val server = mock<Server>()
            whenever(server.start()).thenReturn(server)
            whenever(server.url).thenReturn("")
            return server
        }
    }
}
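Taken together, these tests pin down a simple rule: a default (blank or whitespace-only) H2 password is acceptable only when the embedded server is bound to a loopback address; binding to the machine's host name or external IP without a real password must fail before the datasource is created, and a wildcard h2.allowedClasses setting is narrowed to a fixed allow-list. The sketch below restates the password rule as a standalone check; the helper names and the use of IllegalStateException are assumptions made for illustration, whereas the node itself throws CouldNotCreateDataSourceException from its database start-up path.

import java.net.InetAddress

// Hypothetical helpers mirroring the behaviour verified above; not Corda's actual internals.
fun requiresDatabasePassword(host: String): Boolean =
        !InetAddress.getByName(host).isLoopbackAddress

fun checkH2Password(host: String, password: String) {
    if (requiresDatabasePassword(host) && password.isBlank()) {
        // Same wording as the message asserted in the tests above.
        throw IllegalStateException("Database password is required for H2 server listening on $host.")
    }
}

fun main() {
    checkH2Password("127.0.0.1", "")  // accepted: loopback address
    // Usually rejected: the local host name typically resolves to a non-loopback address.
    runCatching { checkH2Password(InetAddress.getLocalHost().hostName, "") }
            .exceptionOrNull()?.let { println(it.message) }
}
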
Some files were not shown because too many files have changed in this diff.