Mirror of https://github.com/corda/corda.git (synced 2024-12-24 07:06:44 +00:00)

Commit 6a3a9eded3: Merge remote-tracking branch 'origin/release/os/4.3' into my_merge_branch

# Conflicts:
#	core/src/main/kotlin/net/corda/core/internal/TransactionVerifierServiceInternal.kt
#	node/src/integration-test/kotlin/net/corda/node/services/rpc/RpcExceptionHandlingTest.kt
#	testing/node-driver/src/main/kotlin/net/corda/testing/node/internal/DriverDSLImpl.kt
@@ -4034,7 +4034,7 @@ public interface net.corda.core.node.services.VaultService
public abstract net.corda.core.concurrent.CordaFuture<net.corda.core.node.services.Vault$Update<net.corda.core.contracts.ContractState>> whenConsumed(net.corda.core.contracts.StateRef)
##
public final class net.corda.core.node.services.VaultServiceKt extends java.lang.Object
public static final int MAX_CONSTRAINT_DATA_SIZE = 563
public static final int MAX_CONSTRAINT_DATA_SIZE = 20000
##
@CordaSerializable
public final class net.corda.core.node.services.vault.AggregateFunctionType extends java.lang.Enum
.ci/dev/smoke/Jenkinsfile (51 lines changed, vendored)
@@ -4,69 +4,94 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob

killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())

pipeline {
    agent { label 'k8s' }
    options { timestamps() }

    triggers {
        issueCommentTrigger('.*smoke tests.*')
    }

    agent { label 'k8s' }
    options { timestamps() }

    environment {
        DOCKER_TAG_TO_USE = "${env.GIT_COMMIT.subSequence(0, 8)}st"
        EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}"
        BUILD_ID = "${env.BUILD_ID}-${env.JOB_NAME}"
    }

    stages {
        stage('Smoke Tests') {
        stage('Corda Smoke Tests') {
            steps {
                script {
                    if (currentBuildTriggeredByComment()) {
                        stage('Run Smoke Tests') {
                            script {
                                pullRequest.createStatus(status: 'pending',
                                        context: 'continuous-integration/jenkins/pr-merge/smokeTest',
                                        description: 'Smoke Tests Building',
                                        description: 'Smoke Tests Running',
                                        targetUrl: "${env.JOB_URL}")
                            }

                            withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) {
                                sh "./gradlew " +
                                        "-DbuildId=\"\${BUILD_ID}\" " +
                                        "-Dkubenetize=true " +
                                        "-DpreAllocatePods=true " +
                                        "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
                                        "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
                                        "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
                                        " allParallelSmokeTest"
                                        " clean allParallelSmokeTest --stacktrace"
                            }
                        }

                    }
                }
            }
        }
    }

    post {

        always {
            junit testResults: '**/build/test-results-xml/**/*.xml', allowEmptyResults: false
            script {
                if (currentBuildTriggeredByComment()) {
                    archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false
                    junit '**/build/test-results-xml/**/*.xml'
                }
            }
        }

        success {
            script {
                if (currentBuildTriggeredByComment()) {
                    pullRequest.createStatus(status: 'success',
                            context: 'continuous-integration/jenkins/pr-merge/smokeTest',
                            description: 'Smoke Tests Passed',
                            targetUrl: "${env.JOB_URL}testResults")
                }
            }
        }

        failure {
            script {
                if (currentBuildTriggeredByComment()) {
                    pullRequest.createStatus(status: 'failure',
                            context: 'continuous-integration/jenkins/pr-merge/smokeTest',
                            description: 'Smoke Tests Failed',
                            targetUrl: "${env.JOB_URL}testResults")
                }
            }
        }

        cleanup {
            deleteDir() /* clean up our workspace */
        }
    }
}

@NonCPS
def currentBuildTriggeredByComment() {
    def triggerCause = currentBuild.rawBuild.getCause(org.jenkinsci.plugins.pipeline.github.trigger.IssueCommentCause)
    if (triggerCause) {
        echo("Build was started by ${triggerCause.userLogin}, who wrote: " +
                "\"${triggerCause.comment}\", which matches the " +
                "\"${triggerCause.triggerPattern}\" trigger pattern.")
    } else {
        echo('Build was not started by a trigger')
    }

    return triggerCause != null
}
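The smoke-test stage only runs when the build was started by a matching pull-request comment: issueCommentTrigger('.*smoke tests.*') registers the trigger and currentBuildTriggeredByComment() checks the build cause at runtime. A minimal sketch of the comment matching in plain Java (the comment strings are hypothetical; the real matching is performed by the Jenkins pipeline-github plugin):

import java.util.regex.Pattern;

public class SmokeTriggerCheck {
    // Same pattern string as the issueCommentTrigger registration above.
    private static final Pattern TRIGGER = Pattern.compile(".*smoke tests.*");

    public static void main(String[] args) {
        // Hypothetical PR comments; only the first would start the smoke-test stage.
        System.out.println(TRIGGER.matcher("please run the smoke tests").matches()); // true
        System.out.println(TRIGGER.matcher("LGTM").matches());                       // false
    }
}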
@@ -1,5 +1,5 @@
import net.corda.testing.DistributedTesting
import net.corda.testing.DistributeTestsBy
import net.corda.testing.DistributedTesting
import net.corda.testing.ImageBuilding
import net.corda.testing.ParallelTestGroup
import net.corda.testing.PodLogLevel

@@ -608,7 +608,7 @@ task allParallelIntegrationTest(type: ParallelTestGroup) {
    streamOutput false
    coresPerFork 5
    memoryInGbPerFork 10
    distribute DistributeTestsBy.CLASS
    distribute DistributeTestsBy.METHOD
}
task allParallelUnitTest(type: ParallelTestGroup) {
    podLogLevel PodLogLevel.INFO

@@ -645,5 +645,3 @@ task allParallelSmokeTest(type: ParallelTestGroup) {
}
apply plugin: ImageBuilding
apply plugin: DistributedTesting
@@ -0,0 +1,5 @@
package net.corda.testing;

public enum DistributeTestsBy {
    CLASS, METHOD
}
@@ -50,7 +50,7 @@ class DistributedTesting implements Plugin<Project> {
                project.logger.info("Evaluating ${task.getPath()}")
                if (task in requestedTasks && !task.hasProperty("ignoreForDistribution")) {
                    project.logger.info "Modifying ${task.getPath()}"
                    ListTests testListerTask = createTestListingTasks(task, subProject)
                    Task testListerTask = createTestListingTasks(task, subProject)
                    globalAllocator.addSource(testListerTask, task)
                    Test modifiedTestTask = modifyTestTaskForParallelExecution(subProject, task, globalAllocator)
                } else {

@@ -79,7 +79,7 @@ class DistributedTesting implements Plugin<Project> {
            userGroups.forEach { testGrouping ->

                //for each "group" (ie: test, integrationTest) within the grouping find all the Test tasks which have the same name.
                List<Test> testTasksToRunInGroup = ((ParallelTestGroup) testGrouping).groups.collect {
                List<Test> testTasksToRunInGroup = ((ParallelTestGroup) testGrouping).getGroups().collect {
                    allTestTasksGroupedByType.get(it)
                }.flatten()

@@ -95,7 +95,7 @@ class DistributedTesting implements Plugin<Project> {
                    imageBuildTask.dependsOn preAllocateTask
                }

                def userDefinedParallelTask = project.rootProject.tasks.create("userDefined" + testGrouping.name.capitalize(), KubesTest) {
                def userDefinedParallelTask = project.rootProject.tasks.create("userDefined" + testGrouping.getName().capitalize(), KubesTest) {
                    group = GRADLE_GROUP

                    if (!tagToUseForRunningTests) {

@@ -106,24 +106,24 @@ class DistributedTesting implements Plugin<Project> {
                        dependsOn deAllocateTask
                    }
                    numberOfPods = testGrouping.getShardCount()
                    printOutput = testGrouping.printToStdOut
                    printOutput = testGrouping.getPrintToStdOut()
                    fullTaskToExecutePath = superListOfTasks
                    taskToExecuteName = testGrouping.groups.join("And")
                    memoryGbPerFork = testGrouping.gbOfMemory
                    numberOfCoresPerFork = testGrouping.coresToUse
                    distribution = testGrouping.distribution
                    podLogLevel = testGrouping.logLevel
                    taskToExecuteName = testGrouping.getGroups().join("And")
                    memoryGbPerFork = testGrouping.getGbOfMemory()
                    numberOfCoresPerFork = testGrouping.getCoresToUse()
                    distribution = testGrouping.getDistribution()
                    podLogLevel = testGrouping.getLogLevel()
                    doFirst {
                        dockerTag = tagToUseForRunningTests ? (ImageBuilding.registryName + ":" + tagToUseForRunningTests) : (imagePushTask.imageName.get() + ":" + imagePushTask.tag.get())
                    }
                }
                def reportOnAllTask = project.rootProject.tasks.create("userDefinedReports${testGrouping.name.capitalize()}", KubesReporting) {
                def reportOnAllTask = project.rootProject.tasks.create("userDefinedReports${testGrouping.getName().capitalize()}", KubesReporting) {
                    group = GRADLE_GROUP
                    dependsOn userDefinedParallelTask
                    destinationDir new File(project.rootProject.getBuildDir(), "userDefinedReports${testGrouping.name.capitalize()}")
                    destinationDir new File(project.rootProject.getBuildDir(), "userDefinedReports${testGrouping.getName().capitalize()}")
                    doFirst {
                        destinationDir.deleteDir()
                        shouldPrintOutput = !testGrouping.printToStdOut
                        shouldPrintOutput = !testGrouping.getPrintToStdOut()
                        podResults = userDefinedParallelTask.containerResults
                        reportOn(userDefinedParallelTask.testOutput)
                    }

@@ -145,14 +145,14 @@ class DistributedTesting implements Plugin<Project> {

    private List<Task> generatePreAllocateAndDeAllocateTasksForGrouping(Project project, ParallelTestGroup testGrouping) {
        PodAllocator allocator = new PodAllocator(project.getLogger())
        Task preAllocateTask = project.rootProject.tasks.create("preAllocateFor" + testGrouping.name.capitalize()) {
        Task preAllocateTask = project.rootProject.tasks.create("preAllocateFor" + testGrouping.getName().capitalize()) {
            group = GRADLE_GROUP
            doFirst {
                String dockerTag = System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_BUILDING_PROPERTY)
                if (dockerTag == null) {
                    throw new GradleException("pre allocation cannot be used without a stable docker tag - please provide one using -D" + ImageBuilding.PROVIDE_TAG_FOR_BUILDING_PROPERTY)
                }
                int seed = (dockerTag.hashCode() + testGrouping.name.hashCode())
                int seed = (dockerTag.hashCode() + testGrouping.getName().hashCode())
                String podPrefix = new BigInteger(64, new Random(seed)).toString(36)
                //here we will pre-request the correct number of pods for this testGroup
                int numberOfPodsToRequest = testGrouping.getShardCount()

@@ -162,14 +162,14 @@ class DistributedTesting implements Plugin<Project> {
            }
        }

        Task deAllocateTask = project.rootProject.tasks.create("deAllocateFor" + testGrouping.name.capitalize()) {
        Task deAllocateTask = project.rootProject.tasks.create("deAllocateFor" + testGrouping.getName().capitalize()) {
            group = GRADLE_GROUP
            doFirst {
                String dockerTag = System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_RUNNING_PROPERTY)
                if (dockerTag == null) {
                    throw new GradleException("pre allocation cannot be used without a stable docker tag - please provide one using -D" + ImageBuilding.PROVIDE_TAG_FOR_RUNNING_PROPERTY)
                }
                int seed = (dockerTag.hashCode() + testGrouping.name.hashCode())
                int seed = (dockerTag.hashCode() + testGrouping.getName().hashCode())
                String podPrefix = new BigInteger(64, new Random(seed)).toString(36);
                allocator.tearDownPods(podPrefix)
            }

@@ -249,12 +249,12 @@ class DistributedTesting implements Plugin<Project> {
        project.plugins.apply(ImageBuilding)
    }

    private ListTests createTestListingTasks(Test task, Project subProject) {
    private Task createTestListingTasks(Test task, Project subProject) {
        def taskName = task.getName()
        def capitalizedTaskName = task.getName().capitalize()
        //determine all the tests which are present in this test task.
        //this list will then be shared between the various worker forks
        def createdListTask = subProject.tasks.create("listTestsFor" + capitalizedTaskName, ListTests) {
        ListTests createdListTask = subProject.tasks.create("listTestsFor" + capitalizedTaskName, ListTests) {
            group = GRADLE_GROUP
            //the convention is that a testing task is backed by a sourceSet with the same name
            dependsOn subProject.getTasks().getByName("${taskName}Classes")

@@ -281,7 +281,7 @@ class DistributedTesting implements Plugin<Project> {
        subProject.logger.info("created task: " + createdListTask.getPath() + " in project: " + subProject + " it dependsOn: " + createdListTask.dependsOn)
        subProject.logger.info("created task: " + createdPrintTask.getPath() + " in project: " + subProject + " it dependsOn: " + createdPrintTask.dependsOn)

        return createdListTask as ListTests
        return createdListTask
    }

}
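Both the preAllocate and deAllocate tasks above derive the pod name prefix the same way, so a later de-allocation run can find the pods requested earlier: the seed is the docker tag hash plus the group name hash, and the prefix is a base-36 rendering of a 64-bit number drawn from a Random seeded with that value. A small self-contained sketch of that derivation in plain Java (the tag and group name below are made-up values):

import java.math.BigInteger;
import java.util.Random;

public class PodPrefixDemo {
    static String podPrefix(String dockerTag, String groupName) {
        // Same seed and prefix construction as the pre/de-allocation tasks above.
        int seed = dockerTag.hashCode() + groupName.hashCode();
        return new BigInteger(64, new Random(seed)).toString(36);
    }

    public static void main(String[] args) {
        // Identical inputs always give the identical prefix, which is the whole point.
        System.out.println(podPrefix("a1b2c3d4st", "allParallelSmokeTest"));
        System.out.println(podPrefix("a1b2c3d4st", "allParallelSmokeTest"));
    }
}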
@@ -257,7 +257,7 @@ public class KubesTest extends DefaultTask {
                    client.pods().delete(createdPod);
                    client.persistentVolumeClaims().delete(pvc);
                }
                return new KubePodResult(resCode, podOutput, binaryResults);
                return new KubePodResult(podIdx, resCode, podOutput, binaryResults);
            });
        } catch (Retry.RetryException e) {
            throw new RuntimeException("Failed to build in pod " + podName + " (" + podIdx + "/" + numberOfPods + ") in " + numberOfRetries + " attempts", e);
@@ -0,0 +1,37 @@
package net.corda.testing;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Random;
import java.util.stream.Collectors;

class ListShufflerAndAllocator {

    private final List<String> tests;

    public ListShufflerAndAllocator(List<String> tests) {
        this.tests = new ArrayList<>(tests);
    }

    public List<String> getTestsForFork(int fork, int forks, Integer seed) {
        final Random shuffler = new Random(seed);
        final List<String> copy = new ArrayList<>(tests);
        while (copy.size() < forks) {
            //pad the list
            copy.add(null);
        }
        Collections.shuffle(copy, shuffler);
        final int numberOfTestsPerFork = Math.max((copy.size() / forks), 1);
        final int consumedTests = numberOfTestsPerFork * forks;
        final int ourStartIdx = numberOfTestsPerFork * fork;
        final int ourEndIdx = ourStartIdx + numberOfTestsPerFork;
        final int ourSupplementaryIdx = consumedTests + fork;
        final ArrayList<String> toReturn = new ArrayList<>(copy.subList(ourStartIdx, ourEndIdx));
        if (ourSupplementaryIdx < copy.size()) {
            toReturn.add(copy.get(ourSupplementaryIdx));
        }
        return toReturn.stream().filter(Objects::nonNull).collect(Collectors.toList());
    }
}
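A quick illustration of how the allocator behaves: every fork asks for its own slice of the same seeded shuffle of the test list, so the slices are disjoint and together cover every test. A minimal sketch using the class above (plain Java; the test names are hypothetical):

package net.corda.testing;

import java.util.Arrays;
import java.util.List;
import java.util.TreeSet;

public class AllocatorDemo {
    public static void main(String[] args) {
        List<String> tests = Arrays.asList("A.a", "A.b", "B.a", "B.b", "C.a", "C.b", "C.c");
        ListShufflerAndAllocator allocator = new ListShufflerAndAllocator(tests);

        int forks = 3;
        TreeSet<String> union = new TreeSet<>();
        for (int fork = 0; fork < forks; fork++) {
            List<String> slice = allocator.getTestsForFork(fork, forks, 42);
            System.out.println("fork " + fork + " -> " + slice);
            union.addAll(slice);
        }
        // Every test appears in exactly one fork's slice.
        System.out.println(union.equals(new TreeSet<>(tests))); // true
    }
}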
@@ -1,110 +0,0 @@
package net.corda.testing

import io.github.classgraph.ClassGraph
import io.github.classgraph.ClassInfo
import org.gradle.api.DefaultTask
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.TaskAction

import java.util.stream.Collectors

class ListShufflerAndAllocator {

    private final List<String> tests

    public ListShufflerAndAllocator(List<String> tests) {
        this.tests = new ArrayList<>(tests)
    }

    List<String> getTestsForFork(int fork, int forks, Integer seed) {
        Random shuffler = new Random(seed);
        List<String> copy = new ArrayList<>(tests);
        while (copy.size() < forks) {
            //pad the list
            copy.add(null);
        }
        Collections.shuffle(copy, shuffler);
        int numberOfTestsPerFork = Math.max((copy.size() / forks).intValue(), 1);
        int consumedTests = numberOfTestsPerFork * forks;
        int ourStartIdx = numberOfTestsPerFork * fork;
        int ourEndIdx = ourStartIdx + numberOfTestsPerFork;
        int ourSupplementaryIdx = consumedTests + fork;
        ArrayList<String> toReturn = new ArrayList<>(copy.subList(ourStartIdx, ourEndIdx));
        if (ourSupplementaryIdx < copy.size()) {
            toReturn.add(copy.get(ourSupplementaryIdx));
        }
        return toReturn.stream().filter { it -> it != null }.collect(Collectors.toList());
    }
}

interface TestLister {
    List<String> getAllTestsDiscovered()
}

class ListTests extends DefaultTask implements TestLister {

    public static final String DISTRIBUTION_PROPERTY = "distributeBy"

    FileCollection scanClassPath
    List<String> allTests
    DistributeTestsBy distribution = System.getProperty(DISTRIBUTION_PROPERTY) ? DistributeTestsBy.valueOf(System.getProperty(DISTRIBUTION_PROPERTY)) : DistributeTestsBy.METHOD

    def getTestsForFork(int fork, int forks, Integer seed) {
        def gitSha = new BigInteger(project.hasProperty("corda_revision") ? project.property("corda_revision").toString() : "0", 36)
        if (fork >= forks) {
            throw new IllegalArgumentException("requested shard ${fork + 1} for total shards ${forks}")
        }
        def seedToUse = seed ? (seed + ((String) this.getPath()).hashCode() + gitSha.intValue()) : 0
        return new ListShufflerAndAllocator(allTests).getTestsForFork(fork, forks, seedToUse)
    }

    @Override
    public List<String> getAllTestsDiscovered() {
        return new ArrayList<>(allTests)
    }

    @TaskAction
    def discoverTests() {
        switch (distribution) {
            case DistributeTestsBy.METHOD:
                Collection<String> results = new ClassGraph()
                        .enableClassInfo()
                        .enableMethodInfo()
                        .ignoreClassVisibility()
                        .ignoreMethodVisibility()
                        .enableAnnotationInfo()
                        .overrideClasspath(scanClassPath)
                        .scan()
                        .getClassesWithMethodAnnotation("org.junit.Test")
                        .collect { c -> (c.getSubclasses() + Collections.singletonList(c)) }
                        .flatten()
                        .collect { ClassInfo c ->
                            c.getMethodInfo().filter { m -> m.hasAnnotation("org.junit.Test") }.collect { m -> c.name + "." + m.name }
                        }.flatten()
                        .toSet()

                this.allTests = results.stream().sorted().collect(Collectors.toList())
                break
            case DistributeTestsBy.CLASS:
                Collection<String> results = new ClassGraph()
                        .enableClassInfo()
                        .enableMethodInfo()
                        .ignoreClassVisibility()
                        .ignoreMethodVisibility()
                        .enableAnnotationInfo()
                        .overrideClasspath(scanClassPath)
                        .scan()
                        .getClassesWithMethodAnnotation("org.junit.Test")
                        .collect { c -> (c.getSubclasses() + Collections.singletonList(c)) }
                        .flatten()
                        .collect { ClassInfo c -> c.name }.flatten()
                        .toSet()
                this.allTests = results.stream().sorted().collect(Collectors.toList())
                break
        }
    }
}

public enum DistributeTestsBy {
    CLASS, METHOD
}
buildSrc/src/main/groovy/net/corda/testing/ListTests.java (99 lines, new file)

@@ -0,0 +1,99 @@
package net.corda.testing;

import io.github.classgraph.ClassGraph;
import io.github.classgraph.ClassInfo;
import io.github.classgraph.ClassInfoList;
import org.gradle.api.DefaultTask;
import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.TaskAction;

import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

interface TestLister {
    List<String> getAllTestsDiscovered();
}

public class ListTests extends DefaultTask implements TestLister {

    public static final String DISTRIBUTION_PROPERTY = "distributeBy";

    public FileCollection scanClassPath;
    private List<String> allTests;
    private DistributeTestsBy distribution = System.getProperty(DISTRIBUTION_PROPERTY) != null && !System.getProperty(DISTRIBUTION_PROPERTY).isEmpty() ?
            DistributeTestsBy.valueOf(System.getProperty(DISTRIBUTION_PROPERTY)) : DistributeTestsBy.METHOD;

    public List<String> getTestsForFork(int fork, int forks, Integer seed) {
        BigInteger gitSha = new BigInteger(getProject().hasProperty("corda_revision") ?
                getProject().property("corda_revision").toString() : "0", 36);
        if (fork >= forks) {
            throw new IllegalArgumentException("requested shard " + (fork + 1) + " for total shards " + forks);
        }
        int seedToUse = seed != null ? (seed + (this.getPath()).hashCode() + gitSha.intValue()) : 0;
        return new ListShufflerAndAllocator(allTests).getTestsForFork(fork, forks, seedToUse);
    }

    @Override
    public List<String> getAllTestsDiscovered() {
        return new ArrayList<>(allTests);
    }

    @TaskAction
    void discoverTests() {
        Collection<String> results;
        switch (distribution) {
            case METHOD:
                results = new ClassGraph()
                        .enableClassInfo()
                        .enableMethodInfo()
                        .ignoreClassVisibility()
                        .ignoreMethodVisibility()
                        .enableAnnotationInfo()
                        .overrideClasspath(scanClassPath)
                        .scan()
                        .getClassesWithMethodAnnotation("org.junit.Test")
                        .stream()
                        .map(classInfo -> {
                            ClassInfoList returnList = new ClassInfoList();
                            returnList.add(classInfo);
                            returnList.addAll(classInfo.getSubclasses());
                            return returnList;
                        })
                        .flatMap(ClassInfoList::stream)
                        .map(classInfo -> classInfo.getMethodInfo().filter(methodInfo -> methodInfo.hasAnnotation("org.junit.Test"))
                                .stream().map(methodInfo -> classInfo.getName() + "." + methodInfo.getName()))
                        .flatMap(Function.identity())
                        .collect(Collectors.toSet());

                this.allTests = results.stream().sorted().collect(Collectors.toList());
                break;
            case CLASS:
                results = new ClassGraph()
                        .enableClassInfo()
                        .enableMethodInfo()
                        .ignoreClassVisibility()
                        .ignoreMethodVisibility()
                        .enableAnnotationInfo()
                        .overrideClasspath(scanClassPath)
                        .scan()
                        .getClassesWithMethodAnnotation("org.junit.Test")
                        .stream()
                        .map(classInfo -> {
                            ClassInfoList returnList = new ClassInfoList();
                            returnList.add(classInfo);
                            returnList.addAll(classInfo.getSubclasses());
                            return returnList;
                        })
                        .flatMap(ClassInfoList::stream)
                        .map(ClassInfo::getName)
                        .collect(Collectors.toSet());
                this.allTests = results.stream().sorted().collect(Collectors.toList());
                break;
        }
        getProject().getLogger().lifecycle("THESE ARE ALL THE TESTSSS!!!!!!!!: " + allTests.toString());
    }
}
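Two details of the new ListTests task worth calling out: the -DdistributeBy system property selects whether the discovered identifiers are whole classes or individual methods, and the effective shuffle seed folds in the task path and the corda_revision git SHA (parsed as base 36) so different tasks and revisions get different but still deterministic allocations. A small sketch of both ideas with made-up values (plain Java, not the Gradle task itself):

import java.math.BigInteger;
import java.util.Arrays;
import java.util.List;

public class ListTestsDemo {
    public static void main(String[] args) {
        // -DdistributeBy=CLASS yields class-level entries, METHOD yields class.method entries
        // (hypothetical names, just to show the shape of the identifiers).
        List<String> byClass = Arrays.asList("net.corda.FooTest", "net.corda.BarTest");
        List<String> byMethod = Arrays.asList("net.corda.FooTest.sends", "net.corda.FooTest.receives",
                "net.corda.BarTest.verifies");
        System.out.println(byClass.size() + " class shards vs " + byMethod.size() + " method shards");

        // Seed derivation as in getTestsForFork above: user seed + task path hash + git SHA (base 36).
        int userSeed = 42;
        String taskPath = ":node:listTestsForIntegrationTest"; // hypothetical task path
        String cordaRevision = "6a3a9eded3";                   // commit hash digits, parsed as base 36
        BigInteger gitSha = new BigInteger(cordaRevision, 36);
        int seedToUse = userSeed + taskPath.hashCode() + gitSha.intValue();
        System.out.println("seed to use = " + seedToUse);
    }
}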
@@ -1,51 +0,0 @@
package net.corda.testing

import org.gradle.api.DefaultTask

class ParallelTestGroup extends DefaultTask {

    DistributeTestsBy distribution = DistributeTestsBy.METHOD

    List<String> groups = new ArrayList<>()
    int shardCount = 20
    int coresToUse = 4
    int gbOfMemory = 4
    boolean printToStdOut = true
    PodLogLevel logLevel = PodLogLevel.INFO

    void numberOfShards(int shards) {
        this.shardCount = shards
    }

    void podLogLevel(PodLogLevel level) {
        this.logLevel = level
    }

    void distribute(DistributeTestsBy dist) {
        this.distribution = dist
    }

    void coresPerFork(int cores) {
        this.coresToUse = cores
    }

    void memoryInGbPerFork(int gb) {
        this.gbOfMemory = gb
    }

    //when this is false, only containers with "failed" exit codes will be printed to stdout
    void streamOutput(boolean print) {
        this.printToStdOut = print
    }

    void testGroups(String... group) {
        testGroups(group.toList())
    }

    void testGroups(List<String> group) {
        group.forEach {
            groups.add(it)
        }
    }

}
@@ -0,0 +1,80 @@
package net.corda.testing;

import org.gradle.api.DefaultTask;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ParallelTestGroup extends DefaultTask {

    private DistributeTestsBy distribution = DistributeTestsBy.METHOD;
    private List<String> groups = new ArrayList<>();
    private int shardCount = 20;
    private int coresToUse = 4;
    private int gbOfMemory = 4;
    private boolean printToStdOut = true;
    private PodLogLevel logLevel = PodLogLevel.INFO;

    public DistributeTestsBy getDistribution() {
        return distribution;
    }

    public List<String> getGroups() {
        return groups;
    }

    public int getShardCount() {
        return shardCount;
    }

    public int getCoresToUse() {
        return coresToUse;
    }

    public int getGbOfMemory() {
        return gbOfMemory;
    }

    public boolean getPrintToStdOut() {
        return printToStdOut;
    }

    public PodLogLevel getLogLevel() {
        return logLevel;
    }

    public void numberOfShards(int shards) {
        this.shardCount = shards;
    }

    public void podLogLevel(PodLogLevel level) {
        this.logLevel = level;
    }

    public void distribute(DistributeTestsBy dist) {
        this.distribution = dist;
    }

    public void coresPerFork(int cores) {
        this.coresToUse = cores;
    }

    public void memoryInGbPerFork(int gb) {
        this.gbOfMemory = gb;
    }

    //when this is false, only containers with "failed" exit codes will be printed to stdout
    public void streamOutput(boolean print) {
        this.printToStdOut = print;
    }

    public void testGroups(String... group) {
        testGroups(Arrays.asList(group));
    }

    private void testGroups(List<String> group) {
        groups.addAll(group);
    }

}
@@ -1,32 +0,0 @@
package net.corda.testing

import org.gradle.api.Action
import org.gradle.api.DefaultTask
import org.gradle.api.Task
import org.gradle.api.tasks.TaskAction

import java.util.concurrent.CompletableFuture

class RunInParallel extends DefaultTask {

    private List<Task> tasksToRunInParallel = new ArrayList<>()

    public RunInParallel runInParallel(Task... tasks) {
        for (Task task : tasks) {
            tasksToRunInParallel.add(task)
        }
        return this;
    }

    @TaskAction
    def void run() {
        tasksToRunInParallel.collect { t ->
            CompletableFuture.runAsync {
                def actions = t.getActions()
                for (Action action : actions) {
                    action.execute(t)
                }
            }
        }.join()
    }
}
@@ -5,11 +5,13 @@ import java.util.Collection;

public class KubePodResult {

    private final int podIndex;
    private final int resultCode;
    private final File output;
    private final Collection<File> binaryResults;

    public KubePodResult(int resultCode, File output, Collection<File> binaryResults) {
    public KubePodResult(int podIndex, int resultCode, File output, Collection<File> binaryResults) {
        this.podIndex = podIndex;
        this.resultCode = resultCode;
        this.output = output;
        this.binaryResults = binaryResults;

@@ -26,4 +28,8 @@ public class KubePodResult {
    public Collection<File> getBinaryResults() {
        return binaryResults;
    }

    public int getPodIndex() {
        return podIndex;
    }
}
@@ -154,9 +154,9 @@ public class KubesReporting extends DefaultTask {
            if (shouldPrintOutput) {
                containersWithNonZeroReturnCodes.forEach(podResult -> {
                    try {
                        System.out.println("\n##### CONTAINER OUTPUT START #####");
                        System.out.println("\n##### CONTAINER " + podResult.getPodIndex() + " OUTPUT START #####");
                        IOUtils.copy(new FileInputStream(podResult.getOutput()), System.out);
                        System.out.println("##### CONTAINER OUTPUT END #####\n");
                        System.out.println("##### CONTAINER " + podResult.getPodIndex() + " OUTPUT END #####\n");
                    } catch (IOException ignored) {
                    }
                });
@@ -1,25 +1,30 @@
package net.corda.testing
package net.corda.testing;

import org.hamcrest.CoreMatchers
import org.junit.Assert
import org.junit.Test
import org.hamcrest.CoreMatchers;
import org.junit.Assert;
import org.junit.Test;

import java.util.stream.Collectors
import java.util.stream.IntStream
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.hamcrest.core.Is.is
import static org.hamcrest.core.IsEqual.equalTo
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsEqual.equalTo;

class ListTestsTest {
public class ListTestsTest {

    @Test
    void shouldAllocateTests() {
    public void shouldAllocateTests() {

        for (int numberOfTests = 0; numberOfTests < 100; numberOfTests++) {
            for (int numberOfForks = 1; numberOfForks < 100; numberOfForks++) {

                List<String> tests = IntStream.range(0, numberOfTests).collect { z -> "Test.method" + z }
                List<String> tests = IntStream.range(0, numberOfTests).boxed()
                        .map(integer -> "Test.method" + integer.toString())
                        .collect(Collectors.toList());
                ListShufflerAndAllocator testLister = new ListShufflerAndAllocator(tests);

                List<String> listOfLists = new ArrayList<>();
@@ -96,6 +96,20 @@ class TransactionVerificationExceptionSerialisationTests {
        assertEquals(exception.txId, exception2.txId)
    }

    @Test
    fun invalidConstraintRejectionError() {
        val exception = TransactionVerificationException.InvalidConstraintRejection(txid, "Some contract class", "for being too funny")
        val exceptionAfterSerialisation = DeserializationInput(factory).deserialize(
                SerializationOutput(factory).serialize(exception, context),
                context
        )

        assertEquals(exception.message, exceptionAfterSerialisation.message)
        assertEquals(exception.cause?.message, exceptionAfterSerialisation.cause?.message)
        assertEquals(exception.contractClass, exceptionAfterSerialisation.contractClass)
        assertEquals(exception.reason, exceptionAfterSerialisation.reason)
    }

    @Test
    fun contractCreationErrorTest() {
        val cause = Throwable("wibble")
@@ -92,6 +92,16 @@ abstract class TransactionVerificationException(val txId: SecureHash, message: S
    class ContractConstraintRejection(txId: SecureHash, val contractClass: String)
        : TransactionVerificationException(txId, "Contract constraints failed for $contractClass", null)

    /**
     * A constraint attached to a state was invalid, e.g. due to size limitations.
     *
     * @property contractClass The fully qualified class name of the failing contract.
     * @property reason A message containing the reason the constraint is invalid, included in the thrown exception.
     */
    @KeepForDJVM
    class InvalidConstraintRejection(txId: SecureHash, val contractClass: String, val reason: String)
        : TransactionVerificationException(txId, "Contract constraints failed for $contractClass. $reason", null)

    /**
     * A state requested a contract class via its [TransactionState.contract] field that didn't appear in any attached
     * JAR at all. This usually implies the attachments were forgotten or a version mismatch.
@@ -21,6 +21,10 @@ class NotaryException(
/** Specifies the cause for notarisation request failure. */
@CordaSerializable
sealed class NotaryError {
    companion object {
        const val NUM_STATES = 5
    }

    /** Occurs when one or more input states have already been consumed by another transaction. */
    data class Conflict(
            /** Id of the transaction that was attempted to be notarised. */

@@ -28,8 +32,9 @@ sealed class NotaryError {
            /** Specifies which states have already been consumed in another transaction. */
            val consumedStates: Map<StateRef, StateConsumptionDetails>
    ) : NotaryError() {
        override fun toString() = "One or more input states or referenced states have already been used as input states in other transactions. Conflicting state count: ${consumedStates.size}, consumption details:\n" +
                "${consumedStates.asSequence().joinToString(",\n", limit = 5) { it.key.toString() + " -> " + it.value }}.\n" +
        override fun toString() = "One or more input states or referenced states have already been used as input states in other transactions. " +
                "Conflicting state count: ${consumedStates.size}, consumption details:\n" +
                "${consumedStates.asSequence().joinToString(",\n", limit = NUM_STATES) { it.key.toString() + " -> " + it.value }}.\n" +
                "To find out if any of the conflicting transactions have been generated by this node you can use the hashLookup Corda shell command."
    }
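The reworked Conflict.toString() keeps the message bounded by listing at most NUM_STATES (5) of the conflicting state references, with the rest elided. A rough Java equivalent of that truncation, using placeholder state refs (the real code formats StateRef -> StateConsumptionDetails pairs in Kotlin via joinToString's limit parameter):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class ConflictMessageDemo {
    private static final int NUM_STATES = 5;

    static String describe(Map<String, String> consumedStates) {
        String details = consumedStates.entrySet().stream()
                .limit(NUM_STATES)
                .map(e -> e.getKey() + " -> " + e.getValue())
                .collect(Collectors.joining(",\n"));
        String suffix = consumedStates.size() > NUM_STATES ? ",\n..." : "";
        return "Conflicting state count: " + consumedStates.size()
                + ", consumption details:\n" + details + suffix;
    }

    public static void main(String[] args) {
        Map<String, String> consumed = new LinkedHashMap<>();
        for (int i = 0; i < 8; i++) {
            consumed.put("txhash(" + i + ")", "consumed by some other transaction");
        }
        System.out.println(describe(consumed)); // only the first 5 entries are listed
    }
}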
@@ -10,6 +10,13 @@ import net.corda.core.utilities.loggerFor
 */
typealias Version = Int

/**
 * The maximum number of keys in a signature constraint that the platform supports.
 *
 * Attention: this value affects consensus, so it requires a minimum platform version bump in order to be changed.
 */
const val MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT = 20

private val log = loggerFor<AttachmentConstraint>()

val Attachment.contractVersion: Version get() = if (this is ContractAttachment) version else CordappImpl.DEFAULT_CORDAPP_VERSION
@@ -5,6 +5,7 @@ import net.corda.core.KeepForDJVM
import net.corda.core.concurrent.CordaFuture
import net.corda.core.contracts.*
import net.corda.core.contracts.TransactionVerificationException.TransactionContractConflictException
import net.corda.core.crypto.CompositeKey
import net.corda.core.crypto.SecureHash
import net.corda.core.internal.rules.StateContractValidationEnforcementRule
import net.corda.core.transactions.LedgerTransaction

@@ -329,8 +330,23 @@ abstract class Verifier(val ltx: LedgerTransaction, protected val transactionCla
    private fun verifyConstraints(contractAttachmentsByContract: Map<ContractClassName, ContractAttachment>) {
        // For each contract/constraint pair check that the relevant attachment is valid.
        allStates.map { it.contract to it.constraint }.toSet().forEach { (contract, constraint) ->
            if (constraint is SignatureAttachmentConstraint)
            if (constraint is SignatureAttachmentConstraint) {
                /**
                 * Support for signature constraints has been added on min. platform version >= 4.
                 * On minimum platform version >= 5, an explicit check has been introduced on the supported number of leaf keys
                 * in composite keys of signature constraints in order to harden consensus.
                 */
                checkMinimumPlatformVersion(ltx.networkParameters?.minimumPlatformVersion ?: 1, 4, "Signature constraints")
                val constraintKey = constraint.key
                if (ltx.networkParameters?.minimumPlatformVersion ?: 1 >= 5) {
                    if (constraintKey is CompositeKey && constraintKey.leafKeys.size > MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT) {
                        throw TransactionVerificationException.InvalidConstraintRejection(ltx.id, contract,
                                "Signature constraint contains composite key with ${constraintKey.leafKeys.size} leaf keys, " +
                                        "which is more than the maximum allowed number of keys " +
                                        "($MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT).")
                    }
                }
            }

            // We already checked that there is one and only one attachment.
            val contractAttachment = contractAttachmentsByContract[contract]!!
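The new check only fires on networks at minimum platform version 5 or above, and only for signature constraints whose key is a composite key: if it carries more than MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT (20) leaf keys, the transaction is rejected with the new InvalidConstraintRejection. A toy model of that leaf-count rule in plain Java (this is not Corda's CompositeKey API; the key tree shape below is invented purely for illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class LeafKeyLimitDemo {
    static final int MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT = 20;

    // Minimal stand-in for a composite key: either a leaf or a node with children.
    static class Key {
        final List<Key> children = new ArrayList<>();
        Key(Key... kids) { children.addAll(Arrays.asList(kids)); }
        int leafCount() {
            return children.isEmpty() ? 1 : children.stream().mapToInt(Key::leafCount).sum();
        }
    }

    static void checkConstraintKey(Key key, int minimumPlatformVersion) {
        if (minimumPlatformVersion >= 5 && key.leafCount() > MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT) {
            throw new IllegalArgumentException("Signature constraint contains composite key with "
                    + key.leafCount() + " leaf keys, which is more than the maximum allowed number of keys ("
                    + MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT + ").");
        }
    }

    public static void main(String[] args) {
        Key[] leaves = new Key[21];
        for (int i = 0; i < leaves.length; i++) leaves[i] = new Key();
        Key tooBig = new Key(leaves);
        checkConstraintKey(new Key(new Key(), new Key()), 5); // fine: 2 leaf keys
        checkConstraintKey(tooBig, 4);                        // fine: rule only applies from MPV 5
        checkConstraintKey(tooBig, 5);                        // throws: 21 > 20
    }
}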
@@ -10,6 +10,7 @@ import net.corda.core.crypto.SecureHash
import net.corda.core.flows.FlowException
import net.corda.core.flows.FlowLogic
import net.corda.core.identity.AbstractParty
import net.corda.core.internal.MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT
import net.corda.core.internal.concurrent.doneFuture
import net.corda.core.messaging.DataFeed
import net.corda.core.node.services.Vault.RelevancyStatus.*

@@ -256,9 +257,15 @@ class Vault<out T : ContractState>(val states: Iterable<StateAndRef<T>>) {

/**
 * The maximum permissible size of contract constraint type data (for storage in vault states database table).
 * Maximum value equates to a CompositeKey with 10 EDDSA_ED25519_SHA512 keys stored in it.
 *
 * This value establishes an upper limit of a CompositeKey with up to [MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT] keys stored in it.
 * However, note this assumes a rather conservative upper bound per key.
 * For reference, measurements have shown the following numbers for each algorithm:
 * - 2048-bit RSA keys: 1 key -> 294 bytes, 2 keys -> 655 bytes, 3 keys -> 961 bytes
 * - 256-bit ECDSA (k1) keys: 1 key -> 88 bytes, 2 keys -> 231 bytes, 3 keys -> 331 bytes
 * - 256-bit EDDSA keys: 1 key -> 44 bytes, 2 keys -> 140 bytes, 3 keys -> 195 bytes
 */
const val MAX_CONSTRAINT_DATA_SIZE = 563
const val MAX_CONSTRAINT_DATA_SIZE = 1_000 * MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT

/**
 * A [VaultService] is responsible for securely and safely persisting the current state of a vault to storage. The
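The new constant is derived rather than measured: a conservative budget of 1,000 bytes per key times the 20-key cap gives 20,000 bytes, which is exactly the value that appears in the API-listing hunk at the top of this change (563 -> 20000). Against the per-key measurements quoted in the KDoc, the largest growth is 2048-bit RSA at roughly 300-360 bytes per additional key, so the budget leaves ample headroom. A quick arithmetic check in plain Java:

public class ConstraintDataSizeCheck {
    public static void main(String[] args) {
        int maxKeys = 20;              // MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT
        int bytesBudgetPerKey = 1_000; // conservative upper bound chosen in the KDoc
        int maxConstraintDataSize = bytesBudgetPerKey * maxKeys;
        System.out.println(maxConstraintDataSize); // 20000, the new constant

        // Extrapolate the measured RSA numbers (961 bytes for 3 keys, ~306 bytes per extra key).
        int extrapolatedTwentyRsaKeys = 961 + 17 * 306;
        System.out.println(extrapolatedTwentyRsaKeys < maxConstraintDataSize); // true, with room to spare
    }
}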
@@ -92,6 +92,7 @@ sealed class QueryCriteria : GenericQueryCriteria<QueryCriteria, IQueryCriteriaP
    /**
     * VaultQueryCriteria: provides query by attributes defined in [VaultSchema.VaultStates]
     */
    @Suppress("MagicNumber") // need to list deprecation versions explicitly
    data class VaultQueryCriteria(
            override val status: Vault.StateStatus = Vault.StateStatus.UNCONSUMED,
            override val contractStateTypes: Set<Class<out ContractState>>? = null,

@@ -264,6 +265,7 @@ sealed class QueryCriteria : GenericQueryCriteria<QueryCriteria, IQueryCriteriaP
    /**
     * LinearStateQueryCriteria: provides query by attributes defined in [VaultSchema.VaultLinearState]
     */
    @Suppress("MagicNumber") // need to list deprecation versions explicitly
    data class LinearStateQueryCriteria(
            override val participants: List<AbstractParty>? = null,
            val uuid: List<UUID>? = null,

@@ -545,6 +547,7 @@ sealed class AttachmentQueryCriteria : GenericQueryCriteria<AttachmentQueryCrite
    /**
     * AttachmentsQueryCriteria:
     */
    @Suppress("MagicNumber") // need to list deprecation versions explicitly
    data class AttachmentsQueryCriteria(val uploaderCondition: ColumnPredicate<String>? = null,
                                        val filenameCondition: ColumnPredicate<String>? = null,
                                        val uploadDateCondition: ColumnPredicate<Instant>? = null,
@@ -90,7 +90,9 @@ class PersistentState(@EmbeddedId override var stateRef: PersistentStateRef? = n
@KeepForDJVM
@Embeddable
@Immutable

data class PersistentStateRef(
        @Suppress("MagicNumber") // column width
        @Column(name = "transaction_id", length = 64, nullable = false)
        var txId: String,
|
||||
|
||||
|
@ -143,8 +143,6 @@
|
||||
<ID>ComplexMethod:CustomSerializerRegistry.kt$CachingCustomSerializerRegistry$private fun doFindCustomSerializer(clazz: Class<*>, declaredType: Type): AMQPSerializer<Any>?</ID>
|
||||
<ID>ComplexMethod:DeserializationInput.kt$DeserializationInput$fun readObject(obj: Any, schemas: SerializationSchemas, type: Type, context: SerializationContext): Any</ID>
|
||||
<ID>ComplexMethod:DriverDSLImpl.kt$DriverDSLImpl$override fun start()</ID>
|
||||
<ID>ComplexMethod:DriverDSLImpl.kt$DriverDSLImpl$private fun startNodeInternal(config: NodeConfig, webAddress: NetworkHostAndPort, localNetworkMap: LocalNetworkMap?, parameters: NodeParameters): CordaFuture<NodeHandle></ID>
|
||||
<ID>ComplexMethod:DriverDSLImpl.kt$DriverDSLImpl$private fun startRegisteredNode(name: CordaX500Name, localNetworkMap: LocalNetworkMap?, parameters: NodeParameters, p2pAddress: NetworkHostAndPort = portAllocation.nextHostAndPort()): CordaFuture<NodeHandle></ID>
|
||||
<ID>ComplexMethod:Expect.kt$ fun <S, E : Any> S.genericExpectEvents( isStrict: Boolean = true, stream: S.((E) -> Unit) -> Unit, expectCompose: () -> ExpectCompose<E> )</ID>
|
||||
<ID>ComplexMethod:FinalityFlow.kt$FinalityFlow$@Suspendable @Throws(NotaryException::class) override fun call(): SignedTransaction</ID>
|
||||
<ID>ComplexMethod:FlowMonitor.kt$FlowMonitor$private fun warningMessageForFlowWaitingOnIo(request: FlowIORequest<*>, flow: FlowStateMachineImpl<*>, now: Instant): String</ID>
|
||||
@ -688,7 +686,7 @@
|
||||
<ID>LongParameterList:DriverDSL.kt$DriverDSL$( defaultParameters: NodeParameters = NodeParameters(), providedName: CordaX500Name? = defaultParameters.providedName, rpcUsers: List<User> = defaultParameters.rpcUsers, verifierType: VerifierType = defaultParameters.verifierType, customOverrides: Map<String, Any?> = defaultParameters.customOverrides, startInSameProcess: Boolean? = defaultParameters.startInSameProcess, maximumHeapSize: String = defaultParameters.maximumHeapSize )</ID>
|
||||
<ID>LongParameterList:DriverDSL.kt$DriverDSL$( defaultParameters: NodeParameters = NodeParameters(), providedName: CordaX500Name? = defaultParameters.providedName, rpcUsers: List<User> = defaultParameters.rpcUsers, verifierType: VerifierType = defaultParameters.verifierType, customOverrides: Map<String, Any?> = defaultParameters.customOverrides, startInSameProcess: Boolean? = defaultParameters.startInSameProcess, maximumHeapSize: String = defaultParameters.maximumHeapSize, logLevelOverride: String? = defaultParameters.logLevelOverride )</ID>
|
||||
<ID>LongParameterList:DriverDSLImpl.kt$( isDebug: Boolean = DriverParameters().isDebug, driverDirectory: Path = DriverParameters().driverDirectory, portAllocation: PortAllocation = DriverParameters().portAllocation, debugPortAllocation: PortAllocation = DriverParameters().debugPortAllocation, systemProperties: Map<String, String> = DriverParameters().systemProperties, useTestClock: Boolean = DriverParameters().useTestClock, startNodesInProcess: Boolean = DriverParameters().startNodesInProcess, extraCordappPackagesToScan: List<String> = @Suppress("DEPRECATION") DriverParameters().extraCordappPackagesToScan, waitForAllNodesToFinish: Boolean = DriverParameters().waitForAllNodesToFinish, notarySpecs: List<NotarySpec> = DriverParameters().notarySpecs, jmxPolicy: JmxPolicy = DriverParameters().jmxPolicy, networkParameters: NetworkParameters = DriverParameters().networkParameters, compatibilityZone: CompatibilityZoneParams? = null, notaryCustomOverrides: Map<String, Any?> = DriverParameters().notaryCustomOverrides, inMemoryDB: Boolean = DriverParameters().inMemoryDB, cordappsForAllNodes: Collection<TestCordappInternal>? = null, dsl: DriverDSLImpl.() -> A )</ID>
|
||||
<ID>LongParameterList:DriverDSLImpl.kt$DriverDSLImpl.Companion$( config: NodeConfig, quasarJarPath: String, debugPort: Int?, overriddenSystemProperties: Map<String, String>, maximumHeapSize: String, logLevelOverride: String?, vararg extraCmdLineFlag: String )</ID>
|
||||
<ID>LongParameterList:DriverDSLImpl.kt$DriverDSLImpl.Companion$( config: NodeConfig, quasarJarPath: String, debugPort: Int?, bytemanJarPath: String?, bytemanPort: Int?, overriddenSystemProperties: Map<String, String>, maximumHeapSize: String, logLevelOverride: String?, vararg extraCmdLineFlag: String )</ID>
|
||||
<ID>LongParameterList:DummyFungibleContract.kt$DummyFungibleContract$(inputs: List<State>, outputs: List<State>, tx: LedgerTransaction, issueCommand: CommandWithParties<Commands.Issue>, currency: Currency, issuer: PartyAndReference)</ID>
|
||||
<ID>LongParameterList:IRS.kt$FloatingRatePaymentEvent$(date: LocalDate = this.date, accrualStartDate: LocalDate = this.accrualStartDate, accrualEndDate: LocalDate = this.accrualEndDate, dayCountBasisDay: DayCountBasisDay = this.dayCountBasisDay, dayCountBasisYear: DayCountBasisYear = this.dayCountBasisYear, fixingDate: LocalDate = this.fixingDate, notional: Amount<Currency> = this.notional, rate: Rate = this.rate)</ID>
|
||||
<ID>LongParameterList:IRS.kt$InterestRateSwap$(floatingLeg: FloatingLeg, fixedLeg: FixedLeg, calculation: Calculation, common: Common, oracle: Party, notary: Party)</ID>
|
||||
@ -1209,6 +1207,7 @@
|
||||
<ID>MagicNumber:TransactionUtils.kt$4</ID>
|
||||
<ID>MagicNumber:TransactionVerificationException.kt$TransactionVerificationException.ConstraintPropagationRejection$3</ID>
|
||||
<ID>MagicNumber:TransactionVerifierServiceInternal.kt$Verifier$4</ID>
|
||||
<ID>MagicNumber:TransactionVerifierServiceInternal.kt$Verifier$5</ID>
|
||||
<ID>MagicNumber:TransactionViewer.kt$TransactionViewer$15.0</ID>
|
||||
<ID>MagicNumber:TransactionViewer.kt$TransactionViewer$20.0</ID>
|
||||
<ID>MagicNumber:TransactionViewer.kt$TransactionViewer$200.0</ID>
|
||||
@ -2062,9 +2061,9 @@
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl$private</ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl$val flowOverrideConfig = FlowOverrideConfig(parameters.flowOverrides.map { FlowOverride(it.key.canonicalName, it.value.canonicalName) })</ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl$val jdbcUrl = "jdbc:h2:mem:persistence${inMemoryCounter.getAndIncrement()};DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100"</ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl$val process = startOutOfProcessNode(config, quasarJarPath, debugPort, systemProperties, parameters.maximumHeapSize, parameters.logLevelOverride)</ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl.Companion$if (bytemanAgent != null && debugPort != null) listOf("-Dorg.jboss.byteman.verbose=true", "-Dorg.jboss.byteman.debug=true") else emptyList()</ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl.Companion$private operator fun Config.plus(property: Pair<String, Any>)</ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl.Companion${ log.info("Starting out-of-process Node ${config.corda.myLegalName.organisation}, debug port is " + (debugPort ?: "not enabled")) // Write node.conf writeConfig(config.corda.baseDirectory, "node.conf", config.typesafe.toNodeOnly()) val systemProperties = mutableMapOf( "name" to config.corda.myLegalName, "visualvm.display.name" to "corda-${config.corda.myLegalName}" ) debugPort?.let { systemProperties += "log4j2.level" to "debug" systemProperties += "log4j2.debug" to "true" } systemProperties += inheritFromParentProcess() systemProperties += overriddenSystemProperties // See experimental/quasar-hook/README.md for how to generate. val excludePattern = "x(antlr**;bftsmart**;ch**;co.paralleluniverse**;com.codahale**;com.esotericsoftware**;" + "com.fasterxml**;com.google**;com.ibm**;com.intellij**;com.jcabi**;com.nhaarman**;com.opengamma**;" + "com.typesafe**;com.zaxxer**;de.javakaffee**;groovy**;groovyjarjarantlr**;groovyjarjarasm**;io.atomix**;" + "io.github**;io.netty**;jdk**;joptsimple**;junit**;kotlin**;net.bytebuddy**;net.i2p**;org.apache**;" + "org.assertj**;org.bouncycastle**;org.codehaus**;org.crsh**;org.dom4j**;org.fusesource**;org.h2**;" + "org.hamcrest**;org.hibernate**;org.jboss**;org.jcp**;org.joda**;org.junit**;org.mockito**;org.objectweb**;" + "org.objenesis**;org.slf4j**;org.w3c**;org.xml**;org.yaml**;reflectasm**;rx**;org.jolokia**;" + "com.lmax**;picocli**;liquibase**;com.github.benmanes**;org.json**;org.postgresql**;nonapi.io.github.classgraph**;)" val extraJvmArguments = systemProperties.removeResolvedClasspath().map { "-D${it.key}=${it.value}" } + "-javaagent:$quasarJarPath=$excludePattern" val loggingLevel = when { logLevelOverride != null -> logLevelOverride debugPort == null -> "INFO" else -> "DEBUG" } val arguments = mutableListOf( "--base-directory=${config.corda.baseDirectory}", "--logging-level=$loggingLevel", "--no-local-shell").also { it += extraCmdLineFlag }.toList() // The following dependencies are excluded from the classpath of the created JVM, so that the environment resembles a real one as close as possible. // These are either classes that will be added as attachments to the node (i.e. samples, finance, opengamma etc.) or irrelevant testing libraries (test, corda-mock etc.). // TODO: There is pending work to fix this issue without custom blacklisting. See: https://r3-cev.atlassian.net/browse/CORDA-2164. val exclude = listOf("samples", "finance", "integrationTest", "test", "corda-mock", "com.opengamma.strata") val cp = ProcessUtilities.defaultClassPath.filterNot { cpEntry -> exclude.any { token -> cpEntry.contains("${File.separatorChar}$token") } || cpEntry.endsWith("-tests.jar") } return ProcessUtilities.startJavaProcess( className = "net.corda.node.Corda", // cannot directly get class for this, so just use string arguments = arguments, jdwpPort = debugPort, extraJvmArguments = extraJvmArguments, workingDirectory = config.corda.baseDirectory, maximumHeapSize = maximumHeapSize, classPath = cp ) }</ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$DriverDSLImpl.Companion${ log.info("Starting out-of-process Node ${config.corda.myLegalName.organisation}, " + "debug port is " + (debugPort ?: "not enabled") + ", " + "byteMan: " + if (bytemanJarPath == null) "not in classpath" else "port is " + (bytemanPort ?: "not enabled")) // Write node.conf writeConfig(config.corda.baseDirectory, "node.conf", config.typesafe.toNodeOnly()) val systemProperties = mutableMapOf( "name" to config.corda.myLegalName, "visualvm.display.name" to "corda-${config.corda.myLegalName}" ) debugPort?.let { systemProperties += "log4j2.level" to "debug" systemProperties += "log4j2.debug" to "true" } systemProperties += inheritFromParentProcess() systemProperties += overriddenSystemProperties // See experimental/quasar-hook/README.md for how to generate. val excludePattern = "x(antlr**;bftsmart**;ch**;co.paralleluniverse**;com.codahale**;com.esotericsoftware**;" + "com.fasterxml**;com.google**;com.ibm**;com.intellij**;com.jcabi**;com.nhaarman**;com.opengamma**;" + "com.typesafe**;com.zaxxer**;de.javakaffee**;groovy**;groovyjarjarantlr**;groovyjarjarasm**;io.atomix**;" + "io.github**;io.netty**;jdk**;joptsimple**;junit**;kotlin**;net.bytebuddy**;net.i2p**;org.apache**;" + "org.assertj**;org.bouncycastle**;org.codehaus**;org.crsh**;org.dom4j**;org.fusesource**;org.h2**;" + "org.hamcrest**;org.hibernate**;org.jboss**;org.jcp**;org.joda**;org.junit**;org.mockito**;org.objectweb**;" + "org.objenesis**;org.slf4j**;org.w3c**;org.xml**;org.yaml**;reflectasm**;rx**;org.jolokia**;" + "com.lmax**;picocli**;liquibase**;com.github.benmanes**;org.json**;org.postgresql**;nonapi.io.github.classgraph**;)" val extraJvmArguments = systemProperties.removeResolvedClasspath().map { "-D${it.key}=${it.value}" } + "-javaagent:$quasarJarPath=$excludePattern" val loggingLevel = when { logLevelOverride != null -> logLevelOverride debugPort == null -> "INFO" else -> "DEBUG" } val arguments = mutableListOf( "--base-directory=${config.corda.baseDirectory}", "--logging-level=$loggingLevel", "--no-local-shell").also { it += extraCmdLineFlag }.toList() val bytemanJvmArgs = { val bytemanAgent = bytemanJarPath?.let { bytemanPort?.let { "-javaagent:$bytemanJarPath=port:$bytemanPort,listener:true" } } listOfNotNull(bytemanAgent) + if (bytemanAgent != null && debugPort != null) listOf("-Dorg.jboss.byteman.verbose=true", "-Dorg.jboss.byteman.debug=true") else emptyList() }.invoke() // The following dependencies are excluded from the classpath of the created JVM, so that the environment resembles a real one as close as possible. // These are either classes that will be added as attachments to the node (i.e. samples, finance, opengamma etc.) or irrelevant testing libraries (test, corda-mock etc.). // TODO: There is pending work to fix this issue without custom blacklisting. See: https://r3-cev.atlassian.net/browse/CORDA-2164. val exclude = listOf("samples", "finance", "integrationTest", "test", "corda-mock", "com.opengamma.strata") val cp = ProcessUtilities.defaultClassPath.filterNot { cpEntry -> exclude.any { token -> cpEntry.contains("${File.separatorChar}$token") } || cpEntry.endsWith("-tests.jar") } return ProcessUtilities.startJavaProcess( className = "net.corda.node.Corda", // cannot directly get class for this, so just use string arguments = arguments, jdwpPort = debugPort, extraJvmArguments = extraJvmArguments + bytemanJvmArgs, workingDirectory = config.corda.baseDirectory, maximumHeapSize = maximumHeapSize, classPath = cp ) }</ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$InternalDriverDSL$ fun <A> pollUntilNonNull(pollName: String, pollInterval: Duration = DEFAULT_POLL_INTERVAL, warnCount: Int = DEFAULT_WARN_COUNT, check: () -> A?): CordaFuture<A></ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$InternalDriverDSL$ fun pollUntilTrue(pollName: String, pollInterval: Duration = DEFAULT_POLL_INTERVAL, warnCount: Int = DEFAULT_WARN_COUNT, check: () -> Boolean): CordaFuture<Unit></ID>
|
||||
<ID>MaxLineLength:DriverDSLImpl.kt$fun DriverDSL.startNode(providedName: CordaX500Name, devMode: Boolean, parameters: NodeParameters = NodeParameters()): CordaFuture<NodeHandle></ID>
|
||||
@ -3530,6 +3529,7 @@
|
||||
<ID>MaxLineLength:TransactionVerifierServiceInternal.kt$Verifier$if (ltx.attachments.size != ltx.attachments.toSet().size) throw TransactionVerificationException.DuplicateAttachmentsRejection(ltx.id, ltx.attachments.groupBy { it }.filterValues { it.size > 1 }.keys.first())</ID>
|
||||
<ID>MaxLineLength:TransactionVerifierServiceInternal.kt$Verifier$if (result.keys != contractClasses) throw TransactionVerificationException.MissingAttachmentRejection(ltx.id, contractClasses.minus(result.keys).first())</ID>
|
||||
<ID>MaxLineLength:TransactionVerifierServiceInternal.kt$Verifier$val constraintAttachment = AttachmentWithContext(contractAttachment, contract, ltx.networkParameters!!.whitelistedContractImplementations)</ID>
|
||||
<ID>MaxLineLength:TransactionVerifierServiceInternal.kt$Verifier${ /** * Signature constraints are supported on min. platform version >= 4, but this only includes support for a single key per constraint. * Signature contstraints with composite keys containing more than 1 leaf key are supported on min. platform version >= 5. */ checkMinimumPlatformVersion(ltx.networkParameters?.minimumPlatformVersion ?: 1, 4, "Signature constraints") val constraintKey = constraint.key if (constraintKey is CompositeKey && constraintKey.leafKeys.size > 1) { checkMinimumPlatformVersion(ltx.networkParameters?.minimumPlatformVersion ?: 1, 5, "Composite keys for signature constraints") val leafKeysNumber = constraintKey.leafKeys.size if (leafKeysNumber > MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT) throw TransactionVerificationException.InvalidConstraintRejection(ltx.id, contract, "Signature constraint contains composite key with $leafKeysNumber leaf keys, " + "which is more than the maximum allowed number of keys " + "($MAX_NUMBER_OF_KEYS_IN_SIGNATURE_CONSTRAINT).") } }</ID>
|
||||
<ID>MaxLineLength:TransactionVerifierServiceInternal.kt$Verifier${ // checkNoNotaryChange and checkEncumbrancesValid are called here, and not in the c'tor, as they need access to the "outputs" // list, the contents of which need to be deserialized under the correct classloader. checkNoNotaryChange() checkEncumbrancesValid() // The following checks ensure the integrity of the current transaction and also of the future chain. // See: https://docs.corda.net/head/api-contract-constraints.html // A transaction contains both the data and the code that must be executed to validate the transition of the data. // Transactions can be created by malicious adversaries, who can try to use code that allows them to create transactions that appear valid but are not. // 1. Check that there is one and only one attachment for each relevant contract. val contractAttachmentsByContract = getUniqueContractAttachmentsByContract() // 2. Check that the attachments satisfy the constraints of the states. (The contract verification code is correct.) verifyConstraints(contractAttachmentsByContract) // 3. Check that the actual state constraints are correct. This is necessary because transactions can be built by potentially malicious nodes // who can create output states with a weaker constraint which can be exploited in a future transaction. verifyConstraintsValidity(contractAttachmentsByContract) // 4. Check that the [TransactionState] objects are correctly formed. validateStatesAgainstContract() // 5. Final step is to run the contract code. After the first 4 steps we are now sure that we are running the correct code. verifyContracts() }</ID>
|
||||
<ID>MaxLineLength:TransactionViewer.kt$TransactionViewer$private</ID>
|
||||
<ID>MaxLineLength:TransactionViewer.kt$TransactionViewer$private fun ObservableList<StateAndRef<ContractState>>.getParties()</ID>
|
||||
@ -3804,6 +3804,7 @@
|
||||
<ID>NestedBlockDepth:StartedFlowTransition.kt$StartedFlowTransition$private fun TransitionBuilder.sendToSessionsTransition(sourceSessionIdToMessage: Map<SessionId, SerializedBytes<Any>>)</ID>
|
||||
<ID>NestedBlockDepth:StatusTransitions.kt$StatusTransitions$ fun verify(tx: LedgerTransaction)</ID>
|
||||
<ID>NestedBlockDepth:ThrowableSerializer.kt$ThrowableSerializer$override fun fromProxy(proxy: ThrowableProxy): Throwable</ID>
|
||||
<ID>NestedBlockDepth:TransactionVerifierServiceInternal.kt$Verifier$ private fun verifyConstraints(contractAttachmentsByContract: Map<ContractClassName, ContractAttachment>)</ID>
|
||||
<ID>NestedBlockDepth:TransactionVerifierServiceInternal.kt$Verifier$ private fun verifyConstraintsValidity(contractAttachmentsByContract: Map<ContractClassName, ContractAttachment>)</ID>
|
||||
<ID>SpreadOperator:AMQPSerializationScheme.kt$AbstractAMQPSerializationScheme$(*it.whitelist.toTypedArray())</ID>
|
||||
<ID>SpreadOperator:AbstractNode.kt$FlowStarterImpl$(logicType, *args)</ID>
|
||||
@ -3841,7 +3842,7 @@
|
||||
<ID>SpreadOperator:DemoBench.kt$DemoBench.Companion$(DemoBench::class.java, *args)</ID>
|
||||
<ID>SpreadOperator:DevCertificatesTest.kt$DevCertificatesTest$(*oldX509Certificates)</ID>
|
||||
<ID>SpreadOperator:DockerInstantiator.kt$DockerInstantiator$(*it.toTypedArray())</ID>
|
||||
<ID>SpreadOperator:DriverDSLImpl.kt$DriverDSLImpl$( config, quasarJarPath, debugPort, systemProperties, "512m", null, *extraCmdLineFlag )</ID>
|
||||
<ID>SpreadOperator:DriverDSLImpl.kt$DriverDSLImpl$( config, quasarJarPath, debugPort, bytemanJarPath, null, systemProperties, "512m", null, *extraCmdLineFlag )</ID>
|
||||
<ID>SpreadOperator:DummyContract.kt$DummyContract.Companion$( /* INPUTS */ *priors.toTypedArray(), /* COMMAND */ Command(cmd, priorState.owner.owningKey), /* OUTPUT */ StateAndContract(state, PROGRAM_ID) )</ID>
|
||||
<ID>SpreadOperator:DummyContract.kt$DummyContract.Companion$(*items)</ID>
|
||||
<ID>SpreadOperator:DummyContractV2.kt$DummyContractV2.Companion$( /* INPUTS */ *priors.toTypedArray(), /* COMMAND */ Command(cmd, priorState.owners.map { it.owningKey }), /* OUTPUT */ StateAndContract(state, DummyContractV2.PROGRAM_ID) )</ID>
|
||||
|
@ -100,6 +100,9 @@ Expanding on the previous section, for an app to use Signature Constraints, it m

The signers of the app can consist of a single organisation or multiple organisations. Once the app has been signed, it can be distributed
across the nodes that intend to use it.

.. note:: The platform currently supports ``CompositeKey``\s with up to 20 keys.
          This limit assumes keys that are either 2048-bit ``RSA`` keys or 256-bit elliptic curve (``EC``) keys.

Each transaction received by a node will then verify that the apps attached to it have the correct signers as specified by its
Signature Constraints. This ensures that the version of each app is acceptable to the transaction's input states.

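
As a rough illustration only, the following sketch borrows the internal driver-test helpers (``cordappWithPackages`` and its ``signed`` option) that appear in ``SignatureConstraintGatingTests`` later in this change. These are test-only internals, not a documented API, and the keystore path used here is an arbitrary placeholder:

.. sourcecode:: kotlin

    import net.corda.core.utilities.getOrThrow
    import net.corda.testing.common.internal.testNetworkParameters
    import net.corda.testing.driver.DriverParameters
    import net.corda.testing.driver.driver
    import net.corda.testing.node.internal.cordappWithPackages
    import java.nio.file.Paths

    fun main() {
        // Sign the finance contracts CorDapp with 20 RSA keys (the current platform maximum).
        val financeCordapp = cordappWithPackages("net.corda.finance.contracts", "net.corda.finance.schemas")
                .signed(keyStorePath = Paths.get("build", "constraint-keys"), numberOfSignatures = 20, keyAlgorithm = "RSA")

        driver(DriverParameters(
                // Composite keys with more than one leaf key require minimum platform version 5.
                networkParameters = testNetworkParameters().copy(minimumPlatformVersion = 5),
                cordappsForAllNodes = setOf(financeCordapp),
                startNodesInProcess = true)) {
            // A node that loads this CorDapp will accept states constrained by the 20-key composite signing key.
            startNode().getOrThrow()
        }
    }
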
@ -266,6 +266,7 @@ class InitiatorFlow(val arg1: Boolean, val arg2: Int, private val counterparty:
|
||||
val ourOutputState: DummyState = DummyState()
|
||||
// DOCEND 22
|
||||
// Or as copies of other states with some properties changed.
|
||||
@Suppress("MagicNumber") // literally a magic number
|
||||
// DOCSTART 23
|
||||
val ourOtherOutputState: DummyState = ourOutputState.copy(magicNumber = 77)
|
||||
// DOCEND 23
|
||||
|
@ -51,7 +51,13 @@ Specifically, there are two main ways a flow is hospitalized:

* **Database constraint violation** (``ConstraintViolationException``):
  This scenario may occur due to natural contention between racing flows as Corda delegates handling using the database's optimistic concurrency control.
  As the likelihood of recurrence should be low, the flow will error and fail if it experiences this at the same point more than 3 times. No intervention required.
  If this exception occurs, the flow will retry. After retrying a number of times, the errored flow is kept in for observation.

* ``SQLTransientConnectionException``:
  Database connection pooling errors are dealt with. If this exception occurs, the flow will retry. After retrying a number of times, the errored flow is kept in for observation.

* All other instances of ``SQLException``:
  Any ``SQLException`` that is thrown and not handled by any of the scenarios detailed above will be kept in for observation after its first failure.

* **Finality Flow handling** - Corda 3.x (old style) ``FinalityFlow`` and Corda 4.x ``ReceiveFinalityFlow`` handling:
  If on the receive side of the finality flow, any error will result in the flow being kept in for observation to allow the cause of the
@ -64,7 +70,8 @@ Specifically, there are two main ways a flow is hospitalized:
  The time is hard to document as the notary members, if actually alive, will inform the requester of the ETA of a response.
  This can occur an infinite number of times, i.e. we never give up notarising. No intervention required.

* ``SQLTransientConnectionException``:
  Database connection pooling errors are dealt with. If this exception occurs, the flow will retry. After retrying a number of times, the errored flow is kept in for observation.
* **Internal Corda errors**:
  Flows that experience errors from inside the Corda state machine that are not handled by any of the scenarios detailed above will be retried a number of times
  and then kept in for observation if the error continues.

.. note:: Flows that are kept in for observation are retried upon node restart.

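
As a rough sketch of how this observation behaviour is exercised, the node's own tests (see ``VaultObserverExceptionTest`` later in this change) hook the internal ``StaffedFlowHospital`` callbacks. These hooks are internal and test-visible only; the helper name and the 30 second timeout below are illustrative assumptions:

.. sourcecode:: kotlin

    import net.corda.core.internal.concurrent.openFuture
    import net.corda.core.utilities.getOrThrow
    import net.corda.core.utilities.seconds
    import net.corda.node.services.statemachine.StaffedFlowHospital

    /** Runs [runFailingFlow] and waits for some flow to be kept in for overnight observation. */
    fun expectOvernightObservation(runFailingFlow: () -> Unit) {
        val keptForObservation = openFuture<Boolean>()
        // The hook fires with the flow id and the names of the staff members that made the decision.
        StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ -> keptForObservation.set(true) }
        try {
            runFailingFlow()
            // Throws TimeoutException if no flow is kept in for observation within 30 seconds.
            keptForObservation.getOrThrow(30.seconds)
        } finally {
            // The hooks are static, so clear them to avoid leaking state into other tests.
            StaffedFlowHospital.onFlowKeptForOvernightObservation.clear()
        }
    }
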
@ -18,6 +18,7 @@ object CashSchema
|
||||
* First version of a cash contract ORM schema that maps all fields of the [Cash] contract state as it stood
|
||||
* at the time of writing.
|
||||
*/
|
||||
@Suppress("MagicNumber") // SQL column length
|
||||
@CordaSerializable
|
||||
object CashSchemaV1 : MappedSchema(schemaFamily = CashSchema.javaClass, version = 1, mappedTypes = listOf(PersistentCashState::class.java)) {
|
||||
|
||||
|
@ -22,6 +22,7 @@ object CommercialPaperSchema
|
||||
* as it stood at the time of writing.
|
||||
*/
|
||||
@CordaSerializable
|
||||
@Suppress("MagicNumber") // SQL column length
|
||||
object CommercialPaperSchemaV1 : MappedSchema(schemaFamily = CommercialPaperSchema.javaClass, version = 1, mappedTypes = listOf(PersistentCommercialPaperState::class.java)) {
|
||||
|
||||
override val migrationResource = "commercial-paper.changelog-master"
|
||||
|
@ -20,6 +20,7 @@ import java.util.concurrent.ConcurrentHashMap
|
||||
import java.util.concurrent.CopyOnWriteArrayList
|
||||
import java.util.concurrent.atomic.AtomicInteger
|
||||
import javax.persistence.AttributeConverter
|
||||
import javax.persistence.PersistenceException
|
||||
import javax.sql.DataSource
|
||||
|
||||
/**
|
||||
@ -98,7 +99,8 @@ class CordaPersistence(
|
||||
cacheFactory: NamedCacheFactory,
|
||||
attributeConverters: Collection<AttributeConverter<*, *>> = emptySet(),
|
||||
customClassLoader: ClassLoader? = null,
|
||||
val closeConnection: Boolean = true
|
||||
val closeConnection: Boolean = true,
|
||||
val errorHandler: (t: Throwable) -> Unit = {}
|
||||
) : Closeable {
|
||||
companion object {
|
||||
private val log = contextLogger()
|
||||
@ -189,10 +191,18 @@ class CordaPersistence(
|
||||
}
|
||||
|
||||
fun createSession(): Connection {
|
||||
try {
|
||||
// We need to set the database for the current [Thread] or [Fiber] here as some tests share threads across databases.
|
||||
_contextDatabase.set(this)
|
||||
currentDBSession().flush()
|
||||
return contextTransaction.connection
|
||||
} catch (sqlException: SQLException) {
|
||||
errorHandler(sqlException)
|
||||
throw sqlException
|
||||
} catch (persistenceException: PersistenceException) {
|
||||
errorHandler(persistenceException)
|
||||
throw persistenceException
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -220,11 +230,19 @@ class CordaPersistence(
|
||||
recoverAnyNestedSQLException: Boolean, statement: DatabaseTransaction.() -> T): T {
|
||||
_contextDatabase.set(this)
|
||||
val outer = contextTransactionOrNull
|
||||
try {
|
||||
return if (outer != null) {
|
||||
outer.statement()
|
||||
} else {
|
||||
inTopLevelTransaction(isolationLevel, recoverableFailureTolerance, recoverAnyNestedSQLException, statement)
|
||||
}
|
||||
} catch (sqlException: SQLException) {
|
||||
errorHandler(sqlException)
|
||||
throw sqlException
|
||||
} catch (persistenceException: PersistenceException) {
|
||||
errorHandler(persistenceException)
|
||||
throw persistenceException
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -253,6 +253,12 @@ dependencies {
|
||||
// Required by JVMAgentUtil (x-compatible java 8 & 11 agent lookup mechanism)
|
||||
compile files("${System.properties['java.home']}/../lib/tools.jar")
|
||||
|
||||
// Byteman for runtime (termination) rules injection on the running node
|
||||
// Submission tool allowing rules to be installed on running nodes
|
||||
integrationTestCompile "org.jboss.byteman:byteman-submit:4.0.3"
|
||||
// The actual Byteman agent which should only be in the classpath of the out of process nodes
|
||||
integrationTestCompile "org.jboss.byteman:byteman:4.0.3"
|
||||
|
||||
testCompile(project(':test-cli'))
|
||||
testCompile(project(':test-utils'))
|
||||
|
||||
@ -262,6 +268,8 @@ dependencies {
|
||||
slowIntegrationTestCompile configurations.testCompile
|
||||
slowIntegrationTestRuntime configurations.runtime
|
||||
slowIntegrationTestRuntime configurations.testRuntime
|
||||
|
||||
testCompile project(':testing:cordapps:dbfailure:dbfworkflows')
|
||||
}
|
||||
|
||||
tasks.withType(JavaCompile) {
|
||||
|
@ -0,0 +1,89 @@
|
||||
package net.corda.contracts
|
||||
|
||||
import net.corda.core.contracts.TransactionVerificationException
|
||||
import net.corda.core.utilities.OpaqueBytes
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.finance.DOLLARS
|
||||
import net.corda.finance.flows.CashIssueFlow
|
||||
import net.corda.testing.common.internal.testNetworkParameters
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import net.corda.testing.node.internal.FINANCE_WORKFLOWS_CORDAPP
|
||||
import net.corda.testing.node.internal.cordappWithPackages
|
||||
import org.assertj.core.api.Assertions
|
||||
import org.junit.Rule
|
||||
import org.junit.Test
|
||||
import org.junit.rules.TemporaryFolder
|
||||
|
||||
class SignatureConstraintGatingTests {
|
||||
|
||||
@Rule
|
||||
@JvmField
|
||||
val tempFolder = TemporaryFolder()
|
||||
|
||||
@Test
|
||||
fun `signature constraints can be used with up to the maximum allowed number of (RSA) keys`() {
|
||||
tempFolder.root.toPath().let {path ->
|
||||
val financeCordapp = cordappWithPackages("net.corda.finance.contracts", "net.corda.finance.schemas")
|
||||
.signed(keyStorePath = path, numberOfSignatures = 20, keyAlgorithm = "RSA")
|
||||
|
||||
driver(DriverParameters(
|
||||
networkParameters = testNetworkParameters().copy(minimumPlatformVersion = 5),
|
||||
cordappsForAllNodes = setOf(financeCordapp, FINANCE_WORKFLOWS_CORDAPP),
|
||||
startNodesInProcess = true,
|
||||
inMemoryDB = true
|
||||
)) {
|
||||
val node = startNode().getOrThrow()
|
||||
|
||||
node.rpc.startFlowDynamic(CashIssueFlow::class.java, 10.DOLLARS, OpaqueBytes.of(0), defaultNotaryIdentity)
|
||||
.returnValue.getOrThrow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `signature constraints can be used with up to the maximum allowed number of (EC) keys`() {
|
||||
tempFolder.root.toPath().let {path ->
|
||||
val financeCordapp = cordappWithPackages("net.corda.finance.contracts", "net.corda.finance.schemas")
|
||||
.signed(keyStorePath = path, numberOfSignatures = 20, keyAlgorithm = "EC")
|
||||
|
||||
driver(DriverParameters(
|
||||
networkParameters = testNetworkParameters().copy(minimumPlatformVersion = 5),
|
||||
cordappsForAllNodes = setOf(financeCordapp, FINANCE_WORKFLOWS_CORDAPP),
|
||||
startNodesInProcess = true,
|
||||
inMemoryDB = true
|
||||
)) {
|
||||
val node = startNode().getOrThrow()
|
||||
|
||||
node.rpc.startFlowDynamic(CashIssueFlow::class.java, 10.DOLLARS, OpaqueBytes.of(0), defaultNotaryIdentity)
|
||||
.returnValue.getOrThrow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `signature constraints cannot be used with more than the maximum allowed number of keys`() {
|
||||
tempFolder.root.toPath().let {path ->
|
||||
val financeCordapp = cordappWithPackages("net.corda.finance.contracts", "net.corda.finance.schemas")
|
||||
.signed(keyStorePath = path, numberOfSignatures = 21)
|
||||
|
||||
driver(DriverParameters(
|
||||
networkParameters = testNetworkParameters().copy(minimumPlatformVersion = 5),
|
||||
cordappsForAllNodes = setOf(financeCordapp, FINANCE_WORKFLOWS_CORDAPP),
|
||||
startNodesInProcess = true,
|
||||
inMemoryDB = true
|
||||
)) {
|
||||
val node = startNode().getOrThrow()
|
||||
|
||||
Assertions.assertThatThrownBy {
|
||||
node.rpc.startFlowDynamic(CashIssueFlow::class.java, 10.DOLLARS, OpaqueBytes.of(0), defaultNotaryIdentity)
|
||||
.returnValue.getOrThrow()
|
||||
}
|
||||
.isInstanceOf(TransactionVerificationException.InvalidConstraintRejection::class.java)
|
||||
.hasMessageContaining("Signature constraint contains composite key with 21 leaf keys, " +
|
||||
"which is more than the maximum allowed number of keys (20).")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -17,6 +17,7 @@ import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.unwrap
|
||||
import net.corda.node.services.Permissions
|
||||
import net.corda.node.services.statemachine.FlowTimeoutException
|
||||
import net.corda.node.services.statemachine.StaffedFlowHospital
|
||||
import net.corda.testing.core.ALICE_NAME
|
||||
import net.corda.testing.core.BOB_NAME
|
||||
import net.corda.testing.core.singleIdentity
|
||||
@ -25,6 +26,7 @@ import net.corda.testing.driver.driver
|
||||
import net.corda.testing.node.User
|
||||
import org.assertj.core.api.Assertions.assertThatExceptionOfType
|
||||
import org.hibernate.exception.ConstraintViolationException
|
||||
import org.junit.After
|
||||
import org.junit.Before
|
||||
import org.junit.Test
|
||||
import java.lang.management.ManagementFactory
|
||||
@ -46,6 +48,12 @@ class FlowRetryTest {
|
||||
TransientConnectionFailureFlow.retryCount = -1
|
||||
WrappedTransientConnectionFailureFlow.retryCount = -1
|
||||
GeneralExternalFailureFlow.retryCount = -1
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add { true }
|
||||
}
|
||||
|
||||
@After
|
||||
fun cleanUp() {
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.clear()
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -390,7 +398,9 @@ class WrappedTransientConnectionFailureFlow(private val party: Party) : FlowLogi
|
||||
initiateFlow(party).send("hello there")
|
||||
// checkpoint will restart the flow after the send
|
||||
retryCount += 1
|
||||
throw IllegalStateException("wrapped error message", IllegalStateException("another layer deep", SQLTransientConnectionException("Connection is not available")/*.fillInStackTrace()*/))
|
||||
throw IllegalStateException(
|
||||
"wrapped error message",
|
||||
IllegalStateException("another layer deep", SQLTransientConnectionException("Connection is not available")))
|
||||
}
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -0,0 +1,313 @@
|
||||
package net.corda.node.services.vault
|
||||
|
||||
import com.r3.dbfailure.workflows.CreateStateFlow
|
||||
import com.r3.dbfailure.workflows.CreateStateFlow.Initiator
|
||||
import com.r3.dbfailure.workflows.CreateStateFlow.errorTargetsToNum
|
||||
import net.corda.core.CordaRuntimeException
|
||||
import net.corda.core.internal.concurrent.openFuture
|
||||
import net.corda.core.messaging.startFlow
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.node.services.Permissions
|
||||
import net.corda.node.services.statemachine.StaffedFlowHospital
|
||||
import net.corda.testing.core.ALICE_NAME
|
||||
import net.corda.testing.driver.DriverParameters
|
||||
import net.corda.testing.driver.driver
|
||||
import net.corda.testing.node.User
|
||||
import net.corda.testing.node.internal.findCordapp
|
||||
import org.junit.After
|
||||
import org.junit.Assert
|
||||
import org.junit.Test
|
||||
import rx.exceptions.OnErrorNotImplementedException
|
||||
import java.sql.SQLException
|
||||
import java.time.Duration
|
||||
import java.time.temporal.ChronoUnit
|
||||
import java.util.concurrent.TimeoutException
|
||||
import javax.persistence.PersistenceException
|
||||
import kotlin.test.assertFailsWith
|
||||
|
||||
class VaultObserverExceptionTest {
|
||||
companion object {
|
||||
|
||||
val log = contextLogger()
|
||||
|
||||
private fun testCordapps() = listOf(
|
||||
findCordapp("com.r3.dbfailure.contracts"),
|
||||
findCordapp("com.r3.dbfailure.workflows"),
|
||||
findCordapp("com.r3.dbfailure.schemas"))
|
||||
}
|
||||
|
||||
@After
|
||||
fun tearDown() {
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.clear()
|
||||
StaffedFlowHospital.onFlowKeptForOvernightObservation.clear()
|
||||
StaffedFlowHospital.onFlowAdmitted.clear()
|
||||
}
|
||||
|
||||
/**
|
||||
* Causing an SqlException via a syntax error in a vault observer causes the flow to hit the
|
||||
* DatabaseEndocrinologist in the FlowHospital and be kept in for overnight observation
|
||||
*/
|
||||
@Test
|
||||
fun unhandledSqlExceptionFromVaultObserverGetsHospitatlised() {
|
||||
val testControlFuture = openFuture<Boolean>().toCompletableFuture()
|
||||
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add {
|
||||
when (it) {
|
||||
is OnErrorNotImplementedException -> Assert.fail("OnErrorNotImplementedException should be unwrapped")
|
||||
is SQLException -> {
|
||||
testControlFuture.complete(true)
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
aliceNode.rpc.startFlow(
|
||||
::Initiator,
|
||||
"Syntax Error in Custom SQL",
|
||||
CreateStateFlow.errorTargetsToNum(CreateStateFlow.ErrorTarget.ServiceSqlSyntaxError)
|
||||
).returnValue.then { testControlFuture.complete(false) }
|
||||
val foundExpectedException = testControlFuture.getOrThrow(30.seconds)
|
||||
|
||||
Assert.assertTrue(foundExpectedException)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Throwing a random (non-SQL related) exception from a vault observer causes the flow to be
|
||||
* aborted when unhandled in user code
|
||||
*/
|
||||
@Test
|
||||
fun otherExceptionsFromVaultObserverBringFlowDown() {
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
assertFailsWith(CordaRuntimeException::class, "Toys out of pram") {
|
||||
aliceNode.rpc.startFlow(
|
||||
::Initiator,
|
||||
"InvalidParameterException",
|
||||
CreateStateFlow.errorTargetsToNum(CreateStateFlow.ErrorTarget.ServiceThrowInvalidParameter)
|
||||
).returnValue.getOrThrow(30.seconds)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A random exception from a VaultObserver will bring the Rx Observer down, but can be handled in the flow
|
||||
* triggering the observer, and the flow will continue successfully (for some values of success)
|
||||
*/
|
||||
@Test
|
||||
fun otherExceptionsFromVaultObserverCanBeSuppressedInFlow() {
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
aliceNode.rpc.startFlow(::Initiator, "InvalidParameterException", CreateStateFlow.errorTargetsToNum(
|
||||
CreateStateFlow.ErrorTarget.ServiceThrowInvalidParameter,
|
||||
CreateStateFlow.ErrorTarget.FlowSwallowErrors))
|
||||
.returnValue.getOrThrow(30.seconds)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If the state we are trying to persist triggers a persistence exception, the flow hospital will retry the flow
|
||||
* and keep it in for observation if errors persist.
|
||||
*/
|
||||
@Test
|
||||
fun persistenceExceptionOnCommitGetsRetriedAndThenGetsKeptForObservation() {
|
||||
var admitted = 0
|
||||
var observation = 0
|
||||
StaffedFlowHospital.onFlowAdmitted.add {
|
||||
++admitted
|
||||
}
|
||||
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
|
||||
++observation
|
||||
}
|
||||
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
assertFailsWith<TimeoutException> {
|
||||
aliceNode.rpc.startFlow(::Initiator, "EntityManager", errorTargetsToNum(CreateStateFlow.ErrorTarget.TxInvalidState))
|
||||
.returnValue.getOrThrow(Duration.of(30, ChronoUnit.SECONDS))
|
||||
}
|
||||
}
|
||||
Assert.assertTrue("Exception from service has not been to Hospital", admitted > 0)
|
||||
Assert.assertEquals(1, observation)
|
||||
}
|
||||
|
||||
/**
|
||||
* If we have a state causing a database error lined up for persistence, calling jdbConnection() in
|
||||
* the vault observer will trigger a flush that throws. This will be kept in for observation.
|
||||
*/
|
||||
@Test
|
||||
fun persistenceExceptionOnFlushGetsRetriedAndThenGetsKeptForObservation() {
|
||||
var counter = 0
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add {
|
||||
when (it) {
|
||||
is OnErrorNotImplementedException -> Assert.fail("OnErrorNotImplementedException should be unwrapped")
|
||||
is PersistenceException -> {
|
||||
++counter
|
||||
log.info("Got a PersistentException in the flow hospital count = $counter")
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
var observation = 0
|
||||
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
|
||||
++observation
|
||||
}
|
||||
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
assertFailsWith<TimeoutException>("PersistenceException") {
|
||||
aliceNode.rpc.startFlow(::Initiator, "EntityManager", errorTargetsToNum(
|
||||
CreateStateFlow.ErrorTarget.ServiceValidUpdate,
|
||||
CreateStateFlow.ErrorTarget.TxInvalidState))
|
||||
.returnValue.getOrThrow(30.seconds)
|
||||
}
|
||||
}
|
||||
Assert.assertTrue("Flow has not been to hospital", counter > 0)
|
||||
Assert.assertEquals(1, observation)
|
||||
}
|
||||
|
||||
/**
|
||||
* If we have a state causing a database error lined up for persistence, calling jdbConnection() in
|
||||
* the vault observer will trigger a flush that throws.
|
||||
* Trying to catch and suppress that exception in the flow around the code triggering the vault observer
|
||||
* does not change the outcome - the first exception in the service will bring the service down and will
|
||||
* be caught by the flow, but the state machine will error the flow anyway as Corda code threw.
|
||||
*/
|
||||
@Test
|
||||
fun persistenceExceptionOnFlushInVaultObserverCannotBeSuppressedInFlow() {
|
||||
var counter = 0
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add {
|
||||
when (it) {
|
||||
is OnErrorNotImplementedException -> Assert.fail("OnErrorNotImplementedException should be unwrapped")
|
||||
is PersistenceException -> {
|
||||
++counter
|
||||
log.info("Got a PersistentException in the flow hospital count = $counter")
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
val flowHandle = aliceNode.rpc.startFlow(
|
||||
::Initiator,
|
||||
"EntityManager",
|
||||
CreateStateFlow.errorTargetsToNum(
|
||||
CreateStateFlow.ErrorTarget.ServiceValidUpdate,
|
||||
CreateStateFlow.ErrorTarget.TxInvalidState,
|
||||
CreateStateFlow.ErrorTarget.FlowSwallowErrors))
|
||||
val flowResult = flowHandle.returnValue
|
||||
assertFailsWith<TimeoutException>("PersistenceException") { flowResult.getOrThrow(30.seconds) }
|
||||
Assert.assertTrue("Flow has not been to hospital", counter > 0)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If we have a state causing a persistence exception lined up for persistence, calling jdbConnection() in
|
||||
* the vault observer will trigger a flush that throws.
|
||||
* Trying to catch and suppress that exception inside the service does protect the service, but the new
|
||||
* interceptor will fail the flow anyway. The flow will be kept in for observation if errors persist.
|
||||
*/
|
||||
@Test
|
||||
fun persistenceExceptionOnFlushInVaultObserverCannotBeSuppressedInService() {
|
||||
var counter = 0
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add {
|
||||
when (it) {
|
||||
is OnErrorNotImplementedException -> Assert.fail("OnErrorNotImplementedException should be unwrapped")
|
||||
is PersistenceException -> {
|
||||
++counter
|
||||
log.info("Got a PersistentException in the flow hospital count = $counter")
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
val flowHandle = aliceNode.rpc.startFlow(
|
||||
::Initiator, "EntityManager",
|
||||
CreateStateFlow.errorTargetsToNum(
|
||||
CreateStateFlow.ErrorTarget.ServiceValidUpdate,
|
||||
CreateStateFlow.ErrorTarget.TxInvalidState,
|
||||
CreateStateFlow.ErrorTarget.ServiceSwallowErrors))
|
||||
val flowResult = flowHandle.returnValue
|
||||
assertFailsWith<TimeoutException>("PersistenceException") { flowResult.getOrThrow(30.seconds) }
|
||||
Assert.assertTrue("Flow has not been to hospital", counter > 0)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* User code throwing a syntax error in a raw vault observer will break the recordTransaction call,
|
||||
* therefore handling it in flow code is no good, and the error will be passed to the flow hospital via the
|
||||
* interceptor.
|
||||
*/
|
||||
@Test
|
||||
fun syntaxErrorInUserCodeInServiceCannotBeSuppressedInFlow() {
|
||||
val testControlFuture = openFuture<Boolean>()
|
||||
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ ->
|
||||
log.info("Flow has been kept for overnight observation")
|
||||
testControlFuture.set(true)
|
||||
}
|
||||
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
val flowHandle = aliceNode.rpc.startFlow(::Initiator, "EntityManager", CreateStateFlow.errorTargetsToNum(
|
||||
CreateStateFlow.ErrorTarget.ServiceSqlSyntaxError,
|
||||
CreateStateFlow.ErrorTarget.FlowSwallowErrors))
|
||||
val flowResult = flowHandle.returnValue
|
||||
flowResult.then {
|
||||
log.info("Flow has finished")
|
||||
testControlFuture.set(false)
|
||||
}
|
||||
Assert.assertTrue("Flow has not been kept in hospital", testControlFuture.getOrThrow(30.seconds))
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* User code throwing a syntax error and catching and suppressing it within the observer code is fine
|
||||
* and should not have any impact on the rest of the flow
|
||||
*/
|
||||
@Test
|
||||
fun syntaxErrorInUserCodeInServiceCanBeSuppressedInService() {
|
||||
driver(DriverParameters(
|
||||
startNodesInProcess = true,
|
||||
cordappsForAllNodes = testCordapps())) {
|
||||
val aliceUser = User("user", "foo", setOf(Permissions.all()))
|
||||
val aliceNode = startNode(providedName = ALICE_NAME, rpcUsers = listOf(aliceUser)).getOrThrow()
|
||||
val flowHandle = aliceNode.rpc.startFlow(::Initiator, "EntityManager", CreateStateFlow.errorTargetsToNum(
|
||||
CreateStateFlow.ErrorTarget.ServiceSqlSyntaxError,
|
||||
CreateStateFlow.ErrorTarget.ServiceSwallowErrors))
|
||||
val flowResult = flowHandle.returnValue
|
||||
flowResult.getOrThrow(30.seconds)
|
||||
}
|
||||
}
|
||||
}
|
@ -21,6 +21,7 @@ import net.corda.testing.driver.internal.internalServices
|
||||
import net.corda.testing.node.ClusterSpec
|
||||
import net.corda.testing.node.NotarySpec
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
import org.junit.Ignore
|
||||
import org.junit.Test
|
||||
import java.util.*
|
||||
import java.util.concurrent.atomic.AtomicBoolean
|
||||
@ -31,6 +32,7 @@ class P2PMessagingTest {
|
||||
}
|
||||
|
||||
@Test
|
||||
@Ignore
|
||||
fun `communicating with a distributed service which we're part of`() {
|
||||
startDriverWithDistributedService { distributedService ->
|
||||
assertAllNodesAreUsed(distributedService, DISTRIBUTED_SERVICE_NAME, distributedService[0])
|
||||
|
@ -1190,6 +1190,7 @@ class FlowStarterImpl(private val smm: StateMachineManager, private val flowLogi
|
||||
override val deduplicationHandler: DeduplicationHandler
|
||||
get() = this
|
||||
|
||||
override val flowId: StateMachineRunId = StateMachineRunId.createRandom()
|
||||
override val flowLogic: FlowLogic<T>
|
||||
get() = logic
|
||||
override val context: InvocationContext
|
||||
@ -1232,8 +1233,17 @@ fun createCordaPersistence(databaseConfig: DatabaseConfig,
|
||||
@Suppress("DEPRECATION")
|
||||
org.hibernate.type.descriptor.java.JavaTypeDescriptorRegistry.INSTANCE.addDescriptor(AbstractPartyDescriptor(wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous))
|
||||
val attributeConverters = listOf(PublicKeyToTextConverter(), AbstractPartyToX500NameAsStringConverter(wellKnownPartyFromX500Name, wellKnownPartyFromAnonymous))
|
||||
|
||||
val jdbcUrl = hikariProperties.getProperty("dataSource.url", "")
|
||||
return CordaPersistence(databaseConfig, schemaService.schemaOptions.keys, jdbcUrl, cacheFactory, attributeConverters, customClassLoader)
|
||||
return CordaPersistence(
|
||||
databaseConfig,
|
||||
schemaService.schemaOptions.keys,
|
||||
jdbcUrl,
|
||||
cacheFactory,
|
||||
attributeConverters, customClassLoader,
|
||||
errorHandler = { t ->
|
||||
FlowStateMachineImpl.currentStateMachine()?.scheduleEvent(Event.Error(t))
|
||||
})
|
||||
}
|
||||
|
||||
fun CordaPersistence.startHikariPool(hikariProperties: Properties, databaseConfig: DatabaseConfig, schemas: Set<MappedSchema>, metricRegistry: MetricRegistry? = null, cordappLoader: CordappLoader? = null, currentDir: Path? = null, ourName: CordaX500Name) {
|
||||
|
@ -29,6 +29,7 @@ object NodeInfoSchemaV1 : MappedSchema(
|
||||
@Column(name = "node_info_id", nullable = false)
|
||||
var id: Int,
|
||||
|
||||
@Suppress("MagicNumber") // database column width
|
||||
@Column(name = "node_info_hash", length = 64, nullable = false)
|
||||
val hash: String,
|
||||
|
||||
|
@ -11,6 +11,7 @@ import net.corda.core.contracts.ScheduledStateRef
|
||||
import net.corda.core.contracts.StateRef
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.FlowLogicRefFactory
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.internal.*
|
||||
import net.corda.core.internal.concurrent.flatMap
|
||||
import net.corda.core.internal.concurrent.openFuture
|
||||
@ -239,6 +240,7 @@ class NodeSchedulerService(private val clock: CordaClock,
|
||||
}
|
||||
|
||||
private inner class FlowStartDeduplicationHandler(val scheduledState: ScheduledStateRef, override val flowLogic: FlowLogic<Any?>, override val context: InvocationContext) : DeduplicationHandler, ExternalEvent.ExternalStartFlowEvent<Any?> {
|
||||
override val flowId: StateMachineRunId = StateMachineRunId.createRandom()
|
||||
override val externalCause: ExternalEvent
|
||||
get() = this
|
||||
override val deduplicationHandler: FlowStartDeduplicationHandler
|
||||
|
@ -2,12 +2,16 @@ package net.corda.node.services.identity
|
||||
|
||||
import net.corda.core.crypto.Crypto
|
||||
import net.corda.core.crypto.toStringShort
|
||||
import net.corda.core.identity.*
|
||||
import net.corda.core.identity.AbstractParty
|
||||
import net.corda.core.identity.AnonymousParty
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.identity.PartyAndCertificate
|
||||
import net.corda.core.identity.x500Matches
|
||||
import net.corda.core.internal.CertRole
|
||||
import net.corda.core.internal.NamedCacheFactory
|
||||
import net.corda.core.internal.hash
|
||||
import net.corda.core.internal.toSet
|
||||
import net.corda.core.node.services.IdentityService
|
||||
import net.corda.core.node.services.UnknownAnonymousPartyException
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
import net.corda.core.utilities.MAX_HASH_HEX_SIZE
|
||||
@ -29,13 +33,18 @@ import org.hibernate.annotations.Type
|
||||
import org.hibernate.internal.util.collections.ArrayHelper.EMPTY_BYTE_ARRAY
|
||||
import java.security.InvalidAlgorithmParameterException
|
||||
import java.security.PublicKey
|
||||
import java.security.cert.*
|
||||
import java.security.cert.CertPathValidatorException
|
||||
import java.security.cert.CertStore
|
||||
import java.security.cert.CertificateExpiredException
|
||||
import java.security.cert.CertificateNotYetValidException
|
||||
import java.security.cert.CollectionCertStoreParameters
|
||||
import java.security.cert.TrustAnchor
|
||||
import java.security.cert.X509Certificate
|
||||
import java.util.*
|
||||
import javax.annotation.concurrent.ThreadSafe
|
||||
import javax.persistence.Column
|
||||
import javax.persistence.Entity
|
||||
import javax.persistence.Id
|
||||
import kotlin.IllegalStateException
|
||||
import kotlin.collections.HashSet
|
||||
import kotlin.streams.toList
|
||||
|
||||
@ -147,6 +156,7 @@ class PersistentIdentityService(cacheFactory: NamedCacheFactory) : SingletonSeri
|
||||
@javax.persistence.Table(name = NAME_TO_HASH_TABLE_NAME)
|
||||
class PersistentPartyToPublicKeyHash(
|
||||
@Id
|
||||
@Suppress("MagicNumber") // database column width
|
||||
@Column(name = NAME_COLUMN_NAME, length = 128, nullable = false)
|
||||
var name: String = "",
|
||||
|
||||
|
@ -85,6 +85,7 @@ class P2PMessageDeduplicator(cacheFactory: NamedCacheFactory, private val databa
|
||||
}
|
||||
|
||||
@Entity
|
||||
@Suppress("MagicNumber") // database column width
|
||||
@javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}message_ids")
|
||||
class ProcessedMessage(
|
||||
@Id
|
||||
|
@ -3,6 +3,7 @@ package net.corda.node.services.messaging
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import com.codahale.metrics.MetricRegistry
|
||||
import net.corda.core.crypto.toStringShort
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.internal.NamedCacheFactory
|
||||
import net.corda.core.internal.ThreadBox
|
||||
@ -424,6 +425,7 @@ class P2PMessagingClient(val config: NodeConfiguration,
|
||||
private inner class MessageDeduplicationHandler(val artemisMessage: ClientMessage, override val receivedMessage: ReceivedMessage) : DeduplicationHandler, ExternalEvent.ExternalMessageEvent {
|
||||
override val externalCause: ExternalEvent
|
||||
get() = this
|
||||
override val flowId: StateMachineRunId = StateMachineRunId.createRandom()
|
||||
override val deduplicationHandler: MessageDeduplicationHandler
|
||||
get() = this
|
||||
|
||||
|
@ -29,6 +29,7 @@ class DBCheckpointStorage : CheckpointStorage {
|
||||
@javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}checkpoints")
|
||||
class DBCheckpoint(
|
||||
@Id
|
||||
@Suppress("MagicNumber") // database column width
|
||||
@Column(name = "checkpoint_id", length = 64, nullable = false)
|
||||
var checkpointId: String = "",
|
||||
|
||||
|
@ -30,6 +30,7 @@ import kotlin.streams.toList
|
||||
|
||||
class DBTransactionStorage(private val database: CordaPersistence, cacheFactory: NamedCacheFactory) : WritableTransactionStorage, SingletonSerializeAsToken() {
|
||||
|
||||
@Suppress("MagicNumber") // database column width
|
||||
@Entity
|
||||
@Table(name = "${NODE_DATABASE_PREFIX}transactions")
|
||||
class DBTransaction(
|
||||
|
@ -120,10 +120,21 @@ class ActionExecutorImpl(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("TooGenericExceptionCaught") // this is fully intentional here, see comment in the catch clause
|
||||
@Suspendable
|
||||
private fun executeAcknowledgeMessages(action: Action.AcknowledgeMessages) {
|
||||
action.deduplicationHandlers.forEach {
|
||||
try {
|
||||
it.afterDatabaseTransaction()
|
||||
} catch (e: Exception) {
|
||||
// Catch all exceptions that occur in the [DeduplicationHandler]s (although errors should be unlikely)
|
||||
// It is deemed safe for errors to occur here
|
||||
// Therefore the current transition should not fail if something does go wrong
|
||||
log.info(
|
||||
"An error occurred executing a deduplication post-database commit handler. Continuing, as it is safe to do so.",
|
||||
e
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -218,8 +229,10 @@ class ActionExecutorImpl(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("TooGenericExceptionCaught") // this is fully intentional here, see comment in the catch clause
|
||||
@Suspendable
|
||||
private fun executeAsyncOperation(fiber: FlowFiber, action: Action.ExecuteAsyncOperation) {
|
||||
try {
|
||||
val operationFuture = action.operation.execute(action.deduplicationId)
|
||||
operationFuture.thenMatch(
|
||||
success = { result ->
|
||||
@ -229,6 +242,11 @@ class ActionExecutorImpl(
|
||||
fiber.scheduleEvent(Event.Error(exception))
|
||||
}
|
||||
)
|
||||
} catch (e: Exception) {
|
||||
// Catch and wrap any unexpected exceptions from the async operation
|
||||
// Wrapping the exception allows it to be better handled by the flow hospital
|
||||
throw AsyncOperationTransitionException(e)
|
||||
}
|
||||
}
|
||||
|
||||
private fun executeRetryFlowFromSafePoint(action: Action.RetryFlowFromSafePoint) {
|
||||
|
@ -16,6 +16,7 @@ import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.*
|
||||
import net.corda.core.internal.concurrent.OpenFuture
|
||||
import net.corda.core.internal.concurrent.map
|
||||
import net.corda.core.internal.concurrent.mapError
|
||||
import net.corda.core.internal.concurrent.openFuture
|
||||
import net.corda.core.messaging.DataFeed
|
||||
import net.corda.core.serialization.SerializedBytes
|
||||
@ -113,7 +114,7 @@ class SingleThreadedStateMachineManager(
|
||||
private var checkpointSerializationContext: CheckpointSerializationContext? = null
|
||||
private var actionExecutor: ActionExecutor? = null
|
||||
|
||||
override val flowHospital: StaffedFlowHospital = StaffedFlowHospital(flowMessaging, ourSenderUUID)
|
||||
override val flowHospital: StaffedFlowHospital = makeFlowHospital()
|
||||
private val transitionExecutor = makeTransitionExecutor()
|
||||
|
||||
override val allStateMachines: List<FlowLogic<*>>
|
||||
@ -210,12 +211,14 @@ class SingleThreadedStateMachineManager(
|
||||
}
|
||||
|
||||
private fun <A> startFlow(
|
||||
flowId: StateMachineRunId,
|
||||
flowLogic: FlowLogic<A>,
|
||||
context: InvocationContext,
|
||||
ourIdentity: Party?,
|
||||
deduplicationHandler: DeduplicationHandler?
|
||||
): CordaFuture<FlowStateMachine<A>> {
|
||||
return startFlowInternal(
|
||||
flowId,
|
||||
invocationContext = context,
|
||||
flowLogic = flowLogic,
|
||||
flowStart = FlowStart.Explicit,
|
||||
@ -230,7 +233,10 @@ class SingleThreadedStateMachineManager(
|
||||
cancelTimeoutIfScheduled(id)
|
||||
val flow = flows.remove(id)
|
||||
if (flow != null) {
|
||||
logger.debug("Killing flow known to physical node.")
|
||||
flow.fiber.transientState?.let {
|
||||
flow.fiber.transientState = TransientReference(it.value.copy(isRemoved = true))
|
||||
}
|
||||
logger.info("Killing flow $id known to this node.")
|
||||
decrementLiveFibers()
|
||||
totalFinishedFlows.inc()
|
||||
try {
|
||||
@ -239,6 +245,7 @@ class SingleThreadedStateMachineManager(
|
||||
} finally {
|
||||
database.transaction {
|
||||
checkpointStorage.removeCheckpoint(id)
|
||||
serviceHub.vaultService.softLockRelease(id.uuid)
|
||||
}
|
||||
transitionExecutor.forceRemoveFlow(id)
|
||||
unfinishedFibers.countDown()
|
||||
@ -343,9 +350,11 @@ class SingleThreadedStateMachineManager(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("TooGenericExceptionCaught", "ComplexMethod", "MaxLineLength") // this is fully intentional here, see comment in the catch clause
|
||||
override fun retryFlowFromSafePoint(currentState: StateMachineState) {
|
||||
// Get set of external events
|
||||
val flowId = currentState.flowLogic.runId
|
||||
try {
|
||||
val oldFlowLeftOver = mutex.locked { flows[flowId] }?.fiber?.transientValues?.value?.eventQueue
|
||||
if (oldFlowLeftOver == null) {
|
||||
logger.error("Unable to find flow for flow $flowId. Something is very wrong. The flow will not retry.")
|
||||
@ -396,6 +405,17 @@ class SingleThreadedStateMachineManager(
|
||||
deliverExternalEvent(externalEvent)
|
||||
}
|
||||
}
|
||||
} catch (e: Exception) {
|
||||
// Failed to retry - manually put the flow in for observation rather than
|
||||
// relying on the [HospitalisingInterceptor] to do so
|
||||
val exceptions = (currentState.checkpoint.errorState as? ErrorState.Errored)
|
||||
?.errors
|
||||
?.map { it.exception }
|
||||
?.plus(e) ?: emptyList()
|
||||
logger.info("Failed to retry flow $flowId, keeping in for observation and aborting")
|
||||
flowHospital.forceIntoOvernightObservation(flowId, exceptions)
|
||||
throw e
|
||||
}
|
||||
}
|
||||
|
||||
override fun deliverExternalEvent(event: ExternalEvent) {
|
||||
@ -410,7 +430,13 @@ class SingleThreadedStateMachineManager(
|
||||
}
|
||||
|
||||
private fun <T> onExternalStartFlow(event: ExternalEvent.ExternalStartFlowEvent<T>) {
|
||||
val future = startFlow(event.flowLogic, event.context, ourIdentity = null, deduplicationHandler = event.deduplicationHandler)
|
||||
val future = startFlow(
|
||||
event.flowId,
|
||||
event.flowLogic,
|
||||
event.context,
|
||||
ourIdentity = null,
|
||||
deduplicationHandler = event.deduplicationHandler
|
||||
)
|
||||
event.wireUpFuture(future)
|
||||
}
|
||||
|
||||
@ -476,7 +502,16 @@ class SingleThreadedStateMachineManager(
|
||||
is InitiatedFlowFactory.Core -> event.receivedMessage.platformVersion
|
||||
is InitiatedFlowFactory.CorDapp -> null
|
||||
}
|
||||
startInitiatedFlow(flowLogic, event.deduplicationHandler, senderSession, initiatedSessionId, sessionMessage, senderCoreFlowVersion, initiatedFlowInfo)
|
||||
startInitiatedFlow(
|
||||
event.flowId,
|
||||
flowLogic,
|
||||
event.deduplicationHandler,
|
||||
senderSession,
|
||||
initiatedSessionId,
|
||||
sessionMessage,
|
||||
senderCoreFlowVersion,
|
||||
initiatedFlowInfo
|
||||
)
|
||||
} catch (t: Throwable) {
|
||||
logger.warn("Unable to initiate flow from $sender (appName=${sessionMessage.appName} " +
|
||||
"flowVersion=${sessionMessage.flowVersion}), sending to the flow hospital", t)
|
||||
@ -503,7 +538,9 @@ class SingleThreadedStateMachineManager(
|
||||
return serviceHub.getFlowFactory(initiatorFlowClass) ?: throw SessionRejectException.NotRegistered(initiatorFlowClass)
|
||||
}
|
||||
|
||||
@Suppress("LongParameterList")
|
||||
private fun <A> startInitiatedFlow(
|
||||
flowId: StateMachineRunId,
|
||||
flowLogic: FlowLogic<A>,
|
||||
initiatingMessageDeduplicationHandler: DeduplicationHandler,
|
||||
peerSession: FlowSessionImpl,
|
||||
@ -515,13 +552,19 @@ class SingleThreadedStateMachineManager(
|
||||
val flowStart = FlowStart.Initiated(peerSession, initiatedSessionId, initiatingMessage, senderCoreFlowVersion, initiatedFlowInfo)
|
||||
val ourIdentity = ourFirstIdentity
|
||||
startFlowInternal(
|
||||
InvocationContext.peer(peerSession.counterparty.name), flowLogic, flowStart, ourIdentity,
|
||||
flowId,
|
||||
InvocationContext.peer(peerSession.counterparty.name),
|
||||
flowLogic,
|
||||
flowStart,
|
||||
ourIdentity,
|
||||
initiatingMessageDeduplicationHandler,
|
||||
isStartIdempotent = false
|
||||
)
|
||||
}
|
||||
|
||||
@Suppress("LongParameterList")
|
||||
private fun <A> startFlowInternal(
|
||||
flowId: StateMachineRunId,
|
||||
invocationContext: InvocationContext,
|
||||
flowLogic: FlowLogic<A>,
|
||||
flowStart: FlowStart,
|
||||
@ -529,7 +572,6 @@ class SingleThreadedStateMachineManager(
|
||||
deduplicationHandler: DeduplicationHandler?,
|
||||
isStartIdempotent: Boolean
|
||||
): CordaFuture<FlowStateMachine<A>> {
|
||||
val flowId = StateMachineRunId.createRandom()
|
||||
|
||||
// Before we construct the state machine state by freezing the FlowLogic we need to make sure that lazy properties
|
||||
// have access to the fiber (and thereby the service hub)
|
||||
@ -541,7 +583,28 @@ class SingleThreadedStateMachineManager(
|
||||
|
||||
val flowCorDappVersion = createSubFlowVersion(serviceHub.cordappProvider.getCordappForFlow(flowLogic), serviceHub.myInfo.platformVersion)
|
||||
|
||||
val initialCheckpoint = Checkpoint.create(
|
||||
val flowAlreadyExists = mutex.locked { flows[flowId] != null }
|
||||
|
||||
val existingCheckpoint = if (flowAlreadyExists) {
|
||||
// Load the flow's checkpoint
|
||||
// The checkpoint will be missing if the flow failed before persisting the original checkpoint
|
||||
// CORDA-3359 - Do not start/retry a flow that failed after deleting its checkpoint (the whole of the flow might replay)
|
||||
checkpointStorage.getCheckpoint(flowId)?.let { serializedCheckpoint ->
|
||||
val checkpoint = tryCheckpointDeserialize(serializedCheckpoint, flowId)
|
||||
if (checkpoint == null) {
|
||||
return openFuture<FlowStateMachine<A>>().mapError {
|
||||
IllegalStateException("Unable to deserialize database checkpoint for flow $flowId. " +
|
||||
"Something is very wrong. The flow will not retry.")
|
||||
}
|
||||
} else {
|
||||
checkpoint
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// This is a brand new flow
|
||||
null
|
||||
}
|
||||
val checkpoint = existingCheckpoint ?: Checkpoint.create(
|
||||
invocationContext,
|
||||
flowStart,
|
||||
flowLogic.javaClass,
|
||||
@ -550,13 +613,14 @@ class SingleThreadedStateMachineManager(
|
||||
flowCorDappVersion,
|
||||
flowLogic.isEnabledTimedFlow()
|
||||
).getOrThrow()
|
||||
|
||||
val startedFuture = openFuture<Unit>()
|
||||
val initialState = StateMachineState(
|
||||
checkpoint = initialCheckpoint,
|
||||
checkpoint = checkpoint,
|
||||
pendingDeduplicationHandlers = deduplicationHandler?.let { listOf(it) } ?: emptyList(),
|
||||
isFlowResumed = false,
|
||||
isTransactionTracked = false,
|
||||
isAnyCheckpointPersisted = false,
|
||||
isAnyCheckpointPersisted = existingCheckpoint != null,
|
||||
isStartIdempotent = isStartIdempotent,
|
||||
isRemoved = false,
|
||||
flowLogic = flowLogic,
|
||||
@ -817,6 +881,12 @@ class SingleThreadedStateMachineManager(
|
||||
return interceptors.fold(transitionExecutor) { executor, interceptor -> interceptor(executor) }
|
||||
}
|
||||
|
||||
private fun makeFlowHospital() : StaffedFlowHospital {
|
||||
// If the node is running as a notary service, we don't retain errored session initiation requests in case of missing Cordapps
|
||||
// to avoid memory leaks if the notary is under heavy load.
|
||||
return StaffedFlowHospital(flowMessaging, serviceHub.clock, ourSenderUUID)
|
||||
}
|
||||
|
||||
private fun InnerState.removeFlowOrderly(
|
||||
flow: Flow,
|
||||
removalReason: FlowRemovalReason.OrderlyFinish,
|
||||
|
@ -10,48 +10,103 @@ import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.DeclaredField
|
||||
import net.corda.core.internal.ThreadBox
|
||||
import net.corda.core.internal.TimedFlow
|
||||
import net.corda.core.internal.VisibleForTesting
|
||||
import net.corda.core.internal.bufferUntilSubscribed
|
||||
import net.corda.core.messaging.DataFeed
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.debug
|
||||
import net.corda.core.utilities.minutes
|
||||
import net.corda.core.utilities.seconds
|
||||
import net.corda.node.services.FinalityHandler
|
||||
import org.hibernate.exception.ConstraintViolationException
|
||||
import rx.subjects.PublishSubject
|
||||
import java.sql.SQLException
|
||||
import java.sql.SQLTransientConnectionException
|
||||
import java.time.Clock
|
||||
import java.time.Duration
|
||||
import java.time.Instant
|
||||
import java.util.*
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
import javax.persistence.PersistenceException
|
||||
import kotlin.concurrent.timerTask
|
||||
import kotlin.math.pow
|
||||
|
||||
/**
|
||||
* This hospital consults "staff" to see if they can automatically diagnose and treat flows.
|
||||
*/
|
||||
class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val ourSenderUUID: String) {
|
||||
private companion object {
|
||||
class StaffedFlowHospital(private val flowMessaging: FlowMessaging,
|
||||
private val clock: Clock,
|
||||
private val ourSenderUUID: String) {
|
||||
companion object {
|
||||
private val log = contextLogger()
|
||||
private val staff = listOf(
|
||||
DeadlockNurse,
|
||||
DuplicateInsertSpecialist,
|
||||
DoctorTimeout,
|
||||
FinalityDoctor,
|
||||
TransientConnectionCardiologist
|
||||
TransientConnectionCardiologist,
|
||||
DatabaseEndocrinologist,
|
||||
TransitionErrorGeneralPractitioner
|
||||
)
|
||||
|
||||
@VisibleForTesting
|
||||
val onFlowKeptForOvernightObservation = mutableListOf<(id: StateMachineRunId, by: List<String>) -> Unit>()
|
||||
|
||||
@VisibleForTesting
|
||||
val onFlowDischarged = mutableListOf<(id: StateMachineRunId, by: List<String>) -> Unit>()
|
||||
|
||||
@VisibleForTesting
|
||||
val onFlowAdmitted = mutableListOf<(id: StateMachineRunId) -> Unit>()
|
||||
}
|
||||
|
||||
private val hospitalJobTimer = Timer("FlowHospitalJobTimer", true)
|
||||
|
||||
init {
|
||||
// Register a task to log (at intervals) flows that are kept in hospital for overnight observation.
|
||||
hospitalJobTimer.scheduleAtFixedRate(timerTask {
|
||||
mutex.locked {
|
||||
if (flowsInHospital.isNotEmpty()) {
|
||||
// Get patients whose last record in their medical records is Outcome.OVERNIGHT_OBSERVATION.
|
||||
val patientsUnderOvernightObservation =
|
||||
flowsInHospital.filter { flowPatients[it.key]?.records?.last()?.outcome == Outcome.OVERNIGHT_OBSERVATION }
|
||||
if (patientsUnderOvernightObservation.isNotEmpty())
|
||||
log.warn("There are ${patientsUnderOvernightObservation.count()} flows kept for overnight observation. " +
|
||||
"Affected flow ids: ${patientsUnderOvernightObservation.map { it.key.uuid.toString() }.joinToString()}")
|
||||
}
|
||||
if (treatableSessionInits.isNotEmpty()) {
|
||||
log.warn("There are ${treatableSessionInits.count()} erroneous session initiations kept for overnight observation. " +
|
||||
"Erroneous session initiation ids: ${treatableSessionInits.map { it.key.toString() }.joinToString()}")
|
||||
}
|
||||
}
|
||||
}, 1.minutes.toMillis(), 1.minutes.toMillis())
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents the flows that have been admitted to the hospital for treatment.
|
||||
* Flows should be removed from [flowsInHospital] when they have completed a successful transition.
|
||||
*/
|
||||
private val flowsInHospital = ConcurrentHashMap<StateMachineRunId, FlowFiber>()
|
||||
|
||||
private val mutex = ThreadBox(object {
|
||||
/**
|
||||
* Contains medical history of every flow (a patient) that has entered the hospital. A flow can leave the hospital,
|
||||
* but their medical history will be retained.
|
||||
*
|
||||
* Flows should be removed from [flowPatients] when they have completed successfully. Upon successful completion,
|
||||
* the medical history of a flow is no longer relevant as that flow has been completely removed from the
|
||||
* statemachine.
|
||||
*/
|
||||
val flowPatients = HashMap<StateMachineRunId, FlowMedicalHistory>()
|
||||
val treatableSessionInits = HashMap<UUID, InternalSessionInitRecord>()
|
||||
val recordsPublisher = PublishSubject.create<MedicalRecord>()
|
||||
})
|
||||
private val secureRandom = newSecureRandom()
|
||||
|
||||
private val delayedDischargeTimer = Timer("FlowHospitalDelayedDischargeTimer", true)
|
||||
/**
|
||||
* The node was unable to initiate the [InitialSessionMessage] from [sender].
|
||||
*/
|
||||
fun sessionInitErrored(sessionMessage: InitialSessionMessage, sender: Party, event: ExternalEvent.ExternalMessageEvent, error: Throwable) {
|
||||
val time = Instant.now()
|
||||
val time = clock.instant()
|
||||
val id = UUID.randomUUID()
|
||||
val outcome = if (error is SessionRejectException.UnknownClass) {
|
||||
// We probably don't have the CorDapp installed so let's pause the message in the hopes that the CorDapp is
|
||||
@ -104,11 +159,48 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
}
|
||||
|
||||
/**
|
||||
* The flow running in [flowFiber] has errored.
|
||||
* Forces the flow to be kept in for overnight observation by the hospital. A flow must already exist inside the hospital
|
||||
* and have existing medical records for it to be moved to overnight observation. If it does not meet these criteria then
|
||||
* an [IllegalArgumentException] will be thrown.
|
||||
*
|
||||
* @param id The [StateMachineRunId] of the flow that you are trying to force into observation
|
||||
* @param errors The errors to include in the new medical record
|
||||
*/
|
||||
fun flowErrored(flowFiber: FlowFiber, currentState: StateMachineState, errors: List<Throwable>) {
|
||||
val time = Instant.now()
|
||||
fun forceIntoOvernightObservation(id: StateMachineRunId, errors: List<Throwable>) {
|
||||
mutex.locked {
|
||||
// If a flow does not meet the criteria below, then it has moved into an invalid state or the function is being
|
||||
// called from an incorrect location. The assertions below should error out the flow if they are not true.
|
||||
requireNotNull(flowsInHospital[id]) { "Flow must already be in the hospital before forcing into overnight observation" }
|
||||
val history = requireNotNull(flowPatients[id]) { "Flow must already have history before forcing into overnight observation" }
|
||||
// Use the staff member that last discharged the flow as the current staff member
|
||||
val record = history.records.last().copy(
|
||||
time = clock.instant(),
|
||||
errors = errors,
|
||||
outcome = Outcome.OVERNIGHT_OBSERVATION
|
||||
)
|
||||
onFlowKeptForOvernightObservation.forEach { hook -> hook.invoke(id, record.by.map { it.toString() }) }
|
||||
history.records += record
|
||||
recordsPublisher.onNext(record)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Request treatment for the [flowFiber]. A flow can only be added to the hospital if they are not already being
|
||||
* treated.
|
||||
*/
|
||||
fun requestTreatment(flowFiber: FlowFiber, currentState: StateMachineState, errors: List<Throwable>) {
|
||||
// Only treat flows that are not already in the hospital
|
||||
if (!currentState.isRemoved && flowsInHospital.putIfAbsent(flowFiber.id, flowFiber) == null) {
|
||||
admit(flowFiber, currentState, errors)
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("ComplexMethod")
|
||||
private fun admit(flowFiber: FlowFiber, currentState: StateMachineState, errors: List<Throwable>) {
|
||||
val time = clock.instant()
|
||||
log.info("Flow ${flowFiber.id} admitted to hospital in state $currentState")
|
||||
onFlowAdmitted.forEach { it.invoke(flowFiber.id) }
|
||||
|
||||
val (event, backOffForChronicCondition) = mutex.locked {
|
||||
val medicalHistory = flowPatients.computeIfAbsent(flowFiber.id) { FlowMedicalHistory() }
|
||||
@ -119,15 +211,17 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
Diagnosis.DISCHARGE -> {
|
||||
val backOff = calculateBackOffForChronicCondition(report, medicalHistory, currentState)
|
||||
log.info("Flow error discharged from hospital (delay ${backOff.seconds}s) by ${report.by} (error was ${report.error.message})")
|
||||
onFlowDischarged.forEach { hook -> hook.invoke(flowFiber.id, report.by.map{it.toString()}) }
|
||||
Triple(Outcome.DISCHARGE, Event.RetryFlowFromSafePoint, backOff)
|
||||
}
|
||||
Diagnosis.OVERNIGHT_OBSERVATION -> {
|
||||
log.info("Flow error kept for overnight observation by ${report.by} (error was ${report.error.message})")
|
||||
// We don't schedule a next event for the flow - it will automatically retry from its checkpoint on node restart
|
||||
onFlowKeptForOvernightObservation.forEach { hook -> hook.invoke(flowFiber.id, report.by.map{it.toString()}) }
|
||||
Triple(Outcome.OVERNIGHT_OBSERVATION, null, 0.seconds)
|
||||
}
|
||||
Diagnosis.NOT_MY_SPECIALTY -> {
|
||||
// None of the staff care for these errors so we let them propagate
|
||||
Diagnosis.NOT_MY_SPECIALTY, Diagnosis.TERMINAL -> {
|
||||
// None of the staff care for these errors, or someone decided it is a terminal condition, so we let them propagate
|
||||
log.info("Flow error allowed to propagate", report.error)
|
||||
Triple(Outcome.UNTREATABLE, Event.StartErrorPropagation, 0.seconds)
|
||||
}
|
||||
@ -143,10 +237,8 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
if (backOffForChronicCondition.isZero) {
|
||||
flowFiber.scheduleEvent(event)
|
||||
} else {
|
||||
delayedDischargeTimer.schedule(object : TimerTask() {
|
||||
override fun run() {
|
||||
hospitalJobTimer.schedule(timerTask {
|
||||
flowFiber.scheduleEvent(event)
|
||||
}
|
||||
}, backOffForChronicCondition.toMillis())
|
||||
}
|
||||
}
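The hunk above replaces the anonymous `object : TimerTask()` with the `kotlin.concurrent.timerTask` helper. A minimal sketch of the new idiom, assuming a `hospitalJobTimer: Timer` field has taken over from `delayedDischargeTimer` (the timer name is an assumption):

import java.util.Timer
import kotlin.concurrent.timerTask

// Daemon timer used for delayed hospital work such as back-off discharges (name assumed).
private val hospitalJobTimer = Timer("FlowHospitalJobTimer", true)

// Re-schedule the discharge event only once the chronic-condition back-off has elapsed.
hospitalJobTimer.schedule(timerTask {
    flowFiber.scheduleEvent(event)
}, backOffForChronicCondition.toMillis())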
|
||||
@ -185,12 +277,19 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
private data class ConsultationReport(val error: Throwable, val diagnosis: Diagnosis, val by: List<Staff>)
|
||||
|
||||
/**
|
||||
* The flow has been removed from the state machine.
|
||||
* Remove the flow's medical history from the hospital.
|
||||
*/
|
||||
fun flowRemoved(flowId: StateMachineRunId) {
|
||||
fun removeMedicalHistory(flowId: StateMachineRunId) {
|
||||
mutex.locked { flowPatients.remove(flowId) }
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove the flow from the hospital as it is not currently being treated.
|
||||
*/
|
||||
fun leave(id: StateMachineRunId) {
|
||||
flowsInHospital.remove(id)
|
||||
}
|
||||
|
||||
// TODO MedicalRecord subtypes can expose the Staff class, something which we probably don't want when wiring this method to RPC
|
||||
/** Returns a stream of medical records as flows pass through the hospital. */
|
||||
fun track(): DataFeed<List<MedicalRecord>, MedicalRecord> {
|
||||
@ -251,6 +350,8 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
|
||||
/** The enum values are listed in priority order. */
|
||||
enum class Diagnosis {
|
||||
/** The flow should not see other staff members */
|
||||
TERMINAL,
|
||||
/** Retry from last safe point. */
|
||||
DISCHARGE,
|
||||
/** Park and await intervention. */
|
||||
@ -259,7 +360,6 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
NOT_MY_SPECIALTY
|
||||
}
|
||||
|
||||
|
||||
interface Staff {
|
||||
fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: FlowMedicalHistory): Diagnosis
|
||||
}
|
||||
@ -288,7 +388,8 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
*/
|
||||
object DuplicateInsertSpecialist : Staff {
|
||||
override fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: FlowMedicalHistory): Diagnosis {
|
||||
return if (newError.mentionsThrowable(ConstraintViolationException::class.java) && history.notDischargedForTheSameThingMoreThan(3, this, currentState)) {
|
||||
return if (newError.mentionsThrowable(ConstraintViolationException::class.java)
|
||||
&& history.notDischargedForTheSameThingMoreThan(2, this, currentState)) {
|
||||
Diagnosis.DISCHARGE
|
||||
} else {
|
||||
Diagnosis.NOT_MY_SPECIALTY
|
||||
@ -358,17 +459,21 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
val strippedStacktrace = error.stackTrace
|
||||
.filterNot { it?.className?.contains("counter-flow exception from peer") ?: false }
|
||||
.filterNot { it?.className?.startsWith("net.corda.node.services.statemachine.") ?: false }
|
||||
return strippedStacktrace.isNotEmpty() &&
|
||||
strippedStacktrace.first().className.startsWith(ReceiveTransactionFlow::class.qualifiedName!! )
|
||||
return strippedStacktrace.isNotEmpty()
|
||||
&& strippedStacktrace.first().className.startsWith(ReceiveTransactionFlow::class.qualifiedName!!)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* [SQLTransientConnectionException] detection that arises from failing to connect to the underlying database/datasource
|
||||
*/
|
||||
object TransientConnectionCardiologist : Staff {
|
||||
override fun consult(flowFiber: FlowFiber, currentState: StateMachineState, newError: Throwable, history: FlowMedicalHistory): Diagnosis {
|
||||
override fun consult(
|
||||
flowFiber: FlowFiber,
|
||||
currentState: StateMachineState,
|
||||
newError: Throwable,
|
||||
history: FlowMedicalHistory
|
||||
): Diagnosis {
|
||||
return if (mentionsTransientConnection(newError)) {
|
||||
if (history.notDischargedForTheSameThingMoreThan(2, this, currentState)) {
|
||||
Diagnosis.DISCHARGE
|
||||
@ -384,6 +489,72 @@ class StaffedFlowHospital(private val flowMessaging: FlowMessaging, private val
|
||||
return exception.mentionsThrowable(SQLTransientConnectionException::class.java, "connection is not available")
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Hospitalise any database (SQL and Persistence) exception that wasn't handled otherwise, unless on the configurable whitelist
|
||||
* Note that retry decisions from other specialists will not be affected as retries take precedence over hospitalisation.
|
||||
*/
|
||||
object DatabaseEndocrinologist : Staff {
|
||||
override fun consult(
|
||||
flowFiber: FlowFiber,
|
||||
currentState: StateMachineState,
|
||||
newError: Throwable,
|
||||
history: FlowMedicalHistory
|
||||
): Diagnosis {
|
||||
return if ((newError is SQLException || newError is PersistenceException) && !customConditions.any { it(newError) }) {
|
||||
Diagnosis.OVERNIGHT_OBSERVATION
|
||||
} else {
|
||||
Diagnosis.NOT_MY_SPECIALTY
|
||||
}
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
val customConditions = mutableSetOf<(t: Throwable) -> Boolean>()
|
||||
}
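Because `customConditions` is a plain mutable set of predicates, tests can whitelist specific exception types so that they bypass hospitalisation. A minimal sketch (the exception type is hypothetical):

// MyBenignException is a made-up type; any predicate over Throwable works here.
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add { t -> t is MyBenignException }
// ... run the scenario under test ...
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.clear()

The `RetryFlowMockTest` changes later in this diff register conditions in exactly this way and clear them again in `cleanUp`.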
|
||||
|
||||
/**
|
||||
* Handles exceptions from internal state transitions that are not dealt with by the rest of the staff.
|
||||
*
|
||||
* [InterruptedException]s are diagnosed as [Diagnosis.TERMINAL] so they are never retried
|
||||
* (can occur when a flow is killed - `killFlow`).
|
||||
* [AsyncOperationTransitionException]s are ignored as the error is likely to have originated in user async code rather than inside
|
||||
* of a transition.
|
||||
* All other exceptions are retried a maximum of 3 times before being kept in for observation.
|
||||
*/
|
||||
object TransitionErrorGeneralPractitioner : Staff {
|
||||
override fun consult(
|
||||
flowFiber: FlowFiber,
|
||||
currentState: StateMachineState,
|
||||
newError: Throwable,
|
||||
history: FlowMedicalHistory
|
||||
): Diagnosis {
|
||||
return if (newError.mentionsThrowable(StateTransitionException::class.java)) {
|
||||
when {
|
||||
newError.mentionsThrowable(InterruptedException::class.java) -> Diagnosis.TERMINAL
|
||||
newError.mentionsThrowable(AsyncOperationTransitionException::class.java) -> Diagnosis.NOT_MY_SPECIALTY
|
||||
history.notDischargedForTheSameThingMoreThan(2, this, currentState) -> Diagnosis.DISCHARGE
|
||||
else -> Diagnosis.OVERNIGHT_OBSERVATION
|
||||
}
|
||||
} else {
|
||||
Diagnosis.NOT_MY_SPECIALTY
|
||||
}.also { logDiagnosis(it, newError, flowFiber, history) }
|
||||
}
|
||||
|
||||
private fun logDiagnosis(diagnosis: Diagnosis, newError: Throwable, flowFiber: FlowFiber, history: FlowMedicalHistory) {
|
||||
if (diagnosis != Diagnosis.NOT_MY_SPECIALTY) {
|
||||
log.debug {
|
||||
"""
|
||||
Flow ${flowFiber.id} given $diagnosis diagnosis due to a transition error
|
||||
- Exception: ${newError.message}
|
||||
- History: $history
|
||||
${(newError as? StateTransitionException)?.transitionAction?.let { "- Action: $it" }}
|
||||
${(newError as? StateTransitionException)?.transitionEvent?.let { "- Event: $it" }}
|
||||
""".trimIndent()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private fun <T : Throwable> Throwable?.mentionsThrowable(exceptionType: Class<T>, errorMessage: String? = null): Boolean {
|
||||
@ -397,3 +568,4 @@ private fun <T : Throwable> Throwable?.mentionsThrowable(exceptionType: Class<T>
|
||||
}
|
||||
return (exceptionType.isAssignableFrom(this::class.java) && containsMessage) || cause.mentionsThrowable(exceptionType, errorMessage)
|
||||
}
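Only the tail of this helper is visible in the hunk; a minimal sketch of the full extension, assuming the elided lines null-check the receiver and do a case-insensitive message comparison (both assumptions):

private fun <T : Throwable> Throwable?.mentionsThrowable(exceptionType: Class<T>, errorMessage: String? = null): Boolean {
    if (this == null) {
        return false
    }
    // No message fragment supplied means any message matches.
    val containsMessage = errorMessage?.let { message?.toLowerCase()?.contains(it.toLowerCase()) ?: false } ?: true
    // Either this throwable matches, or something in its cause chain does.
    return (exceptionType.isAssignableFrom(this::class.java) && containsMessage) || cause.mentionsThrowable(exceptionType, errorMessage)
}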
|
||||
|
||||
|
@ -114,6 +114,7 @@ interface ExternalEvent {
|
||||
* An external P2P message event.
|
||||
*/
|
||||
interface ExternalMessageEvent : ExternalEvent {
|
||||
val flowId: StateMachineRunId
|
||||
val receivedMessage: ReceivedMessage
|
||||
}
|
||||
|
||||
@ -121,6 +122,7 @@ interface ExternalEvent {
|
||||
* An external request to start a flow, from the scheduler for example.
|
||||
*/
|
||||
interface ExternalStartFlowEvent<T> : ExternalEvent {
|
||||
val flowId: StateMachineRunId
|
||||
val flowLogic: FlowLogic<T>
|
||||
val context: InvocationContext
|
||||
|
||||
|
@ -0,0 +1,18 @@
|
||||
package net.corda.node.services.statemachine
|
||||
|
||||
import net.corda.core.CordaException
|
||||
import net.corda.core.serialization.ConstructorForDeserialization
|
||||
|
||||
// CORDA-3353 - These exceptions should not be propagated up to rpc as they suppress the real exceptions
|
||||
|
||||
class StateTransitionException(
|
||||
val transitionAction: Action?,
|
||||
val transitionEvent: Event?,
|
||||
val exception: Exception
|
||||
) : CordaException(exception.message, exception) {
|
||||
|
||||
@ConstructorForDeserialization
|
||||
constructor(exception: Exception): this(null, null, exception)
|
||||
}
|
||||
|
||||
class AsyncOperationTransitionException(exception: Exception) : CordaException(exception.message, exception)
|
@ -9,6 +9,7 @@ import net.corda.nodeapi.internal.persistence.CordaPersistence
|
||||
import net.corda.nodeapi.internal.persistence.contextDatabase
|
||||
import net.corda.nodeapi.internal.persistence.contextTransactionOrNull
|
||||
import java.security.SecureRandom
|
||||
import javax.persistence.OptimisticLockException
|
||||
|
||||
/**
|
||||
* This [TransitionExecutor] runs the transition actions using the passed in [ActionExecutor] and manually dirties the
|
||||
@ -27,6 +28,7 @@ class TransitionExecutorImpl(
|
||||
val log = contextLogger()
|
||||
}
|
||||
|
||||
@Suppress("NestedBlockDepth", "ReturnCount")
|
||||
@Suspendable
|
||||
override fun executeTransition(
|
||||
fiber: FlowFiber,
|
||||
@ -47,15 +49,24 @@ class TransitionExecutorImpl(
|
||||
// Instead we just keep around the old error state and wait for a new schedule, perhaps
|
||||
// triggered from a flow hospital
|
||||
log.warn("Error while executing $action during transition to errored state, aborting transition", exception)
|
||||
// CORDA-3354 - Go to the hospital with the new error that has occurred
|
||||
// while already in a error state (as this error could be for a different reason)
|
||||
return Pair(FlowContinuation.Abort, previousState.copy(isFlowResumed = false))
|
||||
} else {
|
||||
// Otherwise error the state manually keeping the old flow state and schedule a DoRemainingWork
|
||||
// to trigger error propagation
|
||||
log.info("Error while executing $action, erroring state", exception)
|
||||
log.info("Error while executing $action, with event $event, erroring state", exception)
|
||||
if(previousState.isRemoved && exception is OptimisticLockException) {
|
||||
log.debug("Flow has been killed and the following error is likely due to the flow's checkpoint being deleted. " +
|
||||
"Occurred while executing $action, with event $event", exception)
|
||||
} else {
|
||||
log.info("Error while executing $action, with event $event, erroring state", exception)
|
||||
}
|
||||
val newState = previousState.copy(
|
||||
checkpoint = previousState.checkpoint.copy(
|
||||
errorState = previousState.checkpoint.errorState.addErrors(
|
||||
listOf(FlowError(secureRandom.nextLong(), exception))
|
||||
// Wrap the exception with [StateTransitionException] for handling by the flow hospital
|
||||
listOf(FlowError(secureRandom.nextLong(), StateTransitionException(action, event, exception)))
|
||||
)
|
||||
),
|
||||
isFlowResumed = false
|
||||
|
@ -19,6 +19,7 @@ import java.util.concurrent.ConcurrentHashMap
|
||||
* This interceptor records a trace of all of the flows' states and transitions. If the flow dirties, it dumps the
* transition trace to the logger.
|
||||
*/
|
||||
@Suppress("MaxLineLength") // detekt confusing the whole if statement for a line
|
||||
class DumpHistoryOnErrorInterceptor(val delegate: TransitionExecutor) : TransitionExecutor {
|
||||
companion object {
|
||||
private val log = contextLogger()
|
||||
@ -34,8 +35,12 @@ class DumpHistoryOnErrorInterceptor(val delegate: TransitionExecutor) : Transiti
|
||||
transition: TransitionResult,
|
||||
actionExecutor: ActionExecutor
|
||||
): Pair<FlowContinuation, StateMachineState> {
|
||||
val (continuation, nextState) = delegate.executeTransition(fiber, previousState, event, transition, actionExecutor)
|
||||
val transitionRecord = TransitionDiagnosticRecord(Instant.now(), fiber.id, previousState, nextState, event, transition, continuation)
|
||||
val (continuation, nextState)
|
||||
= delegate.executeTransition(fiber, previousState, event, transition, actionExecutor)
|
||||
|
||||
if (!previousState.isRemoved) {
|
||||
val transitionRecord =
|
||||
TransitionDiagnosticRecord(Instant.now(), fiber.id, previousState, nextState, event, transition, continuation)
|
||||
val record = records.compute(fiber.id) { _, record ->
|
||||
(record ?: ArrayList()).apply { add(transitionRecord) }
|
||||
}
|
||||
@ -48,6 +53,7 @@ class DumpHistoryOnErrorInterceptor(val delegate: TransitionExecutor) : Transiti
|
||||
log.warn("Flow ${fiber.id} error", error.exception)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (nextState.isRemoved) {
|
||||
records.remove(fiber.id)
|
||||
|
@ -11,7 +11,6 @@ import net.corda.node.services.statemachine.StateMachineState
|
||||
import net.corda.node.services.statemachine.TransitionExecutor
|
||||
import net.corda.node.services.statemachine.transitions.FlowContinuation
|
||||
import net.corda.node.services.statemachine.transitions.TransitionResult
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
|
||||
/**
|
||||
* This interceptor notifies the passed in [flowHospital] in case a flow went through a clean->errored or an errored->clean
|
||||
@ -27,12 +26,10 @@ class HospitalisingInterceptor(
|
||||
}
|
||||
|
||||
private fun removeFlow(id: StateMachineRunId) {
|
||||
hospitalisedFlows.remove(id)
|
||||
flowHospital.flowRemoved(id)
|
||||
flowHospital.leave(id)
|
||||
flowHospital.removeMedicalHistory(id)
|
||||
}
|
||||
|
||||
private val hospitalisedFlows = ConcurrentHashMap<StateMachineRunId, FlowFiber>()
|
||||
|
||||
@Suspendable
|
||||
override fun executeTransition(
|
||||
fiber: FlowFiber,
|
||||
@ -41,18 +38,18 @@ class HospitalisingInterceptor(
|
||||
transition: TransitionResult,
|
||||
actionExecutor: ActionExecutor
|
||||
): Pair<FlowContinuation, StateMachineState> {
|
||||
|
||||
// If the fiber's previous state was clean then remove it from the hospital
|
||||
// This is important for retrying a flow that has errored during a state machine transition
|
||||
if (previousState.checkpoint.errorState is ErrorState.Clean) {
|
||||
flowHospital.leave(fiber.id)
|
||||
}
|
||||
|
||||
val (continuation, nextState) = delegate.executeTransition(fiber, previousState, event, transition, actionExecutor)
|
||||
|
||||
when (nextState.checkpoint.errorState) {
|
||||
is ErrorState.Clean -> {
|
||||
hospitalisedFlows.remove(fiber.id)
|
||||
}
|
||||
is ErrorState.Errored -> {
|
||||
if (nextState.checkpoint.errorState is ErrorState.Errored && previousState.checkpoint.errorState is ErrorState.Clean) {
|
||||
val exceptionsToHandle = nextState.checkpoint.errorState.errors.map { it.exception }
|
||||
if (hospitalisedFlows.putIfAbsent(fiber.id, fiber) == null) {
|
||||
flowHospital.flowErrored(fiber, previousState, exceptionsToHandle)
|
||||
}
|
||||
}
|
||||
flowHospital.requestTreatment(fiber, previousState, exceptionsToHandle)
|
||||
}
|
||||
if (nextState.isRemoved) {
|
||||
removeFlow(fiber.id)
|
||||
|
@ -72,6 +72,7 @@ class PersistentUniquenessProvider(val clock: Clock, val database: CordaPersiste
|
||||
var requestDate: Instant
|
||||
)
|
||||
|
||||
@Suppress("MagicNumber") // database column length
|
||||
@Entity
|
||||
@javax.persistence.Table(name = "${NODE_DATABASE_PREFIX}notary_committed_txs")
|
||||
class CommittedTransaction(
|
||||
|
@ -12,6 +12,7 @@ import javax.persistence.Entity
|
||||
import javax.persistence.Id
|
||||
import javax.persistence.Table
|
||||
|
||||
@Suppress("MagicNumber") // database column length
|
||||
class ContractUpgradeServiceImpl(cacheFactory: NamedCacheFactory) : ContractUpgradeService, SingletonSerializeAsToken() {
|
||||
|
||||
@Entity
|
||||
|
@ -23,6 +23,7 @@ import net.corda.node.services.statemachine.FlowStateMachineImpl
|
||||
import net.corda.nodeapi.internal.persistence.*
|
||||
import org.hibernate.Session
|
||||
import rx.Observable
|
||||
import rx.exceptions.OnErrorNotImplementedException
|
||||
import rx.subjects.PublishSubject
|
||||
import java.security.PublicKey
|
||||
import java.time.Clock
|
||||
@ -390,7 +391,15 @@ class NodeVaultService(
|
||||
}
|
||||
}
|
||||
persistentStateService.persist(vaultUpdate.produced + vaultUpdate.references)
|
||||
try {
|
||||
updatesPublisher.onNext(vaultUpdate)
|
||||
} catch (e: OnErrorNotImplementedException) {
|
||||
log.warn("Caught an Rx.OnErrorNotImplementedException " +
|
||||
"- caused by an exception in an RX observer that was unhandled " +
|
||||
"- the observer has been unsubscribed! The underlying exception will be rethrown.", e)
|
||||
// if the observer code threw, unwrap their exception from the RX wrapper
|
||||
throw e.cause ?: e
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ object VaultSchema
|
||||
/**
|
||||
* First version of the Vault ORM schema
|
||||
*/
|
||||
@Suppress("MagicNumber") // database column length
|
||||
@CordaSerializable
|
||||
object VaultSchemaV1 : MappedSchema(
|
||||
schemaFamily = VaultSchema.javaClass,
|
||||
|
@ -127,6 +127,7 @@ class BFTSmartNotaryService(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("MagicNumber") // database column length
|
||||
@Entity
|
||||
@Table(name = "${NODE_DATABASE_PREFIX}bft_committed_txs")
|
||||
class CommittedTransaction(
|
||||
|
@ -104,6 +104,7 @@ class RaftUniquenessProvider(
|
||||
var index: Long = 0
|
||||
)
|
||||
|
||||
@Suppress("MagicNumber") // database column length
|
||||
@Entity
|
||||
@Table(name = "${NODE_DATABASE_PREFIX}raft_committed_txs")
|
||||
class CommittedTransaction(
|
||||
|
@ -11,4 +11,5 @@
|
||||
<include file="migration/vault-schema.changelog-v6.xml"/>
|
||||
<include file="migration/vault-schema.changelog-v7.xml"/>
|
||||
<include file="migration/vault-schema.changelog-v8.xml"/>
|
||||
<include file="migration/vault-schema.changelog-v11.xml"/>
|
||||
</databaseChangeLog>
|
||||
|
@ -0,0 +1,11 @@
|
||||
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
|
||||
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd">
|
||||
|
||||
<changeSet author="R3.Corda" id="expand_constraint_data_size">
|
||||
<modifyDataType tableName="vault_states"
|
||||
columnName="constraint_data"
|
||||
newDataType="varbinary(20000)"/>
|
||||
</changeSet>
|
||||
</databaseChangeLog>
|
@ -37,7 +37,8 @@ import net.corda.testing.internal.LogHelper
|
||||
import net.corda.testing.node.InMemoryMessagingNetwork.MessageTransfer
|
||||
import net.corda.testing.node.InMemoryMessagingNetwork.ServicePeerAllocationStrategy.RoundRobin
|
||||
import net.corda.testing.node.internal.*
|
||||
import org.assertj.core.api.Assertions.*
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
import org.assertj.core.api.Assertions.assertThatIllegalArgumentException
|
||||
import org.assertj.core.api.AssertionsForClassTypes.assertThatExceptionOfType
|
||||
import org.assertj.core.api.Condition
|
||||
import org.junit.After
|
||||
@ -115,18 +116,16 @@ class FlowFrameworkTests {
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `exception while fiber suspended`() {
|
||||
fun `exception while fiber suspended is retried and completes successfully`() {
|
||||
bobNode.registerCordappFlowFactory(ReceiveFlow::class) { InitiatedSendFlow("Hello", it) }
|
||||
val flow = ReceiveFlow(bob)
|
||||
val fiber = aliceNode.services.startFlow(flow) as FlowStateMachineImpl
|
||||
// Before the flow runs change the suspend action to throw an exception
|
||||
val exceptionDuringSuspend = Exception("Thrown during suspend")
|
||||
val throwingActionExecutor = SuspendThrowingActionExecutor(exceptionDuringSuspend, fiber.transientValues!!.value.actionExecutor)
|
||||
val throwingActionExecutor = SuspendThrowingActionExecutor(Exception("Thrown during suspend"),
|
||||
fiber.transientValues!!.value.actionExecutor)
|
||||
fiber.transientValues = TransientReference(fiber.transientValues!!.value.copy(actionExecutor = throwingActionExecutor))
|
||||
mockNet.runNetwork()
|
||||
assertThatThrownBy {
|
||||
fiber.resultFuture.getOrThrow()
|
||||
}.isSameAs(exceptionDuringSuspend)
|
||||
assertThat(aliceNode.smm.allStateMachines).isEmpty()
|
||||
// Make sure the fiber does actually terminate
|
||||
assertThat(fiber.state).isEqualTo(Strand.State.WAITING)
|
||||
|
@ -2,14 +2,18 @@ package net.corda.node.services.statemachine
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import net.corda.core.concurrent.CordaFuture
|
||||
import net.corda.core.flows.*
|
||||
import net.corda.core.flows.Destination
|
||||
import net.corda.core.flows.FlowInfo
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.FlowSession
|
||||
import net.corda.core.flows.InitiatedBy
|
||||
import net.corda.core.flows.InitiatingFlow
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.internal.FlowStateMachine
|
||||
import net.corda.core.internal.concurrent.flatMap
|
||||
import net.corda.core.messaging.MessageRecipients
|
||||
import net.corda.core.utilities.UntrustworthyData
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.unwrap
|
||||
import net.corda.node.services.FinalityHandler
|
||||
import net.corda.node.services.messaging.Message
|
||||
@ -17,11 +21,13 @@ import net.corda.node.services.persistence.DBTransactionStorage
|
||||
import net.corda.nodeapi.internal.persistence.contextTransaction
|
||||
import net.corda.testing.common.internal.eventually
|
||||
import net.corda.testing.core.TestIdentity
|
||||
import net.corda.testing.node.internal.*
|
||||
import net.corda.testing.node.internal.InternalMockNetwork
|
||||
import net.corda.testing.node.internal.MessagingServiceSpy
|
||||
import net.corda.testing.node.internal.TestStartedNode
|
||||
import net.corda.testing.node.internal.enclosedCordapp
|
||||
import net.corda.testing.node.internal.newContext
|
||||
import org.assertj.core.api.Assertions.assertThat
|
||||
import org.assertj.core.api.Assertions.assertThatThrownBy
|
||||
import org.h2.util.Utils
|
||||
import org.hibernate.exception.ConstraintViolationException
|
||||
import org.junit.After
|
||||
import org.junit.Assert.assertTrue
|
||||
import org.junit.Before
|
||||
@ -49,6 +55,8 @@ class RetryFlowMockTest {
|
||||
SendAndRetryFlow.count = 0
|
||||
RetryInsertFlow.count = 0
|
||||
KeepSendingFlow.count.set(0)
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add { t -> t is LimitedRetryCausingError }
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add { t -> t is RetryCausingError }
|
||||
}
|
||||
|
||||
private fun <T> TestStartedNode.startFlow(logic: FlowLogic<T>): CordaFuture<T> {
|
||||
@ -58,6 +66,7 @@ class RetryFlowMockTest {
|
||||
@After
|
||||
fun cleanUp() {
|
||||
mockNet.stopNodes()
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.clear()
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -66,14 +75,6 @@ class RetryFlowMockTest {
|
||||
assertEquals(2, RetryFlow.count)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `Retry forever`() {
|
||||
assertThatThrownBy {
|
||||
nodeA.startFlow(RetryFlow(Int.MAX_VALUE)).getOrThrow()
|
||||
}.isInstanceOf(LimitedRetryCausingError::class.java)
|
||||
assertEquals(5, RetryFlow.count)
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `Retry does not set senderUUID`() {
|
||||
val messagesSent = Collections.synchronizedList(mutableListOf<Message>())
|
||||
@ -184,8 +185,7 @@ class RetryFlowMockTest {
|
||||
assertThat(nodeA.smm.flowHospital.track().snapshot).isEmpty()
|
||||
}
|
||||
|
||||
|
||||
class LimitedRetryCausingError : ConstraintViolationException("Test message", SQLException(), "Test constraint")
|
||||
class LimitedRetryCausingError : IllegalStateException("I am going to live forever")
|
||||
|
||||
class RetryCausingError : SQLException("deadlock")
|
||||
|
||||
|
@ -6,6 +6,7 @@ import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.node.services.queryBy
|
||||
import net.corda.core.transactions.TransactionBuilder
|
||||
import net.corda.node.services.statemachine.StaffedFlowHospital
|
||||
import net.corda.testing.core.DummyCommandData
|
||||
import net.corda.testing.core.singleIdentity
|
||||
import net.corda.testing.internal.vault.DUMMY_DEAL_PROGRAM_ID
|
||||
@ -16,12 +17,13 @@ import net.corda.testing.node.MockNetwork
|
||||
import net.corda.testing.node.MockNetworkNotarySpec
|
||||
import net.corda.testing.node.MockNodeParameters
|
||||
import net.corda.testing.node.StartedMockNode
|
||||
import org.assertj.core.api.Assertions
|
||||
import org.junit.After
|
||||
import org.junit.Before
|
||||
import org.junit.Test
|
||||
import java.util.concurrent.ExecutionException
|
||||
import java.util.concurrent.CountDownLatch
|
||||
import java.util.concurrent.TimeUnit
|
||||
import kotlin.test.assertEquals
|
||||
import kotlin.test.assertTrue
|
||||
|
||||
class VaultFlowTest {
|
||||
private lateinit var mockNetwork: MockNetwork
|
||||
@ -48,14 +50,19 @@ class VaultFlowTest {
|
||||
@After
|
||||
fun tearDown() {
|
||||
mockNetwork.stopNodes()
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.clear()
|
||||
StaffedFlowHospital.onFlowKeptForOvernightObservation.clear()
|
||||
}
|
||||
|
||||
@Test
|
||||
fun `Unique column constraint failing causes states to not persist to vaults`() {
|
||||
StaffedFlowHospital.DatabaseEndocrinologist.customConditions.add( { t: Throwable -> t is javax.persistence.PersistenceException })
|
||||
partyA.startFlow(Initiator(listOf(partyA.info.singleIdentity(), partyB.info.singleIdentity()))).get()
|
||||
Assertions.assertThatExceptionOfType(ExecutionException::class.java).isThrownBy {
|
||||
partyA.startFlow(Initiator(listOf(partyA.info.singleIdentity(), partyB.info.singleIdentity()))).get()
|
||||
}
|
||||
val hospitalLatch = CountDownLatch(1)
|
||||
StaffedFlowHospital.onFlowKeptForOvernightObservation.add { _, _ -> hospitalLatch.countDown() }
|
||||
partyA.startFlow(Initiator(listOf(partyA.info.singleIdentity(), partyB.info.singleIdentity())))
|
||||
assertTrue(hospitalLatch.await(10, TimeUnit.SECONDS), "Flow not hospitalised")
|
||||
|
||||
assertEquals(1, partyA.transaction {
|
||||
partyA.services.vaultService.queryBy<UniqueDummyLinearContract.State>().states.size
|
||||
})
|
||||
|
@ -5,7 +5,7 @@ Please refer to `README.md` in the individual project folders. There are the following demos:
* **attachment-demo** A simple demonstration of sending a transaction with an attachment from one node to another, and then accessing the attachment on the remote node.
* **irs-demo** A demo showing two nodes agreeing to an interest rate swap and doing fixings using an oracle.
* **trader-demo** A simple driver for exercising the two party trading flow. In this scenario, a buyer wants to purchase some commercial paper by swapping his cash for commercial paper. The seller learns that the buyer exists, and sends them a message to kick off the trade. The seller, having obtained his cash, then quits and the buyer goes back to waiting. The buyer will buy as much CP as he can! **We recommend starting with this demo.**
* **Network-visualiser** A tool that uses a simulation to visualise the interaction and messages between nodes on the Corda network. Currently only works for the IRS demo.
* **simm-valuation-demo** A demo showing two nodes reaching agreement on the valuation of a derivatives portfolio.
* **notary-demo** A simple demonstration of a node getting multiple transactions notarised by a single or distributed (Raft or BFT SMaRt) notary.
* **bank-of-corda-demo** A demo showing a node acting as an issuer of fungible assets (initially Cash)
* **network-verifier** A very simple CorDapp that can be used to test that communication over a Corda network works.
samples/network-verifier/README.md (new file, 13 lines)
@ -0,0 +1,13 @@
Network verifier
----------------

Simple CorDapp that can be used to verify the setup of a Corda network.
It contacts every other network participant and receives a reply from them.
It also creates a transaction and finalizes it.

This makes sure that all basic Corda functionality works.

*Usage:*

- From the RPC, just run the ``TestCommsFlowInitiator`` flow and inspect the result, as shown in the example below. There should be a "Hello" message from every other participant.
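For example, from the node's interactive shell (a hedged illustration, assuming a standard shell session against a running node):

    flow start TestCommsFlowInitiator

Inspect the returned list for a response from every other participant.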
@ -81,6 +81,8 @@ include 'serialization'
|
||||
include 'serialization-djvm'
|
||||
include 'serialization-djvm:deserializers'
|
||||
include 'serialization-tests'
|
||||
include 'testing:cordapps:dbfailure:dbfcontracts'
|
||||
include 'testing:cordapps:dbfailure:dbfworkflows'
|
||||
|
||||
// Common libraries - start
|
||||
include 'common-validation'
|
||||
|
testing/cordapps/dbfailure/dbfcontracts/build.gradle (new file, 18 lines)
@ -0,0 +1,18 @@
|
||||
apply plugin: 'kotlin'
|
||||
//apply plugin: 'net.corda.plugins.cordapp'
|
||||
//apply plugin: 'net.corda.plugins.quasar-utils'
|
||||
|
||||
repositories {
|
||||
mavenLocal()
|
||||
mavenCentral()
|
||||
maven { url "$artifactory_contextUrl/corda-dependencies" }
|
||||
maven { url "$artifactory_contextUrl/corda" }
|
||||
}
|
||||
|
||||
dependencies {
|
||||
compile project(":core")
|
||||
}
|
||||
|
||||
jar{
|
||||
baseName "testing-dbfailure-contracts"
|
||||
}
|
@ -0,0 +1,50 @@
|
||||
package com.r3.dbfailure.contracts
|
||||
|
||||
import com.r3.dbfailure.schemas.DbFailureSchemaV1
|
||||
import net.corda.core.contracts.CommandData
|
||||
import net.corda.core.contracts.Contract
|
||||
import net.corda.core.contracts.LinearState
|
||||
import net.corda.core.contracts.UniqueIdentifier
|
||||
import net.corda.core.identity.AbstractParty
|
||||
import net.corda.core.identity.Party
|
||||
import net.corda.core.schemas.MappedSchema
|
||||
import net.corda.core.schemas.PersistentState
|
||||
import net.corda.core.schemas.QueryableState
|
||||
import net.corda.core.transactions.LedgerTransaction
|
||||
import java.lang.IllegalArgumentException
|
||||
|
||||
class DbFailureContract : Contract {
|
||||
companion object {
|
||||
@JvmStatic
|
||||
val ID = "com.r3.dbfailure.contracts.DbFailureContract"
|
||||
}
|
||||
|
||||
class TestState(
|
||||
override val linearId: UniqueIdentifier,
|
||||
val particpant: Party,
|
||||
val randomValue: String?,
|
||||
val errorTarget: Int = 0
|
||||
) : LinearState, QueryableState {
|
||||
|
||||
override val participants: List<AbstractParty> = listOf(particpant)
|
||||
|
||||
override fun supportedSchemas(): Iterable<MappedSchema> = listOf(DbFailureSchemaV1)
|
||||
|
||||
override fun generateMappedObject(schema: MappedSchema): PersistentState {
|
||||
return if (schema is DbFailureSchemaV1){
|
||||
DbFailureSchemaV1.PersistentTestState( particpant.name.toString(), randomValue, errorTarget, linearId.id)
|
||||
}
|
||||
else {
|
||||
throw IllegalArgumentException("Unsupported schema $schema")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
override fun verify(tx: LedgerTransaction) {
|
||||
// no op - don't care for now
|
||||
}
|
||||
|
||||
interface Commands : CommandData{
|
||||
class Create: Commands
|
||||
}
|
||||
}
|
@ -0,0 +1,35 @@
|
||||
package com.r3.dbfailure.schemas
|
||||
|
||||
import net.corda.core.schemas.MappedSchema
|
||||
import net.corda.core.schemas.PersistentState
|
||||
import java.util.*
|
||||
import javax.persistence.Column
|
||||
import javax.persistence.Entity
|
||||
import javax.persistence.Table
|
||||
|
||||
object DbFailureSchema
|
||||
|
||||
object DbFailureSchemaV1 : MappedSchema(
|
||||
schemaFamily = DbFailureSchema.javaClass,
|
||||
version = 1,
|
||||
mappedTypes = listOf(DbFailureSchemaV1.PersistentTestState::class.java)){
|
||||
override val migrationResource = "dbfailure.changelog-master"
|
||||
|
||||
@Entity
|
||||
@Table( name = "fail_test_states")
|
||||
class PersistentTestState(
|
||||
@Column( name = "participant")
|
||||
var participantName: String,
|
||||
|
||||
@Column( name = "random_value", nullable = false)
|
||||
var randomValue: String?,
|
||||
|
||||
@Column( name = "error_target")
|
||||
var errorTarget: Int,
|
||||
|
||||
@Column( name = "linear_id")
|
||||
var linearId: UUID
|
||||
) : PersistentState() {
|
||||
constructor() : this( "", "", 0, UUID.randomUUID())
|
||||
}
|
||||
}
|
@ -0,0 +1,8 @@
|
||||
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
|
||||
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog" xmlns:ext="http://www.liquibase.org/xml/ns/dbchangelog-ext" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog-ext http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-ext.xsd http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd" >
|
||||
<changeSet author="R3.Corda" id="test dbfailure error target">
|
||||
<addColumn tableName="fail_test_states">
|
||||
<column name="error_target" type="INT"></column>
|
||||
</addColumn>
|
||||
</changeSet>
|
||||
</databaseChangeLog>
|
@ -0,0 +1,20 @@
|
||||
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
|
||||
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog" xmlns:ext="http://www.liquibase.org/xml/ns/dbchangelog-ext" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog-ext http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-ext.xsd http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd" >
|
||||
<changeSet author="R3.Corda" id="test dbfailure init">
|
||||
<createTable tableName="fail_test_states">
|
||||
<column name="output_index" type="INT">
|
||||
<constraints nullable="false"/>
|
||||
</column>
|
||||
<column name="transaction_id" type="NVARCHAR(64)">
|
||||
<constraints nullable="false"/>
|
||||
</column>
|
||||
<column name="participant" type="NVARCHAR(255)">
|
||||
<constraints nullable="false"/>
|
||||
</column>
|
||||
<column name="random_value" type="NVARCHAR(255)">
|
||||
<constraints nullable="false"/>
|
||||
</column>
|
||||
<column name="linear_id" type="BINARY(255)"/>
|
||||
</createTable>
|
||||
</changeSet>
|
||||
</databaseChangeLog>
|
@ -0,0 +1,8 @@
|
||||
<?xml version="1.1" encoding="UTF-8" standalone="no"?>
|
||||
<databaseChangeLog xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.5.xsd">
|
||||
|
||||
<include file="migration/dbfailure.changelog-init.xml"/>
|
||||
<include file="migration/dbfailure.changelog-errortarget.xml"/>
|
||||
</databaseChangeLog>
|
testing/cordapps/dbfailure/dbfworkflows/build.gradle (new file, 12 lines)
@ -0,0 +1,12 @@
|
||||
apply plugin: 'kotlin'
|
||||
//apply plugin: 'net.corda.plugins.cordapp'
|
||||
//apply plugin: 'net.corda.plugins.quasar-utils'
|
||||
|
||||
dependencies {
|
||||
compile project(":core")
|
||||
compile project(":testing:cordapps:dbfailure:dbfcontracts")
|
||||
}
|
||||
|
||||
jar{
|
||||
baseName "testing-dbfailure-workflows"
|
||||
}
|
@ -0,0 +1,99 @@
|
||||
package com.r3.dbfailure.workflows
|
||||
|
||||
import co.paralleluniverse.fibers.Suspendable
|
||||
import com.r3.dbfailure.contracts.DbFailureContract
|
||||
import net.corda.core.contracts.Command
|
||||
import net.corda.core.contracts.UniqueIdentifier
|
||||
import net.corda.core.flows.FlowLogic
|
||||
import net.corda.core.flows.InitiatingFlow
|
||||
import net.corda.core.flows.StartableByRPC
|
||||
import net.corda.core.transactions.TransactionBuilder
|
||||
|
||||
// There is a bit of number fiddling in this class to encode/decode the error target instructions
|
||||
@Suppress("MagicNumber")
|
||||
object CreateStateFlow {
|
||||
|
||||
// Encoding of error targets
|
||||
// 1s are errors actions to be taken in the vault listener in the service
|
||||
// 10s are errors caused in the flow
|
||||
// 100s control exception handling in the flow
|
||||
// 1000s control exception handling in the service/vault listener
|
||||
enum class ErrorTarget(val targetNumber: Int) {
|
||||
NoError(0),
|
||||
ServiceSqlSyntaxError(1),
|
||||
ServiceNullConstraintViolation(2),
|
||||
ServiceValidUpdate(3),
|
||||
ServiceReadState(4),
|
||||
ServiceCheckForState(5),
|
||||
ServiceThrowInvalidParameter(6),
|
||||
TxInvalidState(10),
|
||||
FlowSwallowErrors(100),
|
||||
ServiceSwallowErrors(1000)
|
||||
}
|
||||
|
||||
fun errorTargetsToNum(vararg targets: ErrorTarget): Int {
|
||||
return targets.map { it.targetNumber }.sum()
|
||||
}
|
||||
|
||||
private val targetMap = ErrorTarget.values().associateBy(ErrorTarget::targetNumber)
|
||||
|
||||
fun getServiceTarget(target: Int?): ErrorTarget {
|
||||
return target?.let { targetMap.getValue(it % 10) } ?: CreateStateFlow.ErrorTarget.NoError
|
||||
}
|
||||
|
||||
fun getServiceExceptionHandlingTarget(target: Int?): ErrorTarget {
|
||||
return target?.let { targetMap.getValue(((it / 1000) % 10) * 1000) } ?: CreateStateFlow.ErrorTarget.NoError
|
||||
}
|
||||
|
||||
fun getTxTarget(target: Int?): ErrorTarget {
|
||||
return target?.let { targetMap.getValue(((it / 10) % 10) * 10) } ?: CreateStateFlow.ErrorTarget.NoError
|
||||
}
|
||||
|
||||
fun getFlowTarget(target: Int?): ErrorTarget {
|
||||
return target?.let { targetMap.getValue(((it / 100) % 10) * 100) } ?: CreateStateFlow.ErrorTarget.NoError
|
||||
}
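To make the digit scheme above concrete, a small worked decoding (values taken from the enum above):

// errorTargetsToNum(ErrorTarget.ServiceSqlSyntaxError, ErrorTarget.FlowSwallowErrors) == 1 + 100 == 101
// getServiceTarget(101)                  -> ServiceSqlSyntaxError  (101 % 10 == 1)
// getTxTarget(101)                       -> NoError                (((101 / 10) % 10) * 10 == 0)
// getFlowTarget(101)                     -> FlowSwallowErrors      (((101 / 100) % 10) * 100 == 100)
// getServiceExceptionHandlingTarget(101) -> NoError                (((101 / 1000) % 10) * 1000 == 0)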
|
||||
|
||||
@InitiatingFlow
|
||||
@StartableByRPC
|
||||
class Initiator(private val randomValue: String, private val errorTarget: Int) : FlowLogic<UniqueIdentifier>() {
|
||||
|
||||
@Suspendable
|
||||
override fun call(): UniqueIdentifier {
|
||||
logger.info("Test flow: starting")
|
||||
val notary = serviceHub.networkMapCache.notaryIdentities[0]
|
||||
val txTarget = getTxTarget(errorTarget)
|
||||
logger.info("Test flow: The tx error target is $txTarget")
|
||||
val state = DbFailureContract.TestState(
|
||||
UniqueIdentifier(),
|
||||
ourIdentity,
|
||||
if (txTarget == CreateStateFlow.ErrorTarget.TxInvalidState) null else randomValue,
|
||||
errorTarget)
|
||||
val txCommand = Command(DbFailureContract.Commands.Create(), ourIdentity.owningKey)
|
||||
|
||||
logger.info("Test flow: tx builder")
|
||||
val txBuilder = TransactionBuilder(notary)
|
||||
.addOutputState(state)
|
||||
.addCommand(txCommand)
|
||||
|
||||
logger.info("Test flow: verify")
|
||||
txBuilder.verify(serviceHub)
|
||||
|
||||
val signedTx = serviceHub.signInitialTransaction(txBuilder)
|
||||
|
||||
@Suppress("TooGenericExceptionCaught") // this is fully intentional here, to allow twiddling with exceptions according to config
|
||||
try {
|
||||
logger.info("Test flow: recording transaction")
|
||||
serviceHub.recordTransactions(signedTx)
|
||||
} catch (t: Throwable) {
|
||||
if (getFlowTarget(errorTarget) == CreateStateFlow.ErrorTarget.FlowSwallowErrors) {
|
||||
logger.info("Test flow: Swallowing all exception! Muahahaha!", t)
|
||||
} else {
|
||||
logger.info("Test flow: caught exception - rethrowing")
|
||||
throw t
|
||||
}
|
||||
}
|
||||
logger.info("Test flow: returning")
|
||||
return state.linearId
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,98 @@
|
||||
package com.r3.dbfailure.workflows
|
||||
|
||||
import com.r3.dbfailure.contracts.DbFailureContract
|
||||
import net.corda.core.node.AppServiceHub
|
||||
import net.corda.core.node.services.CordaService
|
||||
import net.corda.core.serialization.SingletonSerializeAsToken
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import java.security.InvalidParameterException
|
||||
|
||||
@CordaService
|
||||
class DbListenerService(services: AppServiceHub) : SingletonSerializeAsToken() {
|
||||
|
||||
companion object {
|
||||
val log = contextLogger()
|
||||
}
|
||||
|
||||
init {
|
||||
services.vaultService.rawUpdates.subscribe { (_, produced) ->
|
||||
produced.forEach {
|
||||
val contractState = it.state.data as? DbFailureContract.TestState
|
||||
@Suppress("TooGenericExceptionCaught") // this is fully intentional here, to allow twiddling with exceptions
|
||||
try {
|
||||
when (CreateStateFlow.getServiceTarget(contractState?.errorTarget)) {
|
||||
CreateStateFlow.ErrorTarget.ServiceSqlSyntaxError -> {
|
||||
log.info("Fail with syntax error on raw statement")
|
||||
val session = services.jdbcSession()
|
||||
val statement = session.createStatement()
|
||||
statement.execute(
|
||||
"UPDATE FAIL_TEST_STATES \n" +
|
||||
"BLAAA RANDOM_VALUE = NULL\n" +
|
||||
"WHERE transaction_id = '${it.ref.txhash}' AND output_index = ${it.ref.index};"
|
||||
)
|
||||
log.info("SQL result: ${statement.resultSet}")
|
||||
}
|
||||
CreateStateFlow.ErrorTarget.ServiceNullConstraintViolation -> {
|
||||
log.info("Fail with null constraint violation on raw statement")
|
||||
val session = services.jdbcSession()
|
||||
val statement = session.createStatement()
|
||||
statement.execute(
|
||||
"UPDATE FAIL_TEST_STATES \n" +
|
||||
"SET RANDOM_VALUE = NULL\n" +
|
||||
"WHERE transaction_id = '${it.ref.txhash}' AND output_index = ${it.ref.index};"
|
||||
)
|
||||
log.info("SQL result: ${statement.resultSet}")
|
||||
}
|
||||
CreateStateFlow.ErrorTarget.ServiceValidUpdate -> {
|
||||
log.info("Update current statement")
|
||||
val session = services.jdbcSession()
|
||||
val statement = session.createStatement()
|
||||
statement.execute(
|
||||
"UPDATE FAIL_TEST_STATES \n" +
|
||||
"SET RANDOM_VALUE = '${contractState!!.randomValue} Updated by service'\n" +
|
||||
"WHERE transaction_id = '${it.ref.txhash}' AND output_index = ${it.ref.index};"
|
||||
)
|
||||
log.info("SQL result: ${statement.resultSet}")
|
||||
}
|
||||
CreateStateFlow.ErrorTarget.ServiceReadState -> {
|
||||
log.info("Read current state from db")
|
||||
val session = services.jdbcSession()
|
||||
val statement = session.createStatement()
|
||||
statement.execute(
|
||||
"SELECT * FROM FAIL_TEST_STATES \n" +
|
||||
"WHERE transaction_id = '${it.ref.txhash}' AND output_index = ${it.ref.index};"
|
||||
)
|
||||
log.info("SQL result: ${statement.resultSet}")
|
||||
}
|
||||
CreateStateFlow.ErrorTarget.ServiceCheckForState -> {
|
||||
log.info("Check for currently written state in the db")
|
||||
val session = services.jdbcSession()
|
||||
val statement = session.createStatement()
|
||||
val rs = statement.executeQuery(
|
||||
"SELECT COUNT(*) FROM FAIL_TEST_STATES \n" +
|
||||
"WHERE transaction_id = '${it.ref.txhash}' AND output_index = ${it.ref.index};"
|
||||
)
|
||||
val numOfRows = if (rs.next()) rs.getInt("COUNT(*)") else 0
|
||||
log.info("Found a state with tx:ind ${it.ref.txhash}:${it.ref.index} in " +
|
||||
"TEST_FAIL_STATES: ${if (numOfRows > 0) "Yes" else "No"}")
|
||||
}
|
||||
CreateStateFlow.ErrorTarget.ServiceThrowInvalidParameter -> {
|
||||
log.info("Throw InvalidParameterException")
|
||||
throw InvalidParameterException("Toys out of pram")
|
||||
}
|
||||
else -> {
|
||||
// do nothing, everything else must be handled elsewhere
|
||||
}
|
||||
}
|
||||
} catch (t: Throwable) {
|
||||
if (CreateStateFlow.getServiceExceptionHandlingTarget(contractState?.errorTarget)
|
||||
== CreateStateFlow.ErrorTarget.ServiceSwallowErrors) {
|
||||
log.warn("Service not letting errors escape", t)
|
||||
} else {
|
||||
throw t
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -7,6 +7,7 @@ import net.corda.core.internal.cordapp.set
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.debug
|
||||
import net.corda.testing.core.internal.JarSignatureTestUtils.generateKey
|
||||
import net.corda.testing.core.internal.JarSignatureTestUtils.containsKey
|
||||
import net.corda.testing.core.internal.JarSignatureTestUtils.signJar
|
||||
import java.nio.file.Path
|
||||
import java.nio.file.Paths
|
||||
@ -43,7 +44,8 @@ data class CustomCordapp(
|
||||
|
||||
override fun withOnlyJarContents(): CustomCordapp = CustomCordapp(packages = packages, classes = classes)
|
||||
|
||||
fun signed(keyStorePath: Path? = null): CustomCordapp = copy(signingInfo = SigningInfo(keyStorePath))
|
||||
fun signed(keyStorePath: Path? = null, numberOfSignatures: Int = 1, keyAlgorithm: String = "RSA"): CustomCordapp =
|
||||
copy(signingInfo = SigningInfo(keyStorePath, numberOfSignatures, keyAlgorithm))
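A hedged usage sketch of the widened `signed` signature (the package name is invented; `cordappWithPackages` is the usual factory for `CustomCordapp` in these test utilities):

import net.corda.testing.node.internal.cordappWithPackages

// Request two signatures with EC keys instead of the single default RSA signature.
val multiSignedCordapp = cordappWithPackages("com.example.flows")
        .signed(numberOfSignatures = 2, keyAlgorithm = "EC")

Each signature gets its own generated alias (`alias1`, `alias2`, ...) in the default test keystore, as the loop in `signJar` below shows.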
|
||||
|
||||
@VisibleForTesting
|
||||
internal fun packageAsJar(file: Path) {
|
||||
@ -73,20 +75,21 @@ data class CustomCordapp(
|
||||
|
||||
private fun signJar(jarFile: Path) {
|
||||
if (signingInfo != null) {
|
||||
val testKeystore = "_teststore"
|
||||
val alias = "Test"
|
||||
val pwd = "secret!"
|
||||
val keyStorePathToUse = if (signingInfo.keyStorePath != null) {
|
||||
signingInfo.keyStorePath
|
||||
} else {
|
||||
defaultJarSignerDirectory.createDirectories()
|
||||
if (!(defaultJarSignerDirectory / testKeystore).exists()) {
|
||||
defaultJarSignerDirectory.generateKey(alias, pwd, "O=Test Company Ltd,OU=Test,L=London,C=GB")
|
||||
}
|
||||
defaultJarSignerDirectory
|
||||
}
|
||||
|
||||
for (i in 1 .. signingInfo.numberOfSignatures) {
|
||||
val alias = "alias$i"
|
||||
val pwd = "secret!"
|
||||
if (!keyStorePathToUse.containsKey(alias, pwd))
|
||||
keyStorePathToUse.generateKey(alias, pwd, "O=Test Company Ltd $i,OU=Test,L=London,C=GB", signingInfo.keyAlgorithm)
|
||||
val pk = keyStorePathToUse.signJar(jarFile.toString(), alias, pwd)
|
||||
logger.debug { "Signed Jar: $jarFile with public key $pk" }
|
||||
}
|
||||
} else {
|
||||
logger.debug { "Unsigned Jar: $jarFile" }
|
||||
}
|
||||
@ -111,7 +114,7 @@ data class CustomCordapp(
|
||||
return ZipEntry(name).setCreationTime(epochFileTime).setLastAccessTime(epochFileTime).setLastModifiedTime(epochFileTime)
|
||||
}
|
||||
|
||||
data class SigningInfo(val keyStorePath: Path? = null)
|
||||
data class SigningInfo(val keyStorePath: Path?, val numberOfSignatures: Int, val keyAlgorithm: String)
|
||||
|
||||
companion object {
|
||||
private val logger = contextLogger()
|
||||
|
@ -34,6 +34,7 @@ import net.corda.core.node.NetworkParameters
|
||||
import net.corda.core.node.NotaryInfo
|
||||
import net.corda.core.node.services.NetworkMapCache
|
||||
import net.corda.core.utilities.NetworkHostAndPort
|
||||
import net.corda.core.utilities.Try
|
||||
import net.corda.core.utilities.contextLogger
|
||||
import net.corda.core.utilities.getOrThrow
|
||||
import net.corda.core.utilities.millis
|
||||
@ -82,6 +83,7 @@ import rx.schedulers.Schedulers
|
||||
import java.io.File
|
||||
import java.net.ConnectException
|
||||
import java.net.URL
|
||||
import java.net.URLClassLoader
|
||||
import java.nio.file.Path
|
||||
import java.security.cert.X509Certificate
|
||||
import java.time.Duration
|
||||
@ -152,6 +154,14 @@ class DriverDSLImpl(
|
||||
//TODO: remove this once we can bundle quasar properly.
|
||||
private val quasarJarPath: String by lazy { resolveJar("co.paralleluniverse.fibers.Suspendable") }
|
||||
|
||||
private val bytemanJarPath: String? by lazy {
|
||||
try {
|
||||
resolveJar("org.jboss.byteman.agent.Transformer")
|
||||
} catch (e: Exception) {
|
||||
null
|
||||
}
|
||||
}
|
||||
|
||||
private fun NodeConfig.checkAndOverrideForInMemoryDB(): NodeConfig = this.run {
|
||||
if (inMemoryDB && corda.dataSourceProperties.getProperty("dataSource.url").startsWith("jdbc:h2:")) {
|
||||
val jdbcUrl = "jdbc:h2:mem:persistence${inMemoryCounter.getAndIncrement()};DB_CLOSE_ON_EXIT=FALSE;LOCK_TIMEOUT=10000;WRITE_DELAY=100"
|
||||
@ -206,7 +216,9 @@ class DriverDSLImpl(
|
||||
}
|
||||
}
|
||||
|
||||
override fun startNode(parameters: NodeParameters): CordaFuture<NodeHandle> {
|
||||
override fun startNode(parameters: NodeParameters): CordaFuture<NodeHandle> = startNode(parameters, bytemanPort = null)
|
||||
|
||||
override fun startNode(parameters: NodeParameters, bytemanPort: Int?): CordaFuture<NodeHandle> {
|
||||
val p2pAddress = portAllocation.nextHostAndPort()
|
||||
// TODO: Derive name from the full picked name, don't just wrap the common name
|
||||
val name = parameters.providedName ?: CordaX500Name("${oneOf(names).organisation}-${p2pAddress.port}", "London", "GB")
|
||||
@ -221,15 +233,17 @@ class DriverDSLImpl(
|
||||
return registrationFuture.flatMap {
|
||||
networkMapAvailability.flatMap {
|
||||
// But starting the node proper does require the network map
|
||||
startRegisteredNode(name, it, parameters, p2pAddress)
|
||||
startRegisteredNode(name, it, parameters, p2pAddress, bytemanPort)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("ComplexMethod")
|
||||
private fun startRegisteredNode(name: CordaX500Name,
|
||||
localNetworkMap: LocalNetworkMap?,
|
||||
parameters: NodeParameters,
|
||||
p2pAddress: NetworkHostAndPort = portAllocation.nextHostAndPort()): CordaFuture<NodeHandle> {
|
||||
p2pAddress: NetworkHostAndPort = portAllocation.nextHostAndPort(),
|
||||
bytemanPort: Int? = null): CordaFuture<NodeHandle> {
|
||||
val rpcAddress = portAllocation.nextHostAndPort()
|
||||
val rpcAdminAddress = portAllocation.nextHostAndPort()
|
||||
val webAddress = portAllocation.nextHostAndPort()
|
||||
@ -270,7 +284,7 @@ class DriverDSLImpl(
|
||||
configOverrides = if (overrides.hasPath("devMode")) overrides else overrides + mapOf("devMode" to true)
|
||||
).withDJVMConfig(djvmBootstrapSource, djvmCordaSource)
|
||||
).checkAndOverrideForInMemoryDB()
|
||||
return startNodeInternal(config, webAddress, localNetworkMap, parameters)
|
||||
return startNodeInternal(config, webAddress, localNetworkMap, parameters, bytemanPort)
|
||||
}
|
||||
|
||||
private fun startNodeRegistration(
|
||||
@ -574,6 +588,8 @@ class DriverDSLImpl(
|
||||
config,
|
||||
quasarJarPath,
|
||||
debugPort,
|
||||
bytemanJarPath,
|
||||
null,
|
||||
systemProperties,
|
||||
"512m",
|
||||
null,
|
||||
@ -585,10 +601,12 @@ class DriverDSLImpl(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("ComplexMethod")
|
||||
private fun startNodeInternal(config: NodeConfig,
|
||||
webAddress: NetworkHostAndPort,
|
||||
localNetworkMap: LocalNetworkMap?,
|
||||
parameters: NodeParameters): CordaFuture<NodeHandle> {
|
||||
parameters: NodeParameters,
|
||||
bytemanPort: Int?): CordaFuture<NodeHandle> {
|
||||
val visibilityHandle = networkVisibilityController.register(config.corda.myLegalName)
|
||||
val baseDirectory = config.corda.baseDirectory.createDirectories()
|
||||
localNetworkMap?.networkParametersCopier?.install(baseDirectory)
|
||||
@ -634,7 +652,16 @@ class DriverDSLImpl(
|
||||
nodeFuture
|
||||
} else {
|
||||
val debugPort = if (isDebug) debugPortAllocation.nextPort() else null
|
||||
val process = startOutOfProcessNode(config, quasarJarPath, debugPort, systemProperties, parameters.maximumHeapSize, parameters.logLevelOverride)
|
||||
val process = startOutOfProcessNode(
|
||||
config,
|
||||
quasarJarPath,
|
||||
debugPort,
|
||||
bytemanJarPath,
|
||||
bytemanPort,
|
||||
systemProperties,
|
||||
parameters.maximumHeapSize,
|
||||
parameters.logLevelOverride
|
||||
)
|
||||
|
||||
// Destroy the child process when the parent exits. This is needed even when `waitForAllNodesToFinish` is
|
||||
// true because we don't want orphaned processes in the case that the parent process is terminated by the
|
||||
@ -786,16 +813,21 @@ class DriverDSLImpl(
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("ComplexMethod", "MaxLineLength")
|
||||
private fun startOutOfProcessNode(
|
||||
config: NodeConfig,
|
||||
quasarJarPath: String,
|
||||
debugPort: Int?,
|
||||
bytemanJarPath: String?,
|
||||
bytemanPort: Int?,
|
||||
overriddenSystemProperties: Map<String, String>,
|
||||
maximumHeapSize: String,
|
||||
logLevelOverride: String?,
|
||||
vararg extraCmdLineFlag: String
|
||||
): Process {
|
||||
log.info("Starting out-of-process Node ${config.corda.myLegalName.organisation}, debug port is " + (debugPort ?: "not enabled"))
|
||||
log.info("Starting out-of-process Node ${config.corda.myLegalName.organisation}, " +
|
||||
"debug port is " + (debugPort ?: "not enabled") + ", " +
|
||||
"byteMan: " + if (bytemanJarPath == null) "not in classpath" else "port is " + (bytemanPort ?: "not enabled"))
|
||||
// Write node.conf
|
||||
writeConfig(config.corda.baseDirectory, "node.conf", config.typesafe.toNodeOnly())
|
||||
|
||||
@ -839,6 +871,20 @@ class DriverDSLImpl(
|
||||
it += extraCmdLineFlag
|
||||
}.toList()
|
||||
|
||||
val bytemanJvmArgs = {
|
||||
val bytemanAgent = bytemanJarPath?.let {
|
||||
bytemanPort?.let {
|
||||
"-javaagent:$bytemanJarPath=port:$bytemanPort,listener:true"
|
||||
}
|
||||
}
|
||||
listOfNotNull(bytemanAgent) +
|
||||
if (bytemanAgent != null && debugPort != null) listOf(
|
||||
"-Dorg.jboss.byteman.verbose=true",
|
||||
"-Dorg.jboss.byteman.debug=true"
|
||||
)
|
||||
else emptyList()
|
||||
}.invoke()
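For orientation, when both an agent jar and a port are resolved, the block above contributes JVM arguments of roughly the following shape (path and port are illustrative):

// -javaagent:/path/to/byteman.jar=port:12000,listener:true
// -Dorg.jboss.byteman.verbose=true -Dorg.jboss.byteman.debug=true   <- only added when a debug port is also enabled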
|
||||
|
||||
// The following dependencies are excluded from the classpath of the created JVM,
|
||||
// so that the environment resembles a real one as close as possible.
|
||||
// These are either classes that will be added as attachments to the node (i.e. samples, finance, opengamma etc.)
|
||||
@ -853,7 +899,7 @@ class DriverDSLImpl(
|
||||
className = "net.corda.node.Corda", // cannot directly get class for this, so just use string
|
||||
arguments = arguments,
|
||||
jdwpPort = debugPort,
|
||||
extraJvmArguments = extraJvmArguments,
|
||||
extraJvmArguments = extraJvmArguments + bytemanJvmArgs,
|
||||
workingDirectory = config.corda.baseDirectory,
|
||||
maximumHeapSize = maximumHeapSize,
|
||||
classPath = cp
|
||||
@ -1016,6 +1062,11 @@ interface InternalDriverDSL : DriverDSL {
|
||||
fun start()
|
||||
|
||||
fun shutdown()
|
||||
|
||||
fun startNode(
|
||||
parameters: NodeParameters = NodeParameters(),
|
||||
bytemanPort: Int? = null
|
||||
): CordaFuture<NodeHandle>
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1,5 +1,6 @@
|
||||
package net.corda.testing.node.internal
|
||||
|
||||
import net.corda.core.flows.StateMachineRunId
|
||||
import net.corda.core.identity.CordaX500Name
|
||||
import net.corda.core.identity.PartyAndCertificate
|
||||
import net.corda.core.internal.PLATFORM_VERSION
|
||||
@ -268,6 +269,7 @@ class MockNodeMessagingService(private val configuration: NodeConfiguration,
|
||||
private inner class InMemoryDeduplicationHandler(override val receivedMessage: ReceivedMessage, val transfer: InMemoryMessagingNetwork.MessageTransfer) : DeduplicationHandler, ExternalEvent.ExternalMessageEvent {
|
||||
override val externalCause: ExternalEvent
|
||||
get() = this
|
||||
override val flowId: StateMachineRunId = StateMachineRunId.createRandom()
|
||||
override val deduplicationHandler: DeduplicationHandler
|
||||
get() = this
|
||||
|
||||
|
@ -9,6 +9,7 @@ import java.io.Closeable
|
||||
import java.io.FileInputStream
|
||||
import java.io.FileOutputStream
|
||||
import java.nio.file.Files
|
||||
import java.nio.file.NoSuchFileException
|
||||
import java.nio.file.Path
|
||||
import java.nio.file.Paths
|
||||
import java.security.PublicKey
|
||||
@ -72,6 +73,15 @@ object JarSignatureTestUtils {
|
||||
return ks.getCertificate(alias).publicKey
|
||||
}
|
||||
|
||||
fun Path.containsKey(alias: String, storePassword: String, storeName: String = "_teststore"): Boolean {
|
||||
return try {
|
||||
val ks = loadKeyStore(this.resolve(storeName), storePassword)
|
||||
ks.containsAlias(alias)
|
||||
} catch (e: NoSuchFileException) {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fun Path.getPublicKey(alias: String, storePassword: String) = getPublicKey(alias, "_teststore", storePassword)
|
||||
|
||||
fun Path.getJarSigners(fileName: String) =
|
||||
|
@ -135,6 +135,7 @@ class NodeController(check: atRuntime = ::checkExists) : Controller() {
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("MagicNumber") // initialising to max value
|
||||
private fun makeNetworkParametersCopier(config: NodeConfigWrapper): NetworkParametersCopier {
|
||||
val identity = getNotaryIdentity(config)
|
||||
val parametersCopier = NetworkParametersCopier(NetworkParameters(
|
||||
|
@ -241,6 +241,7 @@ class NodeTabView : Fragment() {
|
||||
CityDatabase.cityMap.values.map { it.countryCode }.toSet().map { it to Image(resources["/net/corda/demobench/flags/$it.png"]) }.toMap()
|
||||
}
|
||||
|
||||
@Suppress("MagicNumber") // demobench UI magic
|
||||
private fun Pane.nearestCityField(): ComboBox<WorldMapLocation> {
|
||||
return combobox(model.nearestCity, CityDatabase.cityMap.values.toList().sortedBy { it.description }) {
|
||||
minWidth = textWidth
|
||||
|
@ -70,6 +70,7 @@ fun main(args: Array<String>) {
|
||||
}
|
||||
}
|
||||
|
||||
@Suppress("MagicNumber") // test constants
|
||||
private fun runLoadTest(loadTestConfiguration: LoadTestConfiguration) {
|
||||
runLoadTests(loadTestConfiguration, listOf(
|
||||
selfIssueTest to LoadTest.RunParameters(
|
||||
@ -131,6 +132,7 @@ private fun runLoadTest(loadTestConfiguration: LoadTestConfiguration) {
|
||||
))
|
||||
}
|
||||
|
||||
@Suppress("MagicNumber") // test constants
|
||||
private fun runStabilityTest(loadTestConfiguration: LoadTestConfiguration) {
|
||||
runLoadTests(loadTestConfiguration, listOf(
|
||||
// Self issue cash. This is a pre test step to make sure vault have enough cash to work with.
|
||||
|
@ -38,6 +38,7 @@ interface Volume {
|
||||
nodeInfoFile.readBytes().deserialize<SignedNodeInfo>().verified().let { NotaryInfo(it.legalIdentities.first(), validating) }
|
||||
}
|
||||
|
||||
@Suppress("MagicNumber") // default config constants
|
||||
return notaryInfos.let {
|
||||
NetworkParameters(
|
||||
minimumPlatformVersion = 1,
|
||||
|