Merge pull request #5745 from corda/EdP/CORDA-3446-4.4

Merge OS 4.3 -> OS 4.4
This commit is contained in:
Stefano Franz 2019-11-25 12:47:21 +00:00 committed by GitHub
commit 0f92c96d15
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 256 additions and 138 deletions

View File

@@ -0,0 +1,8 @@
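# Zonal (replication-type: none) standard GCE persistent disks backing the "testing-storage" class.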
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: testing-storage
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard
replication-type: none

View File

@@ -4,7 +4,7 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob
killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger())
pipeline {
agent { label 'k8s' }
agent { label 'gke' }
options {
timestamps()
buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7'))
@@ -26,7 +26,7 @@ pipeline {
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean pushBuildImage preAllocateForParallelRegressionTest --stacktrace"
" clean pushBuildImage --stacktrace"
}
sh "kubectl auth can-i get pods"
}
@@ -42,7 +42,7 @@ pipeline {
"-Dartifactory.password=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Dgit.branch=\"\${GIT_BRANCH}\" " +
"-Dgit.target.branch=\"\${GIT_BRANCH}\" " +
" deAllocateForParallelRegressionTest parallelRegressionTest --stacktrace"
" parallelRegressionTest --stacktrace"
}
}
}

Jenkinsfile (vendored): 29 changes
View File

@@ -1,3 +1,4 @@
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@Library('existing-build-control')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@@ -23,7 +24,7 @@ pipeline {
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean pushBuildImage preAllocateForAllParallelIntegrationTest --stacktrace"
" clean pushBuildImage preAllocateForAllParallelIntegrationTest preAllocateForAllParallelUnitTest --stacktrace"
}
sh "kubectl auth can-i get pods"
}
@@ -41,21 +42,23 @@ pipeline {
"-Dartifactory.password=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Dgit.branch=\"\${GIT_BRANCH}\" " +
"-Dgit.target.branch=\"\${CHANGE_TARGET}\" " +
" deAllocateForAllParallelIntegrationTest allParallelIntegrationTest --stacktrace"
" deAllocateForAllParallelIntegrationTest allParallelIntegrationTest --stacktrace"
}
}
stage('Unit Tests') {
steps {
sh "./gradlew " +
"-DbuildId=\"\${BUILD_ID}\" " +
"-Dkubenetize=true " +
"-Ddocker.run.tag=\"\${DOCKER_TAG_TO_USE}\" " +
"-Dartifactory.username=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Dartifactory.password=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Dgit.branch=\"\${GIT_BRANCH}\" " +
"-Dgit.target.branch=\"\${CHANGE_TARGET}\" " +
" deAllocateForAllParallelUnitTest allParallelUnitTest --stacktrace"
}
}
// stage('Unit Tests') {
// steps {
// sh "./gradlew " +
// "-DbuildId=\"\${BUILD_ID}\" " +
// "-Dkubenetize=true " +
// "-Ddocker.run.tag=\"\${DOCKER_TAG_TO_USE}\"" +
// " deAllocateForAllParallelUnitTest allParallelUnitTest --stacktrace"
// }
// }
}
}
}

View File

@@ -609,7 +609,7 @@ task allParallelIntegrationTest(type: ParallelTestGroup) {
numberOfShards 10
streamOutput false
coresPerFork 5
memoryInGbPerFork 10
memoryInGbPerFork 12
distribute DistributeTestsBy.METHOD
}
task allParallelUnitTest(type: ParallelTestGroup) {
@@ -617,9 +617,10 @@ task allParallelUnitTest(type: ParallelTestGroup) {
testGroups "test"
numberOfShards 10
streamOutput false
coresPerFork 5
memoryInGbPerFork 6
coresPerFork 3
memoryInGbPerFork 12
distribute DistributeTestsBy.CLASS
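// tolerate the "small" node taint so these pods can be scheduled onto the smaller tainted nodes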
nodeTaints "small"
}
task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) {
testGroups "test", "integrationTest"
@@ -627,15 +628,15 @@ task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) {
streamOutput false
coresPerFork 6
memoryInGbPerFork 10
distribute DistributeTestsBy.CLASS
distribute DistributeTestsBy.METHOD
}
task parallelRegressionTest(type: ParallelTestGroup) {
testGroups "test", "integrationTest", "slowIntegrationTest", "smokeTest"
numberOfShards 5
numberOfShards 6
streamOutput false
coresPerFork 6
memoryInGbPerFork 10
distribute DistributeTestsBy.CLASS
distribute DistributeTestsBy.METHOD
}
task allParallelSmokeTest(type: ParallelTestGroup) {
testGroups "slowIntegrationTest", "smokeTest"

View File

@@ -20,12 +20,17 @@ import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static net.corda.testing.ListTests.DISTRIBUTION_PROPERTY;
public class BucketingAllocator {
private static final Logger LOG = LoggerFactory.getLogger(BucketingAllocator.class);
private final List<TestsForForkContainer> forkContainers;
private final Supplier<Tests> timedTestsProvider;
private List<Tuple2<TestLister, Object>> sources = new ArrayList<>();
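// The distribution strategy (CLASS or METHOD) is read from the DISTRIBUTION_PROPERTY system
// property (see ListTests); METHOD is the default when the property is absent or empty.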
private DistributeTestsBy distribution = System.getProperty(DISTRIBUTION_PROPERTY) != null && !System.getProperty(DISTRIBUTION_PROPERTY).isEmpty() ?
DistributeTestsBy.valueOf(System.getProperty(DISTRIBUTION_PROPERTY)) : DistributeTestsBy.METHOD;
public BucketingAllocator(Integer forkCount, Supplier<Tests> timedTestsProvider) {
this.forkContainers = IntStream.range(0, forkCount).mapToObj(TestsForForkContainer::new).collect(Collectors.toList());
@@ -104,7 +109,17 @@ public class BucketingAllocator {
// If the gradle task is distributing by class rather than method, then 'testName' will be the className
// and not className.testName
// No matter which it is, we return the mean test duration as the duration value if not found.
final List<Tuple2<String, Long>> matchingTests = tests.startsWith(testName);
final List<Tuple2<String, Long>> matchingTests;
switch (distribution) {
case METHOD:
matchingTests = tests.equals(testName);
break;
case CLASS:
matchingTests = tests.startsWith(testName);
break;
default:
throw new IllegalArgumentException("Unknown distribution type: " + distribution);
}
return new TestBucket(task, testName, matchingTests);
}).sorted(Comparator.comparing(TestBucket::getDuration).reversed()).collect(Collectors.toList());

View File

@@ -118,10 +118,11 @@ class DistributedTesting implements Plugin<Project> {
numberOfCoresPerFork = testGrouping.getCoresToUse()
distribution = testGrouping.getDistribution()
podLogLevel = testGrouping.getLogLevel()
taints = testGrouping.getNodeTaints()
sidecarImage = testGrouping.sidecarImage
additionalArgs = testGrouping.additionalArgs
doFirst {
dockerTag = tagToUseForRunningTests ? (ImageBuilding.registryName + ":" + tagToUseForRunningTests) : (imagePushTask.imageName.get() + ":" + imagePushTask.tag.get())
sidecarImage = testGrouping.sidecarImage
additionalArgs = testGrouping.additionalArgs
}
}
def reportOnAllTask = project.rootProject.tasks.create("userDefinedReports${testGrouping.getName().capitalize()}", KubesReporting) {
@@ -165,7 +166,7 @@ class DistributedTesting implements Plugin<Project> {
int numberOfPodsToRequest = testGrouping.getShardCount()
int coresPerPod = testGrouping.getCoresToUse()
int memoryGBPerPod = testGrouping.getGbOfMemory()
allocator.allocatePods(numberOfPodsToRequest, coresPerPod, memoryGBPerPod, podPrefix)
allocator.allocatePods(numberOfPodsToRequest, coresPerPod, memoryGBPerPod, podPrefix, testGrouping.getNodeTaints())
}
}

View File

@@ -1,13 +1,18 @@
package net.corda.testing;
import io.fabric8.kubernetes.api.model.ContainerFluent;
import io.fabric8.kubernetes.api.model.DoneablePod;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodBuilder;
import io.fabric8.kubernetes.api.model.PodFluent;
import io.fabric8.kubernetes.api.model.PodSpecFluent;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.Status;
import io.fabric8.kubernetes.api.model.StatusCause;
import io.fabric8.kubernetes.api.model.StatusDetails;
import io.fabric8.kubernetes.api.model.Toleration;
import io.fabric8.kubernetes.api.model.TolerationBuilder;
import io.fabric8.kubernetes.client.DefaultKubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientException;
@@ -32,7 +37,10 @@ import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.io.RandomAccessFile;
import java.math.BigInteger;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
@@ -74,12 +82,13 @@ public class KubesTest extends DefaultTask {
String sidecarImage;
Boolean printOutput = false;
List<String> additionalArgs;
List<String> taints = Collections.emptyList();
Integer numberOfCoresPerFork = 4;
Integer memoryGbPerFork = 6;
public volatile List<File> testOutput = Collections.emptyList();
public volatile List<KubePodResult> containerResults = Collections.emptyList();
private final Set<String> remainingPods = Collections.synchronizedSet(new HashSet());
private final Set<String> remainingPods = Collections.synchronizedSet(new HashSet<>());
public static String NAMESPACE = "thisisatest";
@@ -139,7 +148,26 @@ public class KubesTest extends DefaultTask {
}
@NotNull
private KubernetesClient getKubernetesClient() {
private synchronized KubernetesClient getKubernetesClient() {
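// Serialise token refreshes across concurrent Gradle workers on this host: take an exclusive
// lock on a shared lock file, then shell out to kubectl, which refreshes the cached cluster
// credentials as a side effect of the "auth can-i" call.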
try (RandomAccessFile file = new RandomAccessFile("/tmp/refresh.lock", "rw");
FileChannel c = file.getChannel();
FileLock lock = c.lock()) {
getProject().getLogger().quiet("Invoking kubectl to attempt to refresh token");
ProcessBuilder tokenRefreshCommand = new ProcessBuilder().command("kubectl", "auth", "can-i", "get", "pods");
Process refreshProcess = tokenRefreshCommand.start();
int resultCodeOfRefresh = refreshProcess.waitFor();
getProject().getLogger().quiet("Completed Token refresh");
if (resultCodeOfRefresh != 0) {
throw new RuntimeException("Failed to invoke kubectl to refresh tokens");
}
} catch (InterruptedException | IOException e) {
throw new RuntimeException(e);
}
io.fabric8.kubernetes.client.Config config = new io.fabric8.kubernetes.client.ConfigBuilder()
.withConnectionTimeout(DEFAULT_K8S_TIMEOUT_VALUE_MILLIES)
.withRequestTimeout(DEFAULT_K8S_TIMEOUT_VALUE_MILLIES)
@@ -319,34 +347,7 @@ public class KubesTest extends DefaultTask {
}
private Pod buildPodRequestWithOnlyWorkerNode(String podName, PersistentVolumeClaim pvc) {
return new PodBuilder()
.withNewMetadata().withName(podName).endMetadata()
.withNewSpec()
.addNewVolume()
.withName("gradlecache")
.withNewHostPath()
.withType("DirectoryOrCreate")
.withPath("/tmp/gradle")
.endHostPath()
.endVolume()
.addNewVolume()
.withName("testruns")
.withNewPersistentVolumeClaim()
.withClaimName(pvc.getMetadata().getName())
.endPersistentVolumeClaim()
.endVolume()
.addNewContainer()
.withImage(dockerTag)
.withCommand("bash")
.withArgs("-c", "sleep 3600")
.addNewEnv()
.withName("DRIVER_NODE_MEMORY")
.withValue("1024m")
.withName("DRIVER_WEB_MEMORY")
.withValue("1024m")
.endEnv()
.withName(podName)
.withNewResources()
return getBasePodDefinition(podName, pvc)
.addToRequests("cpu", new Quantity(numberOfCoresPerFork.toString()))
.addToRequests("memory", new Quantity(memoryGbPerFork.toString()))
.endResources()
@@ -360,43 +361,13 @@
}
private Pod buildPodRequestWithWorkerNodeAndDbContainer(String podName, PersistentVolumeClaim pvc) {
return new PodBuilder()
.withNewMetadata().withName(podName).endMetadata()
.withNewSpec()
.addNewVolume()
.withName("gradlecache")
.withNewHostPath()
.withType("DirectoryOrCreate")
.withPath("/tmp/gradle")
.endHostPath()
.endVolume()
.addNewVolume()
.withName("testruns")
.withNewPersistentVolumeClaim()
.withClaimName(pvc.getMetadata().getName())
.endPersistentVolumeClaim()
.endVolume()
.addNewContainer()
.withImage(dockerTag)
.withCommand("bash")
.withArgs("-c", "sleep 3600")
.addNewEnv()
.withName("DRIVER_NODE_MEMORY")
.withValue("1024m")
.withName("DRIVER_WEB_MEMORY")
.withValue("1024m")
.endEnv()
.withName(podName)
.withNewResources()
return getBasePodDefinition(podName, pvc)
.addToRequests("cpu", new Quantity(Integer.valueOf(numberOfCoresPerFork - 1).toString()))
.addToRequests("memory", new Quantity(Integer.valueOf(memoryGbPerFork - 1).toString() + "Gi"))
.endResources()
.addNewVolumeMount().withName("gradlecache").withMountPath("/tmp/gradle").endVolumeMount()
.addNewVolumeMount().withName("testruns").withMountPath(TEST_RUN_DIR).endVolumeMount()
.endContainer()
.addNewContainer()
.withImage(sidecarImage)
.addNewEnv()
@@ -418,6 +389,39 @@
.build();
}
private ContainerFluent.ResourcesNested<PodSpecFluent.ContainersNested<PodFluent.SpecNested<PodBuilder>>> getBasePodDefinition(String podName, PersistentVolumeClaim pvc) {
return new PodBuilder()
.withNewMetadata().withName(podName).endMetadata()
.withNewSpec()
.addNewVolume()
.withName("gradlecache")
.withNewHostPath()
.withType("DirectoryOrCreate")
.withPath("/tmp/gradle")
.endHostPath()
.endVolume()
.addNewVolume()
.withName("testruns")
.withNewPersistentVolumeClaim()
.withClaimName(pvc.getMetadata().getName())
.endPersistentVolumeClaim()
.endVolume()
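// Each configured taint value becomes a toleration for taint key "key", allowing these
// pods to be scheduled onto the matching tainted nodes.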
.withTolerations(taints.stream().map(taint -> new TolerationBuilder().withKey("key").withValue(taint).withOperator("Equal").withEffect("NoSchedule").build()).collect(Collectors.toList()))
.addNewContainer()
.withImage(dockerTag)
.withCommand("bash")
.withArgs("-c", "sleep 3600")
.addNewEnv()
.withName("DRIVER_NODE_MEMORY")
.withValue("1024m")
.withName("DRIVER_WEB_MEMORY")
.withValue("1024m")
.endEnv()
.withName(podName)
.withNewResources();
}
private File startLogPumping(InputStream stdOutIs, int podIdx, File podLogsDirectory, boolean printOutput) throws IOException {
File outputFile = new File(podLogsDirectory, "container-" + podIdx + ".log");

View File

@@ -6,6 +6,7 @@ import io.github.classgraph.ClassInfoList;
import org.gradle.api.DefaultTask;
import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.TaskAction;
import org.jetbrains.annotations.NotNull;
import java.math.BigInteger;
import java.util.ArrayList;
@@ -13,6 +14,7 @@ import java.util.Collection;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
interface TestLister {
List<String> getAllTestsDiscovered();
@@ -47,23 +49,7 @@ public class ListTests extends DefaultTask implements TestLister {
Collection<String> results;
switch (distribution) {
case METHOD:
results = new ClassGraph()
.enableClassInfo()
.enableMethodInfo()
.ignoreClassVisibility()
.ignoreMethodVisibility()
.enableAnnotationInfo()
.overrideClasspath(scanClassPath)
.scan()
.getClassesWithMethodAnnotation("org.junit.Test")
.stream()
.map(classInfo -> {
ClassInfoList returnList = new ClassInfoList();
returnList.add(classInfo);
returnList.addAll(classInfo.getSubclasses());
return returnList;
})
.flatMap(ClassInfoList::stream)
results = getClassGraphStreamOfTestClasses()
.map(classInfo -> classInfo.getMethodInfo().filter(methodInfo -> methodInfo.hasAnnotation("org.junit.Test"))
.stream().map(methodInfo -> classInfo.getName() + "." + methodInfo.getName()))
.flatMap(Function.identity())
@@ -72,28 +58,32 @@
this.allTests = results.stream().sorted().collect(Collectors.toList());
break;
case CLASS:
results = new ClassGraph()
.enableClassInfo()
.enableMethodInfo()
.ignoreClassVisibility()
.ignoreMethodVisibility()
.enableAnnotationInfo()
.overrideClasspath(scanClassPath)
.scan()
.getClassesWithMethodAnnotation("org.junit.Test")
.stream()
.map(classInfo -> {
ClassInfoList returnList = new ClassInfoList();
returnList.add(classInfo);
returnList.addAll(classInfo.getSubclasses());
return returnList;
})
.flatMap(ClassInfoList::stream)
results = getClassGraphStreamOfTestClasses()
.map(ClassInfo::getName)
.collect(Collectors.toSet());
this.allTests = results.stream().sorted().collect(Collectors.toList());
break;
}
getProject().getLogger().lifecycle("THESE ARE ALL THE TESTSSS!!!!!!!!: " + allTests.toString());
}
@NotNull
private Stream<ClassInfo> getClassGraphStreamOfTestClasses() {
return new ClassGraph()
.enableClassInfo()
.enableMethodInfo()
.ignoreClassVisibility()
.ignoreMethodVisibility()
.enableAnnotationInfo()
.overrideClasspath(scanClassPath)
.scan()
.getClassesWithMethodAnnotation("org.junit.Test")
.stream()
.map(classInfo -> {
ClassInfoList returnList = new ClassInfoList();
returnList.add(classInfo);
returnList.addAll(classInfo.getSubclasses());
return returnList;
})
.flatMap(ClassInfoList::stream);
}
}

View File

@@ -17,6 +17,7 @@ public class ParallelTestGroup extends DefaultTask {
private PodLogLevel logLevel = PodLogLevel.INFO;
private String sidecarImage;
private List<String> additionalArgs = new ArrayList<>();
private List<String> taints = new ArrayList<>();
public DistributeTestsBy getDistribution() {
return distribution;
@@ -46,9 +47,17 @@
return logLevel;
}
public String getSidecarImage() { return sidecarImage; }
public String getSidecarImage() {
return sidecarImage;
}
public List<String> getAdditionalArgs() { return additionalArgs; }
public List<String> getAdditionalArgs() {
return additionalArgs;
}
public List<String> getNodeTaints() {
return new ArrayList<>(taints);
}
public void numberOfShards(int shards) {
this.shardCount = shards;
@@ -95,4 +104,12 @@
this.additionalArgs.addAll(additionalArgs);
}
public void nodeTaints(String... taints) {
nodeTaints(Arrays.asList(taints));
}
private void nodeTaints(List<String> taints) {
this.taints.addAll(taints);
}
}
}

View File

@@ -1,6 +1,7 @@
package net.corda.testing;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.TolerationBuilder;
import io.fabric8.kubernetes.api.model.batch.Job;
import io.fabric8.kubernetes.api.model.batch.JobBuilder;
import io.fabric8.kubernetes.client.Config;
@@ -36,12 +37,16 @@ public class PodAllocator {
this.logger = LoggerFactory.getLogger(PodAllocator.class);
}
public void allocatePods(Integer number, Integer coresPerPod, Integer memoryPerPod, String prefix) {
public void allocatePods(Integer number,
Integer coresPerPod,
Integer memoryPerPod,
String prefix,
List<String> taints) {
Config config = getConfig();
KubernetesClient client = new DefaultKubernetesClient(config);
List<Job> podsToRequest = IntStream.range(0, number).mapToObj(i -> buildJob("pa-" + prefix + i, coresPerPod, memoryPerPod)).collect(Collectors.toList());
List<Job> podsToRequest = IntStream.range(0, number).mapToObj(i -> buildJob("pa-" + prefix + i, coresPerPod, memoryPerPod, taints)).collect(Collectors.toList());
List<Job> createdJobs = podsToRequest.stream().map(requestedJob -> {
String msg = "PreAllocating " + requestedJob.getMetadata().getName();
if (logger instanceof org.gradle.api.logging.Logger) {
@@ -112,7 +117,7 @@
}
Job buildJob(String podName, Integer coresPerPod, Integer memoryPerPod) {
Job buildJob(String podName, Integer coresPerPod, Integer memoryPerPod, List<String> taints) {
return new JobBuilder().withNewMetadata().withName(podName).endMetadata()
.withNewSpec()
.withTtlSecondsAfterFinished(10)
@@ -121,6 +126,7 @@
.withName(podName + "-pod")
.endMetadata()
.withNewSpec()
.withTolerations(taints.stream().map(taint -> new TolerationBuilder().withKey("key").withValue(taint).withOperator("Equal").withEffect("NoSchedule").build()).collect(Collectors.toList()))
.addNewContainer()
.withImage("busybox:latest")
.withCommand("sh")

View File

@@ -170,6 +170,20 @@ public class Tests {
return results;
}
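/**
 * Find the durations recorded for an exact test-name match. Despite its name, this is an
 * overload unrelated to {@code Object.equals}; it falls back to the mean duration when
 * nothing matches.
 */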
@NotNull
List<Tuple2<String, Long>> equals(@NotNull final String testPrefix) {
List<Tuple2<String, Long>> results = this.tests.keySet().stream()
.filter(t -> t.equals(testPrefix))
.map(t -> new Tuple2<>(t, getDuration(t)))
.collect(Collectors.toList());
// Exact-match lookup: if this test name has never been timed before, fall back to the mean duration.
if (results.isEmpty()) {
LOG.warn("In {} previously executed tests, could not find any equal to {}", tests.size(), testPrefix);
results = Arrays.asList(new Tuple2<>(testPrefix, getMeanDurationForTests()));
}
return results;
}
/**
* How many times has this function been run? Every call to addDuration increments the current value.
*

View File

@@ -611,9 +611,17 @@ flow to receive the transaction:
:dedent: 12
``idOfTxWeSigned`` is an optional parameter used to confirm that we got the right transaction. It comes from using ``SignTransactionFlow``
which is described below.
which is described in the error handling behaviour section.
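For illustration only, a sketch of the responder pattern described above; ``otherSideSession`` and the anonymous
``SignTransactionFlow`` subclass are assumed surrounding context rather than code from this change:

.. code-block:: kotlin

    // Sign the proposed transaction and keep the id of what we signed.
    val idOfTxWeSigned = subFlow(object : SignTransactionFlow(otherSideSession) {
        override fun checkTransaction(stx: SignedTransaction) {
            // Responder-specific checks on the proposed transaction go here.
        }
    }).id
    // Pin finality to exactly the transaction we signed.
    subFlow(ReceiveFinalityFlow(otherSideSession, expectedTxId = idOfTxWeSigned))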
**Error handling behaviour**
Finalizing transactions with only one participant
.................................................
In some cases a transaction will have only one participant: the initiator. In these instances there are no other
parties to send the transaction to during ``FinalityFlow``, so the ``counterpartySession`` list must still be
provided, but left empty, as sketched below.
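A minimal sketch of this single-participant case, assuming ``fullySignedTx`` is a transaction already signed by
the initiator:

.. code-block:: kotlin

    // No counterparties: FinalityFlow still notarises and records the transaction,
    // but is given an empty list of sessions.
    val notarisedTx = subFlow(FinalityFlow(fullySignedTx, emptyList()))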
Error handling behaviour
........................
Once a transaction has been notarised and its input states consumed by the flow initiator (e.g. the sender), should the participant(s) receiving the
transaction fail to verify it, or should the receiving flow (the finality handler) fail due to some other error, we then have a scenario where not

View File

@@ -85,6 +85,26 @@ To fix this, an explicit type hint must be provided to the compiler:
This stops type inference from occurring and forces the variable to be of type ``AbstractParty``.
.. _platform_version_5_gradle_changes:
Step 2. Update Gradle version and associated dependencies
---------------------------------------------------------
Platform Version 5 requires Gradle 5.4 to build. If you use the Gradle wrapper, you can upgrade by running:
.. code:: shell
./gradlew wrapper --gradle-version 5.4.1
Otherwise, upgrade your installed copy in the usual manner for your operating system.
Additionally, you'll need to add https://repo.gradle.org/gradle/libs-releases as a repository to your project, in order to pick up the
`gradle-api-tooling` dependency. You can do this by adding the following to the repositories in your Gradle file:
.. code-block:: groovy
maven { url 'https://repo.gradle.org/gradle/libs-releases' }
Upgrading apps to Platform Version 4
====================================
@@ -124,6 +144,10 @@ You should also ensure you're using Gradle 4.10 (but not 5). If you use the Grad
Otherwise just upgrade your installed copy in the usual manner for your operating system.
.. note:: Platform Version 5 requires a different version of Gradle, so if you're intending to upgrade past Platform Version 4 you may wish
to skip updating Gradle here and upgrade directly to the version required by Platform Version 5. You'll still need to alter the version
numbers in your Gradle file as shown in this section. See :ref:`platform_version_5_gradle_changes`.
Step 3. Update your Gradle build file
-------------------------------------

View File

@@ -36,7 +36,7 @@ Unreleased
* Introduced a new low level flow diagnostics tool: checkpoint agent (that can be used standalone or in conjunction with the ``checkpoints dump`` shell command).
See :doc:`checkpoint-tooling` for more information.
* ``NotaryFlow.Client`` now performs transaction verification by default to prevent accidentally sending an invalid transaction to a
non-validating notary. The behaviour can be controlled by passing a constructor parameter flag ``skipVerification``.
Note: this only affects flows that invoke ``NotaryFlow.Client`` directly; there is no behavioural change when using ``FinalityFlow`` (see the sketch below).
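As an illustrative sketch only (the constructor shape is assumed from the entry above; ``stx`` stands in for a
``SignedTransaction``):

.. code-block:: kotlin

    // Opt out of the client-side verification that is now on by default.
    val notarySignatures = subFlow(NotaryFlow.Client(stx, skipVerification = true))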
@@ -88,6 +88,9 @@ Unreleased
Note that it's a responsibility of a client application to handle RPC reconnection in case this happens.
See :ref:`setting_jvm_args` and :ref:`memory_usage_and_tuning` for further details.
* :doc:`design/data-model-upgrades/package-namespace-ownership` configurations can now be set as described in
:ref:`node_package_namespace_ownership`, when using the Cordformation plugin version 4.0.43.
* Environment variables and system properties can now be provided with underscore separators instead of dots. Neither are case sensitive.
See :ref:`overriding config values <corda_configuration_file_overriding_config>` for more information.

View File

@@ -7,6 +7,12 @@ Deploying a node to a server
whether they have developed and tested a CorDapp following the instructions in :doc:`generating-a-node`
or are deploying a third-party CorDapp.
.. note:: When deploying multiple nodes in parallel, the packaging tool (Capsule) that Corda uses can encounter
issues retrieving dependencies, because each node tries to download them to a common location. In these cases
it is recommended to set the environment variable ``CAPSULE_CACHE_DIR``, which allows Capsule to maintain a
separate cache for each node. This is used in the example descriptions below. See the
`Capsule documentation <http://www.capsule.io>`_ for more details.
Linux: Installing and running Corda as a system service
-------------------------------------------------------
We recommend creating system services to run a node and the optional test webserver. This provides logging and service
@@ -90,6 +96,7 @@ handling, and ensures the Corda service is run at boot.
WorkingDirectory=/opt/corda
ExecStart=/usr/bin/java -jar /opt/corda/corda.jar
Restart=on-failure
Environment="CAPSULE_CACHE_DIR=./capsule"
[Install]
WantedBy=multi-user.target
@@ -244,17 +251,20 @@ at boot, and means the Corda service stays running with no users connected to th
.. code-block:: batch
nssm install cordanode1 C:\ProgramData\Oracle\Java\javapath\java.exe
nssm install cordanode1 java.exe
nssm set cordanode1 AppParameters "-jar corda.jar"
nssm set cordanode1 AppDirectory C:\Corda
nssm set cordanode1 AppStdout C:\Corda\service.log
nssm set cordanode1 AppStderr C:\Corda\service.log
nssm set cordanode1 AppEnvironmentExtra CAPSULE_CACHE_DIR=./capsule
nssm set cordanode1 Description Corda Node - Bank of Breakfast Tea
nssm set cordanode1 Start SERVICE_AUTO_START
sc start cordanode1
9. Modify the batch file:
* If you are installing multiple nodes, use a different service name (``cordanode1``) for each node
* If you are installing multiple nodes, use a different service name (``cordanode1``), and modify
`AppDirectory`, `AppStdout` and `AppStderr` for each node accordingly
* Set an informative description
10. Provision the required certificates to your node. Contact the network permissioning service or see

View File

@@ -337,6 +337,13 @@ transaction that uses them. This flow returns a list of ``LedgerTransaction`` ob
we don't download a transaction from the peer, they know we must have already seen it before. Fixing this privacy
leak will come later.
Finalizing transactions with only one participant
.................................................
In some cases a transaction will have only one participant: the initiator. In these instances there are no other
parties to send the transaction to during ``FinalityFlow``, so the ``counterpartySession`` list must still be
provided, but left empty.
CollectSignaturesFlow/SignTransactionFlow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We also invoke two other subflows:

View File

@@ -247,11 +247,18 @@ To copy the same file to all nodes `ext.drivers` can be defined in the top level
}
}
The Cordform task will automatically copy a Jolokia agent JAR into each generated node's `drivers` subdirectory. The version of this JAR
defaults to `1.6.0`. This can be changed by setting the `jolokia_version` property anywhere in your `build.gradle` file:
.. sourcecode:: groovy
ext.jolokia_version = "1.6.1"
.. _node_package_namespace_ownership:
Package namespace ownership
^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specify package namespace ownership, the optional ``networkParameterOverrides`` and ``packageOwnership`` blocks can be used, similar to the configuration file used in :doc:`network-bootstrapper`:
To specify :doc:`design/data-model-upgrades/package-namespace-ownership` configuration, the optional ``networkParameterOverrides`` and ``packageOwnership`` blocks can be used, similar to the configuration file used in :doc:`network-bootstrapper`:
.. sourcecode:: groovy

View File

@@ -116,7 +116,7 @@ In order to ensure that a Jolokia agent is instrumented with the JVM run-time, y
* Specify the node configuration parameter ``jmxMonitoringHttpPort``, which will cause the node to attempt to load the Jolokia driver from the ``drivers`` folder.
The format of the driver name needs to be ``jolokia-jvm-{VERSION}-agent.jar`` where VERSION is the version required by Corda, currently |jolokia_version|.
* Start the node with ``java -Dcapsule.jvm.args="-javaagent:drivers/jolokia-jvm-1.6.0-agent.jar=port=7777,host=localhost" -jar corda.jar``.
* Start the node with ``java -Dcapsule.jvm.args="-javaagent:drivers/jolokia-jvm-1.6.1-agent.jar=port=7777,host=localhost" -jar corda.jar``.
The following JMX statistics are exported:

View File

@@ -1061,7 +1061,7 @@ class StatemachineErrorHandlingTest {
* that 3 retries are attempted before recovering.
*/
@Test
fun `error during transition with CommitTransaction action that occurs during the beginning of execution will retry and complete successfully - responding flow`() {
fun `responding flow - error during transition with CommitTransaction action that occurs during the beginning of execution will retry and complete successfully`() {
startDriver {
val charlie = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
@@ -1156,7 +1156,7 @@ class StatemachineErrorHandlingTest {
* the responding flow to recover and finish its flow.
*/
@Test
fun `error during transition with CommitTransaction action that occurs during the beginning of execution will retry and be kept for observation if error persists - responding flow`() {
fun `responding flow - error during transition with CommitTransaction action that occurs during the beginning of execution will retry and be kept for observation if error persists`() {
startDriver {
val charlie = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
@@ -1244,7 +1244,7 @@ class StatemachineErrorHandlingTest {
* succeeds and the flow finishes.
*/
@Test
fun `error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully - responding flow`() {
fun `responding flow - error during transition with CommitTransaction action that occurs when completing a flow and deleting its checkpoint will retry and complete successfully`() {
startDriver {
val charlie = createBytemanNode(CHARLIE_NAME)
val alice = createNode(ALICE_NAME)
@@ -1340,7 +1340,7 @@ class StatemachineErrorHandlingTest {
* send to the responding node and the responding node successfully received it.
*/
@Test
fun `error recording a transaction inside of ReceiveFinalityFlow will keep the flow in for observation`() {
fun `error recording a transaction inside of ReceiveFinalityFlow will keep the flow in for observation` () {
startDriver(notarySpec = NotarySpec(DUMMY_NOTARY_NAME, validating = false)) {
val charlie = createBytemanNode(CHARLIE_NAME, FINANCE_CORDAPPS)
val alice = createNode(ALICE_NAME, FINANCE_CORDAPPS)