Merge remote-tracking branch 'origin/release/os/4.3' into EdP/CORDA-3446-4.4

# Conflicts:
#	docs/source/changelog.rst
This commit is contained in:
stefano 2019-11-25 09:41:50 +00:00
commit 40b1a188f1
11 changed files with 144 additions and 79 deletions

20
Jenkinsfile vendored
View File

@ -1,3 +1,4 @@
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@Library('existing-build-control')
import static com.r3.build.BuildControl.killAllExistingBuildsForJob
@ -23,7 +24,7 @@ pipeline {
"-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " +
"-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " +
"-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" +
" clean pushBuildImage preAllocateForAllParallelUnitAndIntegrationTest --stacktrace"
" clean pushBuildImage preAllocateForAllParallelIntegrationTest preAllocateForAllParallelUnitTest --stacktrace"
}
sh "kubectl auth can-i get pods"
}
@ -31,7 +32,7 @@ pipeline {
stage('Corda Pull Request - Run Tests') {
parallel {
stage('Integration and Unit Tests') {
stage('Integration Tests') {
steps {
sh "./gradlew " +
"-DbuildId=\"\${BUILD_ID}\" " +
@ -41,7 +42,20 @@ pipeline {
"-Dartifactory.password=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Dgit.branch=\"\${GIT_BRANCH}\" " +
"-Dgit.target.branch=\"\${CHANGE_TARGET}\" " +
" deAllocateForAllParallelUnitAndIntegrationTest allParallelUnitAndIntegrationTest --stacktrace"
" deAllocateForAllParallelIntegrationTest allParallelIntegrationTest --stacktrace"
}
}
stage('Unit Tests') {
steps {
sh "./gradlew " +
"-DbuildId=\"\${BUILD_ID}\" " +
"-Dkubenetize=true " +
"-Ddocker.run.tag=\"\${DOCKER_TAG_TO_USE}\" " +
"-Dartifactory.username=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " +
"-Dartifactory.password=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " +
"-Dgit.branch=\"\${GIT_BRANCH}\" " +
"-Dgit.target.branch=\"\${CHANGE_TARGET}\" " +
" deAllocateForAllParallelUnitTest allParallelUnitTest --stacktrace"
}
}
}

View File

@ -609,7 +609,7 @@ task allParallelIntegrationTest(type: ParallelTestGroup) {
numberOfShards 10
streamOutput false
coresPerFork 5
memoryInGbPerFork 10
memoryInGbPerFork 12
distribute DistributeTestsBy.METHOD
}
task allParallelUnitTest(type: ParallelTestGroup) {
@ -617,9 +617,10 @@ task allParallelUnitTest(type: ParallelTestGroup) {
testGroups "test"
numberOfShards 10
streamOutput false
coresPerFork 5
memoryInGbPerFork 6
coresPerFork 3
memoryInGbPerFork 12
distribute DistributeTestsBy.CLASS
nodeTaints "small"
}
task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) {
testGroups "test", "integrationTest"

View File

@ -118,10 +118,11 @@ class DistributedTesting implements Plugin<Project> {
numberOfCoresPerFork = testGrouping.getCoresToUse()
distribution = testGrouping.getDistribution()
podLogLevel = testGrouping.getLogLevel()
taints = testGrouping.getNodeTaints()
sidecarImage = testGrouping.sidecarImage
additionalArgs = testGrouping.additionalArgs
doFirst {
dockerTag = tagToUseForRunningTests ? (ImageBuilding.registryName + ":" + tagToUseForRunningTests) : (imagePushTask.imageName.get() + ":" + imagePushTask.tag.get())
sidecarImage = testGrouping.sidecarImage
additionalArgs = testGrouping.additionalArgs
}
}
def reportOnAllTask = project.rootProject.tasks.create("userDefinedReports${testGrouping.getName().capitalize()}", KubesReporting) {
@ -165,7 +166,7 @@ class DistributedTesting implements Plugin<Project> {
int numberOfPodsToRequest = testGrouping.getShardCount()
int coresPerPod = testGrouping.getCoresToUse()
int memoryGBPerPod = testGrouping.getGbOfMemory()
allocator.allocatePods(numberOfPodsToRequest, coresPerPod, memoryGBPerPod, podPrefix)
allocator.allocatePods(numberOfPodsToRequest, coresPerPod, memoryGBPerPod, podPrefix, testGrouping.getNodeTaints())
}
}

View File

@ -1,13 +1,18 @@
package net.corda.testing;
import io.fabric8.kubernetes.api.model.ContainerFluent;
import io.fabric8.kubernetes.api.model.DoneablePod;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodBuilder;
import io.fabric8.kubernetes.api.model.PodFluent;
import io.fabric8.kubernetes.api.model.PodSpecFluent;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.Status;
import io.fabric8.kubernetes.api.model.StatusCause;
import io.fabric8.kubernetes.api.model.StatusDetails;
import io.fabric8.kubernetes.api.model.Toleration;
import io.fabric8.kubernetes.api.model.TolerationBuilder;
import io.fabric8.kubernetes.client.DefaultKubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientException;
@ -77,12 +82,13 @@ public class KubesTest extends DefaultTask {
String sidecarImage;
Boolean printOutput = false;
List<String> additionalArgs;
List<String> taints = Collections.emptyList();
Integer numberOfCoresPerFork = 4;
Integer memoryGbPerFork = 6;
public volatile List<File> testOutput = Collections.emptyList();
public volatile List<KubePodResult> containerResults = Collections.emptyList();
private final Set<String> remainingPods = Collections.synchronizedSet(new HashSet());
private final Set<String> remainingPods = Collections.synchronizedSet(new HashSet<>());
public static String NAMESPACE = "thisisatest";
@ -341,34 +347,7 @@ public class KubesTest extends DefaultTask {
}
private Pod buildPodRequestWithOnlyWorkerNode(String podName, PersistentVolumeClaim pvc) {
return new PodBuilder()
.withNewMetadata().withName(podName).endMetadata()
.withNewSpec()
.addNewVolume()
.withName("gradlecache")
.withNewHostPath()
.withType("DirectoryOrCreate")
.withPath("/tmp/gradle")
.endHostPath()
.endVolume()
.addNewVolume()
.withName("testruns")
.withNewPersistentVolumeClaim()
.withClaimName(pvc.getMetadata().getName())
.endPersistentVolumeClaim()
.endVolume()
.addNewContainer()
.withImage(dockerTag)
.withCommand("bash")
.withArgs("-c", "sleep 3600")
.addNewEnv()
.withName("DRIVER_NODE_MEMORY")
.withValue("1024m")
.withName("DRIVER_WEB_MEMORY")
.withValue("1024m")
.endEnv()
.withName(podName)
.withNewResources()
return getBasePodDefinition(podName, pvc)
.addToRequests("cpu", new Quantity(numberOfCoresPerFork.toString()))
.addToRequests("memory", new Quantity(memoryGbPerFork.toString()))
.endResources()
@ -382,43 +361,13 @@ public class KubesTest extends DefaultTask {
}
private Pod buildPodRequestWithWorkerNodeAndDbContainer(String podName, PersistentVolumeClaim pvc) {
return new PodBuilder()
.withNewMetadata().withName(podName).endMetadata()
.withNewSpec()
.addNewVolume()
.withName("gradlecache")
.withNewHostPath()
.withType("DirectoryOrCreate")
.withPath("/tmp/gradle")
.endHostPath()
.endVolume()
.addNewVolume()
.withName("testruns")
.withNewPersistentVolumeClaim()
.withClaimName(pvc.getMetadata().getName())
.endPersistentVolumeClaim()
.endVolume()
.addNewContainer()
.withImage(dockerTag)
.withCommand("bash")
.withArgs("-c", "sleep 3600")
.addNewEnv()
.withName("DRIVER_NODE_MEMORY")
.withValue("1024m")
.withName("DRIVER_WEB_MEMORY")
.withValue("1024m")
.endEnv()
.withName(podName)
.withNewResources()
return getBasePodDefinition(podName, pvc)
.addToRequests("cpu", new Quantity(Integer.valueOf(numberOfCoresPerFork - 1).toString()))
.addToRequests("memory", new Quantity(Integer.valueOf(memoryGbPerFork - 1).toString() + "Gi"))
.endResources()
.addNewVolumeMount().withName("gradlecache").withMountPath("/tmp/gradle").endVolumeMount()
.addNewVolumeMount().withName("testruns").withMountPath(TEST_RUN_DIR).endVolumeMount()
.endContainer()
.addNewContainer()
.withImage(sidecarImage)
.addNewEnv()
@ -440,6 +389,39 @@ public class KubesTest extends DefaultTask {
.build();
}
/**
 * Builds the portion of a test-pod definition shared by both the worker-only and
 * worker-plus-db-sidecar pods: metadata, the host-path gradle cache volume, the
 * per-run PVC volume, one toleration per configured taint, and the main container
 * (image, keep-alive command, driver memory env vars).
 *
 * Returns the still-open fluent "resources" scope so each caller can append its
 * own cpu/memory requests and then close the builder (endResources() ... build()).
 *
 * @param podName name used for both the pod metadata and the main container
 * @param pvc     persistent volume claim mounted at the test-run directory
 */
private ContainerFluent.ResourcesNested<PodSpecFluent.ContainersNested<PodFluent.SpecNested<PodBuilder>>> getBasePodDefinition(String podName, PersistentVolumeClaim pvc) {
return new PodBuilder()
.withNewMetadata().withName(podName).endMetadata()
.withNewSpec()
// Shared gradle cache on the node's local disk, created on first use.
.addNewVolume()
.withName("gradlecache")
.withNewHostPath()
.withType("DirectoryOrCreate")
.withPath("/tmp/gradle")
.endHostPath()
.endVolume()
.addNewVolume()
.withName("testruns")
.withNewPersistentVolumeClaim()
.withClaimName(pvc.getMetadata().getName())
.endPersistentVolumeClaim()
.endVolume()
// One toleration per configured taint value; the key ("key"), operator ("Equal")
// and effect ("NoSchedule") are fixed — must match the taints on the target nodes.
.withTolerations(taints.stream().map(taint -> new TolerationBuilder().withKey("key").withValue(taint).withOperator("Equal").withEffect("NoSchedule").build()).collect(Collectors.toList()))
.addNewContainer()
.withImage(dockerTag)
// Keep the container alive for up to an hour; presumably the actual test
// command is exec'd into it afterwards — TODO confirm against the caller.
.withCommand("bash")
.withArgs("-c", "sleep 3600")
// NOTE(review): a single Env entry with withName() called twice — the second
// withName likely overrides the first, yielding only DRIVER_WEB_MEMORY.
// Verify whether two separate addNewEnv() entries were intended.
.addNewEnv()
.withName("DRIVER_NODE_MEMORY")
.withValue("1024m")
.withName("DRIVER_WEB_MEMORY")
.withValue("1024m")
.endEnv()
.withName(podName)
// Intentionally left open: callers add resource requests, then endResources().
.withNewResources();
}
private File startLogPumping(InputStream stdOutIs, int podIdx, File podLogsDirectory, boolean printOutput) throws IOException {
File outputFile = new File(podLogsDirectory, "container-" + podIdx + ".log");

View File

@ -17,6 +17,7 @@ public class ParallelTestGroup extends DefaultTask {
private PodLogLevel logLevel = PodLogLevel.INFO;
private String sidecarImage;
private List<String> additionalArgs = new ArrayList<>();
private List<String> taints = new ArrayList<>();
public DistributeTestsBy getDistribution() {
return distribution;
@ -46,9 +47,17 @@ public class ParallelTestGroup extends DefaultTask {
return logLevel;
}
public String getSidecarImage() { return sidecarImage; }
/** Returns the docker image used for the optional database sidecar container, or null if unset. */
public String getSidecarImage() {
return sidecarImage;
}
public List<String> getAdditionalArgs() { return additionalArgs; }
// NOTE(review): returns the live internal list (no defensive copy), unlike
// getNodeTaints() below — callers can mutate the task's state. Confirm intended.
public List<String> getAdditionalArgs() {
return additionalArgs;
}
/**
 * Returns the node taints configured for this test group as a defensive copy,
 * so callers cannot mutate the task's internal list.
 */
public List<String> getNodeTaints() {
List<String> snapshot = new ArrayList<>(taints.size());
snapshot.addAll(taints);
return snapshot;
}
public void numberOfShards(int shards) {
this.shardCount = shards;
@ -95,4 +104,12 @@ public class ParallelTestGroup extends DefaultTask {
this.additionalArgs.addAll(additionalArgs);
}
/**
 * Varargs convenience for declaring node taints this group's pods should tolerate.
 * Delegates to the List overload.
 *
 * @param taints taint values; pods will carry a matching toleration for each
 */
public void nodeTaints(String... taints) {
// Renamed parameter from the copy-pasted "additionalArgs" — these are taints.
nodeTaints(Arrays.asList(taints));
}
/**
 * Appends the given taint values to this group's accumulated taint list.
 *
 * @param taints taint values to tolerate; appended, not replacing earlier calls
 */
private void nodeTaints(List<String> taints) {
// Renamed parameter from the copy-pasted "additionalArgs" — these are taints.
this.taints.addAll(taints);
}
}

View File

@ -1,6 +1,7 @@
package net.corda.testing;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.TolerationBuilder;
import io.fabric8.kubernetes.api.model.batch.Job;
import io.fabric8.kubernetes.api.model.batch.JobBuilder;
import io.fabric8.kubernetes.client.Config;
@ -36,12 +37,16 @@ public class PodAllocator {
this.logger = LoggerFactory.getLogger(PodAllocator.class);
}
public void allocatePods(Integer number, Integer coresPerPod, Integer memoryPerPod, String prefix) {
public void allocatePods(Integer number,
Integer coresPerPod,
Integer memoryPerPod,
String prefix,
List<String> taints) {
Config config = getConfig();
KubernetesClient client = new DefaultKubernetesClient(config);
List<Job> podsToRequest = IntStream.range(0, number).mapToObj(i -> buildJob("pa-" + prefix + i, coresPerPod, memoryPerPod)).collect(Collectors.toList());
List<Job> podsToRequest = IntStream.range(0, number).mapToObj(i -> buildJob("pa-" + prefix + i, coresPerPod, memoryPerPod, taints)).collect(Collectors.toList());
List<Job> createdJobs = podsToRequest.stream().map(requestedJob -> {
String msg = "PreAllocating " + requestedJob.getMetadata().getName();
if (logger instanceof org.gradle.api.logging.Logger) {
@ -112,7 +117,7 @@ public class PodAllocator {
}
Job buildJob(String podName, Integer coresPerPod, Integer memoryPerPod) {
Job buildJob(String podName, Integer coresPerPod, Integer memoryPerPod, List<String> taints) {
return new JobBuilder().withNewMetadata().withName(podName).endMetadata()
.withNewSpec()
.withTtlSecondsAfterFinished(10)
@ -121,6 +126,7 @@ public class PodAllocator {
.withName(podName + "-pod")
.endMetadata()
.withNewSpec()
.withTolerations(taints.stream().map(taint -> new TolerationBuilder().withKey("key").withValue(taint).withOperator("Equal").withEffect("NoSchedule").build()).collect(Collectors.toList()))
.addNewContainer()
.withImage("busybox:latest")
.withCommand("sh")

View File

@ -85,6 +85,26 @@ To fix this, an explicit type hint must be provided to the compiler:
This stops type inference from occurring and forces the variable to be of type ``AbstractParty``.
.. _platform_version_5_gradle_changes:
Step 2. Update Gradle version and associated dependencies
---------------------------------------------------------
Platform Version 5 requires Gradle 5.4 to build. If you use the Gradle wrapper, you can upgrade by running:
.. code:: shell
./gradlew wrapper --gradle-version 5.4.1
Otherwise, upgrade your installed copy in the usual manner for your operating system.
Additionally, you'll need to add https://repo.gradle.org/gradle/libs-releases as a repository to your project, in order to pick up the
`gradle-api-tooling` dependency. You can do this by adding the following to the repositories in your Gradle file:
.. code-block:: groovy
maven { url 'https://repo.gradle.org/gradle/libs-releases' }
Upgrading apps to Platform Version 4
====================================
@ -124,6 +144,10 @@ You should also ensure you're using Gradle 4.10 (but not 5). If you use the Grad
Otherwise just upgrade your installed copy in the usual manner for your operating system.
.. note:: Platform Version 5 requires a different version of Gradle, so if you're intending to upgrade past Platform Version 4 you may wish
to skip updating Gradle here and upgrade directly to the version required by Platform Version 5. You'll still need to alter the version
numbers in your Gradle file as shown in this section. See :ref:`platform_version_5_gradle_changes`.
Step 3. Update your Gradle build file
-------------------------------------

View File

@ -36,7 +36,7 @@ Unreleased
* Introduced a new low level flow diagnostics tool: checkpoint agent (that can be used standalone or in conjunction with the ``checkpoints dump`` shell command).
See :doc:`checkpoint-tooling` for more information.
* ``NotaryFlow.Client`` now performs transaction verification by default to prevent accidentally sending an invalid transaction to a
non-validating notary. The behaviour can be controlled by passing a constructor parameter flag ``skipVerification``.
Note: this only affects flows that invoke ``NotaryFlow.Client`` directly; there is no behavioural change if using ``FinalityFlow``.
@ -88,6 +88,9 @@ Unreleased
Note that it's a responsibility of a client application to handle RPC reconnection in case this happens.
See :ref:`setting_jvm_args` and :ref:`memory_usage_and_tuning` for further details.
* :doc:`design/data-model-upgrades/package-namespace-ownership` configurations can now be set as described in
:ref:`node_package_namespace_ownership`, when using the Cordformation plugin version 4.0.43.
* Environment variables and system properties can now be provided with underscore separators instead of dots. Neither are case sensitive.
See :ref:`overriding config values <corda_configuration_file_overriding_config>` for more information.

View File

@ -7,6 +7,12 @@ Deploying a node to a server
whether they have developed and tested a CorDapp following the instructions in :doc:`generating-a-node`
or are deploying a third-party CorDapp.
.. note:: When deploying multiple nodes in parallel the package tool (Capsule) that Corda uses can encounter
issues retrieving dependencies. This is due to each node trying to download the dependencies in a common
location. In these cases it is recommended to set the environment variable ``CAPSULE_CACHE_DIR`` which
will allow the Capsule to maintain a separate cache for each node. This is used in the example descriptions
below. See the `Capsule documentation <http://www.capsule.io>`_ for more details.
Linux: Installing and running Corda as a system service
-------------------------------------------------------
We recommend creating system services to run a node and the optional test webserver. This provides logging and service
@ -90,6 +96,7 @@ handling, and ensures the Corda service is run at boot.
WorkingDirectory=/opt/corda
ExecStart=/usr/bin/java -jar /opt/corda/corda.jar
Restart=on-failure
Environment="CAPSULE_CACHE_DIR=./capsule"
[Install]
WantedBy=multi-user.target
@ -244,17 +251,20 @@ at boot, and means the Corda service stays running with no users connected to th
.. code-block:: batch
nssm install cordanode1 C:\ProgramData\Oracle\Java\javapath\java.exe
nssm install cordanode1 java.exe
nssm set cordanode1 AppParameters "-jar corda.jar"
nssm set cordanode1 AppDirectory C:\Corda
nssm set cordanode1 AppStdout C:\Corda\service.log
nssm set cordanode1 AppStderr C:\Corda\service.log
nssm set cordanode1 AppEnvironmentExtra CAPSULE_CACHE_DIR=./capsule
nssm set cordanode1 Description Corda Node - Bank of Breakfast Tea
nssm set cordanode1 Start SERVICE_AUTO_START
sc start cordanode1
9. Modify the batch file:
* If you are installing multiple nodes, use a different service name (``cordanode1``) for each node
* If you are installing multiple nodes, use a different service name (``cordanode1``), and modify
`AppDirectory`, `AppStdout` and `AppStderr` for each node accordingly
* Set an informative description
10. Provision the required certificates to your node. Contact the network permissioning service or see

View File

@ -247,11 +247,18 @@ To copy the same file to all nodes `ext.drivers` can be defined in the top level
}
}
The Cordform task will automatically copy a Jolokia agent JAR into each generated node's `drivers` subdirectory. The version of this JAR
defaults to `1.6.0`. This can be changed by setting the `jolokia_version` property anywhere in your `build.gradle` file:
.. sourcecode:: groovy
ext.jolokia_version = "1.6.1"
.. _node_package_namespace_ownership:
Package namespace ownership
^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specify package namespace ownership, the optional ``networkParameterOverrides`` and ``packageOwnership`` blocks can be used, similar to the configuration file used in :doc:`network-bootstrapper`:
To specify :doc:`design/data-model-upgrades/package-namespace-ownership` configuration, the optional ``networkParameterOverrides`` and ``packageOwnership`` blocks can be used, similar to the configuration file used in :doc:`network-bootstrapper`:
.. sourcecode:: groovy

View File

@ -116,7 +116,7 @@ In order to ensure that a Jolokia agent is instrumented with the JVM run-time, y
* Specify the Node configuration parameter ``jmxMonitoringHttpPort`` which will attempt to load the jolokia driver from the ``drivers`` folder.
The format of the driver name needs to be ``jolokia-jvm-{VERSION}-agent.jar`` where VERSION is the version required by Corda, currently |jolokia_version|.
* Start the node with ``java -Dcapsule.jvm.args="-javaagent:drivers/jolokia-jvm-1.6.0-agent.jar=port=7777,host=localhost" -jar corda.jar``.
* Start the node with ``java -Dcapsule.jvm.args="-javaagent:drivers/jolokia-jvm-1.6.1-agent.jar=port=7777,host=localhost" -jar corda.jar``.
The following JMX statistics are exported: