From 2851534ae7b17e3bfac9b242ae964b7511bc1991 Mon Sep 17 00:00:00 2001 From: Ramzi El-Yafi Date: Wed, 11 Dec 2019 08:29:37 +0000 Subject: [PATCH 01/14] Prevent on-demand tests re-triggering from branch indexing --- .ci/dev/smoke/Jenkinsfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.ci/dev/smoke/Jenkinsfile b/.ci/dev/smoke/Jenkinsfile index d5332b2508..16ae19b53d 100644 --- a/.ci/dev/smoke/Jenkinsfile +++ b/.ci/dev/smoke/Jenkinsfile @@ -5,7 +5,8 @@ killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) pipeline { agent { label 'k8s' } - options { timestamps() } + options { timestamps() + overrideIndexTriggers(false) } triggers { issueCommentTrigger('.*smoke tests.*') @@ -94,4 +95,4 @@ def currentBuildTriggeredByComment() { } return triggerCause != null -} \ No newline at end of file +} From 575df97c52b2ad884d85db57ac87a78cdc676ce4 Mon Sep 17 00:00:00 2001 From: Ramzi El-Yafi Date: Fri, 13 Dec 2019 10:08:51 +0000 Subject: [PATCH 02/14] Mark integration test tasks with "big" node taint --- build.gradle | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/build.gradle b/build.gradle index 5f1ae94300..ff3b1f3eb4 100644 --- a/build.gradle +++ b/build.gradle @@ -606,6 +606,7 @@ task allParallelIntegrationTest(type: ParallelTestGroup) { coresPerFork 5 memoryInGbPerFork 12 distribute DistributeTestsBy.METHOD + nodeTaints "big" } task allParallelUnitTest(type: ParallelTestGroup) { podLogLevel PodLogLevel.INFO @@ -624,6 +625,7 @@ task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) { coresPerFork 6 memoryInGbPerFork 10 distribute DistributeTestsBy.METHOD + nodeTaints "big" } task parallelRegressionTest(type: ParallelTestGroup) { testGroups "test", "integrationTest", "slowIntegrationTest", "smokeTest" @@ -632,6 +634,7 @@ task parallelRegressionTest(type: ParallelTestGroup) { coresPerFork 6 memoryInGbPerFork 10 distribute DistributeTestsBy.METHOD + nodeTaints "big" } task allParallelSmokeTest(type: ParallelTestGroup) { testGroups "slowIntegrationTest", "smokeTest" @@ -640,6 +643,7 @@ task allParallelSmokeTest(type: ParallelTestGroup) { coresPerFork 6 memoryInGbPerFork 10 distribute DistributeTestsBy.CLASS + nodeTaints "big" } apply plugin: 'com.r3.testing.distributed-testing' apply plugin: 'com.r3.testing.image-building' From 5458f11998548711712ce348c558c7b95459f614 Mon Sep 17 00:00:00 2001 From: Ramzi El-Yafi Date: Tue, 17 Dec 2019 13:02:56 +0000 Subject: [PATCH 03/14] Jenkins file for nightly regression tests (#5786) * Jenkins file for nightly regression tests * Use k8s instead of gke cluster --- .ci/dev/nightly-regression/Jenkinsfile | 65 ++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 .ci/dev/nightly-regression/Jenkinsfile diff --git a/.ci/dev/nightly-regression/Jenkinsfile b/.ci/dev/nightly-regression/Jenkinsfile new file mode 100644 index 0000000000..44ea33875b --- /dev/null +++ b/.ci/dev/nightly-regression/Jenkinsfile @@ -0,0 +1,65 @@ +@Library('existing-build-control') +import static com.r3.build.BuildControl.killAllExistingBuildsForJob + +killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) + +pipeline { + agent { label 'k8s' } + options { + timestamps() + overrideIndexTriggers(false) + buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7')) + } + triggers { + pollSCM ignorePostCommitHooks: true, scmpoll_spec: '@midnight' + } + + environment { + DOCKER_TAG_TO_USE = "${env.GIT_COMMIT.subSequence(0, 8)}" + EXECUTOR_NUMBER = "${env.EXECUTOR_NUMBER}" + BUILD_ID = 
"${env.BUILD_ID}-${env.JOB_NAME}" + ARTIFACTORY_CREDENTIALS = credentials('artifactory-credentials') + } + + stages { + stage('Generate Build Image') { + steps { + withCredentials([string(credentialsId: 'container_reg_passwd', variable: 'DOCKER_PUSH_PWD')]) { + sh "./gradlew " + + "-Dkubenetize=true " + + "-Ddocker.push.password=\"\${DOCKER_PUSH_PWD}\" " + + "-Ddocker.work.dir=\"/tmp/\${EXECUTOR_NUMBER}\" " + + "-Ddocker.build.tag=\"\${DOCKER_TAG_TO_USE}\"" + + " clean pushBuildImage --stacktrace" + } + sh "kubectl auth can-i get pods" + } + } + + stage('Regression Test') { + steps { + sh "./gradlew " + + "-DbuildId=\"\${BUILD_ID}\" " + + "-Dkubenetize=true " + + "-Ddocker.run.tag=\"\${DOCKER_TAG_TO_USE}\" " + + "-Dartifactory.username=\"\${ARTIFACTORY_CREDENTIALS_USR}\" " + + "-Dartifactory.password=\"\${ARTIFACTORY_CREDENTIALS_PSW}\" " + + "-Dgit.branch=\"\${GIT_BRANCH}\" " + + "-Dgit.target.branch=\"\${GIT_BRANCH}\" " + + " parallelRegressionTest --stacktrace" + } + } + } + + + post { + always { + archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false + junit testResults: '**/build/test-results-xml/**/*.xml', allowEmptyResults: true + } + cleanup { + deleteDir() /* clean up our workspace */ + } + } +} + From 35c58f1b9bd99e91e8f846987a126749e39e9fe8 Mon Sep 17 00:00:00 2001 From: carolynequinn <44175553+carolynequinn@users.noreply.github.com> Date: Thu, 19 Dec 2019 18:28:11 +0000 Subject: [PATCH 04/14] DOCS: Update UAT.md (#5602) --- docs/source/corda-network/UAT.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/source/corda-network/UAT.md b/docs/source/corda-network/UAT.md index deff5dd389..06eee44129 100644 --- a/docs/source/corda-network/UAT.md +++ b/docs/source/corda-network/UAT.md @@ -1,9 +1,9 @@ -Corda Network: UAT Environment +Corda Network: Pre-Production Environment ============================= -Corda Network UAT seeks to provide a test environment which is as close as possible to Corda Network in its make-up and operation. +Corda Network Pre-Production (or UAT) seeks to provide a test environment which is as close as possible to Corda Network in its make-up and operation. -For owners of tested CorDapps with a firm plan to take them into production, a bespoke UAT environment can be provided by R3. Here, such CorDapps can be further tested in the network configuration they will experience in production, utilising relevant Corda Network Services (including the Identity Operator, and trusted notaries). +For owners of tested CorDapps with a firm plan to take them into production, a bespoke Pre-Production environment is provided. Here, such CorDapps can be further tested in the network configuration they will experience in production, utilising relevant Corda Network Services (including the Identity Operator, Network Map and notaries). Corda UAT is not intended for customers' full test cycles, as it is expected that the bulk of CorDapp testing will occur in simpler network configurations run by the CorDapp provider, but is available for testing of functionally complete and tested CorDapps in realistic network settings to simulate the real-world business environment, including the production settings of network parameters, Corda network services and supported Corda versions. @@ -12,10 +12,10 @@ UAT is therefore more aligned to the testing of the operational characteristics More information about UAT will continue to be uploaded on this site or related sub-sites. 
-Joining the UAT environment +Joining the Pre-Production environment --------------------------- -*The below joining steps assume the potential participant is joining the UAT environment directly, and as such is not “sponsoring” or onboarding other participants. If this is the case, please contact your Corda representative for how to ‘sponsor’ end-participants onto UAT.* +*The below joining steps assume the potential participant is joining the Pre-Production environment directly, and as such is not “sponsoring” or onboarding other participants. If this is the case, please contact your Corda representative for how to ‘sponsor’ end-participants on.* **Pre-requisites:** @@ -29,12 +29,12 @@ Joining the UAT environment * Access to the appropriate environment has been agreed with your project representative with sufficient advance notice (4 weeks standard but may be longer if you have special service requirements) to ensure appropriate SLAs can be in place. Your project representative will be able to supply the booking template. **Note**: -Corda Network UAT is governed by an [independent Foundation](https://corda.network/governance/index.html). +Corda Network Pre-Production is governed by an [independent Foundation](https://corda.network/governance/index.html). -Steps to join UAT environment ------------------------------ +Steps to join the Pre-Production environment +------------------------------------------- -Steps to join are outlined on the [Corda Network UAT microsite](http://uat.network.r3.com/pages/joining/joining.html) +Steps to join are outlined on the [Corda Network microsite](https://corda.network/participation/index.html) - follow any specific instructions for 'Pre-Production'. -For further questions on this process, please contact us - preferably on the mailing list: https://groups.io/g/corda-network +For further questions on this process, please contact us - preferably on the mailing list: https://groups.io/g/corda-network or at info@corda.network From bd436e640d1db572389222ec1ddab691e5bf0beb Mon Sep 17 00:00:00 2001 From: Ramzi El-Yafi Date: Fri, 20 Dec 2019 10:11:26 +0000 Subject: [PATCH 05/14] Fix report generation against regression builds (#5818) --- .ci/dev/regression/Jenkinsfile | 41 +++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/.ci/dev/regression/Jenkinsfile b/.ci/dev/regression/Jenkinsfile index 9447598306..99aa0d1a64 100644 --- a/.ci/dev/regression/Jenkinsfile +++ b/.ci/dev/regression/Jenkinsfile @@ -52,7 +52,46 @@ pipeline { always { archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false junit '**/build/test-results-xml/**/*.xml' - allure includeProperties: false, jdk: '', results: [[path: '**/build/test-results-xml/**']] + + script { + try { + /* + * Copy all JUnit results files into a single top level directory. + * This is necessary to stop the allure plugin from hitting out + * of memory errors due to being passed many directories with + * long paths. + * + * File names are pre-pended with the pod number when + * copied to avoid collisions between files where the same test + * classes have run on multiple pods. 
+ */ + sh label: 'Compact test results', + script: + '''#!/bin/bash + shopt -s globstar + rm -rf allure-input + mkdir allure-input + + for i in **/test-results-xml/**/test-runs/test-reports/** + do + [ -f $i ] && + cp $i allure-input/$(echo $i | sed -e \\ + \'s/.*test-results-xml\\/.*-\\(.*\\)\\/test-runs\\/.*\\/\\(.*\\)$/\\1\\-\\2/\') + done + + echo "Finished compacting JUnit results" + ''' + allure includeProperties: false, + jdk: '', + results: [[path: '**/allure-input']] + } catch (err) { + echo("Allure report generation failed: $err") + + if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) { + currentBuild.result = 'UNSTABLE' + } + } + } } cleanup { deleteDir() /* clean up our workspace */ From 38fb51b74b068d9e05aff0f524a8e166ed60f387 Mon Sep 17 00:00:00 2001 From: Ramzi El-Yafi Date: Tue, 14 Jan 2020 11:10:42 +0000 Subject: [PATCH 06/14] Strategic fix for allure report generation memory issues (#5845) --- .ci/dev/regression/Jenkinsfile | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/.ci/dev/regression/Jenkinsfile b/.ci/dev/regression/Jenkinsfile index 99aa0d1a64..a4017c5c86 100644 --- a/.ci/dev/regression/Jenkinsfile +++ b/.ci/dev/regression/Jenkinsfile @@ -65,22 +65,13 @@ pipeline { * copied to avoid collisions between files where the same test * classes have run on multiple pods. */ - sh label: 'Compact test results', - script: - '''#!/bin/bash - shopt -s globstar - rm -rf allure-input - mkdir allure-input - - for i in **/test-results-xml/**/test-runs/test-reports/** - do - [ -f $i ] && - cp $i allure-input/$(echo $i | sed -e \\ - \'s/.*test-results-xml\\/.*-\\(.*\\)\\/test-runs\\/.*\\/\\(.*\\)$/\\1\\-\\2/\') - done - - echo "Finished compacting JUnit results" - ''' + fileOperations([fileCopyOperation( + includes: '**/test-results-xml/**/test-runs/test-reports/**', + targetLocation: 'allure-input', + flattenFiles: true, + renameFiles: true, + sourceCaptureExpression: '.*test-results-xml/.*-([\\d]+)/.*/([^/]+)$', + targetNameExpression: '$1-$2')]) allure includeProperties: false, jdk: '', results: [[path: '**/allure-input']] From 86347abe798b1621402f454f53cabece79cb4c1c Mon Sep 17 00:00:00 2001 From: Ramzi El-Yafi Date: Tue, 14 Jan 2020 11:52:05 +0000 Subject: [PATCH 07/14] TM-137 Daily email report for release branch regression tests (#5797) * Daily email report for release branch regression tests * Manual merge fix --- .ci/dev/regression/Jenkinsfile | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/.ci/dev/regression/Jenkinsfile b/.ci/dev/regression/Jenkinsfile index a4017c5c86..4c0182e0b1 100644 --- a/.ci/dev/regression/Jenkinsfile +++ b/.ci/dev/regression/Jenkinsfile @@ -52,6 +52,39 @@ pipeline { always { archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false junit '**/build/test-results-xml/**/*.xml' + allure includeProperties: false, jdk: '', results: [[path: '**/build/test-results-xml/**']] + + script + { + // We want to send a summary email, but want to limit to once per day. + // Comparing the dates of the previous and current builds achieves this, + // i.e. we will only send an email for the first build on a given day. 
+ def prevBuildDate = new Date( + currentBuild?.previousBuild.timeInMillis ?: 0).clearTime() + def currentBuildDate = new Date( + currentBuild.timeInMillis).clearTime() + + if (prevBuildDate != currentBuildDate) { + def statusSymbol = '\u2753' + switch(currentBuild.result) { + case 'SUCCESS': + statusSymbol = '\u2705' + break; + case 'UNSTABLE': + case 'FAILURE': + statusSymbol = '\u274c' + break; + default: + break; + } + + echo('First build for this date, sending summary email') + emailext to: '$DEFAULT_RECIPIENTS', + subject: "$statusSymbol" + '$BRANCH_NAME regression tests - $BUILD_STATUS', + mimeType: 'text/html', + body: '${SCRIPT, template="groovy-html.template"}' + } else { + echo('Already sent summary email today, suppressing') script { try { From b195e4f703ab6a6fa245e1e7d1da39c09c226b58 Mon Sep 17 00:00:00 2001 From: Ramzi El-Yafi Date: Thu, 16 Jan 2020 11:55:01 +0000 Subject: [PATCH 08/14] Manual fix of mergr for PR-5797 (#5856) --- .ci/dev/regression/Jenkinsfile | 67 +++++++++++++++++----------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/.ci/dev/regression/Jenkinsfile b/.ci/dev/regression/Jenkinsfile index 4c0182e0b1..942b864291 100644 --- a/.ci/dev/regression/Jenkinsfile +++ b/.ci/dev/regression/Jenkinsfile @@ -52,39 +52,6 @@ pipeline { always { archiveArtifacts artifacts: '**/pod-logs/**/*.log', fingerprint: false junit '**/build/test-results-xml/**/*.xml' - allure includeProperties: false, jdk: '', results: [[path: '**/build/test-results-xml/**']] - - script - { - // We want to send a summary email, but want to limit to once per day. - // Comparing the dates of the previous and current builds achieves this, - // i.e. we will only send an email for the first build on a given day. - def prevBuildDate = new Date( - currentBuild?.previousBuild.timeInMillis ?: 0).clearTime() - def currentBuildDate = new Date( - currentBuild.timeInMillis).clearTime() - - if (prevBuildDate != currentBuildDate) { - def statusSymbol = '\u2753' - switch(currentBuild.result) { - case 'SUCCESS': - statusSymbol = '\u2705' - break; - case 'UNSTABLE': - case 'FAILURE': - statusSymbol = '\u274c' - break; - default: - break; - } - - echo('First build for this date, sending summary email') - emailext to: '$DEFAULT_RECIPIENTS', - subject: "$statusSymbol" + '$BRANCH_NAME regression tests - $BUILD_STATUS', - mimeType: 'text/html', - body: '${SCRIPT, template="groovy-html.template"}' - } else { - echo('Already sent summary email today, suppressing') script { try { @@ -116,6 +83,40 @@ pipeline { } } } + + script + { + // We want to send a summary email, but want to limit to once per day. + // Comparing the dates of the previous and current builds achieves this, + // i.e. we will only send an email for the first build on a given day. 
+ def prevBuildDate = new Date( + currentBuild?.previousBuild.timeInMillis ?: 0).clearTime() + def currentBuildDate = new Date( + currentBuild.timeInMillis).clearTime() + + if (prevBuildDate != currentBuildDate) { + def statusSymbol = '\u2753' + switch(currentBuild.result) { + case 'SUCCESS': + statusSymbol = '\u2705' + break; + case 'UNSTABLE': + case 'FAILURE': + statusSymbol = '\u274c' + break; + default: + break; + } + + echo('First build for this date, sending summary email') + emailext to: '$DEFAULT_RECIPIENTS', + subject: "$statusSymbol" + '$BRANCH_NAME regression tests - $BUILD_STATUS', + mimeType: 'text/html', + body: '${SCRIPT, template="groovy-html.template"}' + } else { + echo('Already sent summary email today, suppressing') + } + } } cleanup { deleteDir() /* clean up our workspace */ From f02bfc134b51981221a8ab47e965010e66e34a4e Mon Sep 17 00:00:00 2001 From: Christian Sailer Date: Thu, 23 Jan 2020 16:40:38 +0000 Subject: [PATCH 09/14] NOTICK disable max line length check in detekt. (#5882) --- detekt-config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detekt-config.yml b/detekt-config.yml index 0fce7da45a..e1e15b2f99 100644 --- a/detekt-config.yml +++ b/detekt-config.yml @@ -172,7 +172,7 @@ style: ignoreNamedArgument: true ignoreEnums: true MaxLineLength: - active: true + active: false excludes: "**/buildSrc/**" maxLineLength: 140 excludePackageStatements: true From ffe75db464fc942d7074dc2234f0d738fd1ebfdd Mon Sep 17 00:00:00 2001 From: Ramzi El-Yafi Date: Fri, 24 Jan 2020 10:39:23 +0000 Subject: [PATCH 10/14] TM-170 Generic on-demand-test configuration (#5863) * Generic on-demand test configuration * Rename library "magic" string from existing-build-control to corda-shared-build-pipeline-steps --- .ci/dev/integration/Jenkinsfile | 2 +- .ci/dev/nightly-regression/Jenkinsfile | 2 +- .ci/dev/on-demand-tests/Jenkinsfile | 6 ++++++ .ci/dev/on-demand-tests/commentMappings.yml | 4 ++++ .ci/dev/regression/Jenkinsfile | 2 +- .ci/dev/smoke/Jenkinsfile | 2 +- .ci/dev/unit/Jenkinsfile | 2 +- Jenkinsfile | 4 ++-- 8 files changed, 17 insertions(+), 7 deletions(-) create mode 100644 .ci/dev/on-demand-tests/Jenkinsfile create mode 100644 .ci/dev/on-demand-tests/commentMappings.yml diff --git a/.ci/dev/integration/Jenkinsfile b/.ci/dev/integration/Jenkinsfile index ca32e752b4..de89aec99e 100644 --- a/.ci/dev/integration/Jenkinsfile +++ b/.ci/dev/integration/Jenkinsfile @@ -1,5 +1,5 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob -@Library('existing-build-control') +@Library('corda-shared-build-pipeline-steps') import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) diff --git a/.ci/dev/nightly-regression/Jenkinsfile b/.ci/dev/nightly-regression/Jenkinsfile index 44ea33875b..db140bece0 100644 --- a/.ci/dev/nightly-regression/Jenkinsfile +++ b/.ci/dev/nightly-regression/Jenkinsfile @@ -1,4 +1,4 @@ -@Library('existing-build-control') +@Library('corda-shared-build-pipeline-steps') import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) diff --git a/.ci/dev/on-demand-tests/Jenkinsfile b/.ci/dev/on-demand-tests/Jenkinsfile new file mode 100644 index 0000000000..25127ef133 --- /dev/null +++ b/.ci/dev/on-demand-tests/Jenkinsfile @@ -0,0 +1,6 @@ +@Library('corda-shared-build-pipeline-steps') _ +import static com.r3.build.BuildControl.killAllExistingBuildsForJob + 
+killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) + +onDemandTestPipeline('k8s', '.ci/dev/on-demand-tests/commentMappings.yml') diff --git a/.ci/dev/on-demand-tests/commentMappings.yml b/.ci/dev/on-demand-tests/commentMappings.yml new file mode 100644 index 0000000000..3b68c8366c --- /dev/null +++ b/.ci/dev/on-demand-tests/commentMappings.yml @@ -0,0 +1,4 @@ +integration: { allParallelIntegrationTest } +pr-merge: { parallelRegressionTest } +smoke: { allParallelSmokeTest } +unit: { allParallelUnitTest } diff --git a/.ci/dev/regression/Jenkinsfile b/.ci/dev/regression/Jenkinsfile index 942b864291..d08e37967c 100644 --- a/.ci/dev/regression/Jenkinsfile +++ b/.ci/dev/regression/Jenkinsfile @@ -1,4 +1,4 @@ -@Library('existing-build-control') +@Library('corda-shared-build-pipeline-steps') import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) diff --git a/.ci/dev/smoke/Jenkinsfile b/.ci/dev/smoke/Jenkinsfile index 16ae19b53d..f24c97a898 100644 --- a/.ci/dev/smoke/Jenkinsfile +++ b/.ci/dev/smoke/Jenkinsfile @@ -1,4 +1,4 @@ -@Library('existing-build-control') +@Library('corda-shared-build-pipeline-steps') import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) diff --git a/.ci/dev/unit/Jenkinsfile b/.ci/dev/unit/Jenkinsfile index 9bd4c4243b..736e4bf235 100644 --- a/.ci/dev/unit/Jenkinsfile +++ b/.ci/dev/unit/Jenkinsfile @@ -1,5 +1,5 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob -@Library('existing-build-control') +@Library('corda-shared-build-pipeline-steps') import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) diff --git a/Jenkinsfile b/Jenkinsfile index dcd0522239..0c5b0f188f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,5 +1,5 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob -@Library('existing-build-control') +@Library('corda-shared-build-pipeline-steps') import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) @@ -71,4 +71,4 @@ pipeline { deleteDir() /* clean up our workspace */ } } -} \ No newline at end of file +} From 8f7367add7c5ec5b05650e35e8bfd1b957736369 Mon Sep 17 00:00:00 2001 From: Ryan Fowler Date: Thu, 19 Dec 2019 10:52:34 +0000 Subject: [PATCH 11/14] CORDA-3509: add documentation about Corda features and their corresponding platformVersion and network minimumPlatformVersion --- docs/source/api-contract-constraints.rst | 3 + docs/source/features-versions.rst | 85 ++++++++++++++++++++++++ docs/source/network-map.rst | 4 ++ 3 files changed, 92 insertions(+) create mode 100644 docs/source/features-versions.rst diff --git a/docs/source/api-contract-constraints.rst b/docs/source/api-contract-constraints.rst index ad00dbc65f..f019212989 100644 --- a/docs/source/api-contract-constraints.rst +++ b/docs/source/api-contract-constraints.rst @@ -12,6 +12,9 @@ API: Contract Constraints .. note:: Before reading this page, you should be familiar with the key concepts of :doc:`key-concepts-contracts`. +.. note:: As of Corda |corda_version| the `minimumPlatformVersion` required to use these features is 4 + (see :ref:`Network Parameters ` and :doc:`features-versions` for more details). + .. 
contents:: Reasons for Contract Constraints diff --git a/docs/source/features-versions.rst b/docs/source/features-versions.rst new file mode 100644 index 0000000000..f103bb7a9d --- /dev/null +++ b/docs/source/features-versions.rst @@ -0,0 +1,85 @@ +Corda Features to Versions +========================== + +New versions of Corda introduce new features. These fall into one of three categories which have subtle but important implications for +node owners, application developers and network operators. + +The first set are changes that have no impact on application developers or the Corda network protocol. An example would be support for +a new HSM or database system, for example, and which are of interest only to a node's operator. + +The second set are new or changed APIs, which are of interest to CorDapp developers. When a release of Corda ships such features, the +Platform Version of that node is incremented so that a CorDapp that relies on such a new or changed feature can detect this (eg to +prevent it from running on a node without the feature or to trigger an alternative optimised codepath if the feature is present). The +app developer should set the CorDapp's minimumPlatformVersion parameter to signal the minimum Platform Version against which the app +can run or has been tested. If the application has also been tested against a greater platform version and can exploit it if present, +the node can also set the targetPlatformVersion field. + +The third set of changes are those which could affect the operation of a Corda network. Examples would include a change to the +serialisation format or flow/wire protocol, or introduction of a new transaction component. These are changes to the core data model and +these features have the property that it is not safe for any node or application to take advantage of until all nodes on the network +are capable of understanding them. Such features are thus only enabled in a node if the network to which it is connected has published +a minimumPlatformVersion in its network parameters that is greater than or equal to the Corda Platform Version that introduced the +feature. For example, Corda 4.0 nodes, which implement Corda Platform Version 4, can only take advantage of the Corda Reference States +feature when connected to a network with mPV 4. + +Generally the rules work this way: + +- IF (CorDapp.mPV > node.PV) THEN + prevent the CorDapp from running (this signals that it cannot run without the new feature). +- IF (CorDapp.mPV <= node.PV AND CorDapp.targetPV < node.PV) THEN + this means the node is ahead of the CorDapp so it might choose to trigger some code paths that emulate some old behaviour that the + CorDapp expected on that version. +- IF (CorDapp.mPV <= node.PV AND CorDapp.targetPV == node.PV) THEN + just use the new mechanism because the CorDapp and the node are perfectly aligned. +- IF (CorDapp.mPV <= node.PV AND CorDapp.targetPV > node.PV) THEN + this means that the CorDapp is ahead of the running node, but it must have some alternative runtime code paths built in to be able + to simulate the new behaviour using old apis. + +.. 
list-table:: Corda Features + :header-rows: 1 + + * - Feature + - Corda Platform Version (PV) + - Min Network Platform Version (network mPV) + - Introduced in OS version + - Introduced in Enterprise version + * - Observer Nodes + - 2 + - 2 + - 2.0 + - n/a + * - Corda Serialization Framework + - 3 + - 3 + - 3.0 + - 3.0 + * - Hash Constraints + - 1 + - 1 + - 1.0 + - 1.0 + * - Whitelist Constraints + - 3 + - 3 + - 3.0 + - 3.0 + * - Inline Finality Flow + - 4 + - 3 + - 4.0 + - 4.0 + * - Reference States + - 4 + - 4 + - 4.0 + - 4.0 + * - Signature Constraints + - 4 + - 4 + - 4.0 + - 4.0 + * - Underlying Support for Accounts + - 5 + - 4 + - 4.3 + - 4.3 diff --git a/docs/source/network-map.rst b/docs/source/network-map.rst index fba6220071..9c7ffab830 100644 --- a/docs/source/network-map.rst +++ b/docs/source/network-map.rst @@ -97,6 +97,8 @@ cluster generated like this can be sized for the maximum size you may need, and More information can be found in :doc:`network-bootstrapper`. +.. _network-parameters: + Network parameters ------------------ @@ -152,6 +154,8 @@ The current set of network parameters: Encountering an owned contract in a JAR that is not signed by the rightful owner is most likely a sign of malicious behaviour, and should be reported. The transaction verification logic will throw an exception when this happens. +.. note:: To determine which `minimumPlatformVersion` a zone must mandate in order to permit all the features of Corda |corda_version| see :doc:`features-versions` + More parameters will be added in future releases to regulate things like allowed port numbers, whether or not IPv6 connectivity is required for zone members, required cryptographic algorithms and roll-out schedules (e.g. for moving to post quantum cryptography), parameters related to SGX and so on. From 338671e6b2de95158cd425cf372dda2c207bf42d Mon Sep 17 00:00:00 2001 From: Ryan Fowler Date: Mon, 27 Jan 2020 09:42:28 +0000 Subject: [PATCH 12/14] CORDA-3509: add documentation about Corda features and their corresponding platformVersion and network minimumPlatformVersion --- docs/source/features-versions.rst | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/docs/source/features-versions.rst b/docs/source/features-versions.rst index f103bb7a9d..9ed8801926 100644 --- a/docs/source/features-versions.rst +++ b/docs/source/features-versions.rst @@ -22,18 +22,13 @@ a minimumPlatformVersion in its network parameters that is greater than or equal feature. For example, Corda 4.0 nodes, which implement Corda Platform Version 4, can only take advantage of the Corda Reference States feature when connected to a network with mPV 4. -Generally the rules work this way: - -- IF (CorDapp.mPV > node.PV) THEN - prevent the CorDapp from running (this signals that it cannot run without the new feature). -- IF (CorDapp.mPV <= node.PV AND CorDapp.targetPV < node.PV) THEN - this means the node is ahead of the CorDapp so it might choose to trigger some code paths that emulate some old behaviour that the - CorDapp expected on that version. -- IF (CorDapp.mPV <= node.PV AND CorDapp.targetPV == node.PV) THEN - just use the new mechanism because the CorDapp and the node are perfectly aligned. -- IF (CorDapp.mPV <= node.PV AND CorDapp.targetPV > node.PV) THEN - this means that the CorDapp is ahead of the running node, but it must have some alternative runtime code paths built in to be able - to simulate the new behaviour using old apis. 
+If there is a Platform Version below which your application will not run or is not supported, then signal that with the application's +`CorDapp.mPV` field. This will prevent older nodes from running your app. Nodes which support newer Platform Versions may also use this +field to trigger code paths that emulate behaviours that were in force on that older Platform Version to maximise compatibility. However, +if you have tested your app against newer versions of Corda and know your node can take advantage of the new Platform Version behaviours +if present, you can signal this by using `CorDapp.targetPV` to declare the latest Platform Version against which the app has been tested +and is known to work. In this way, it is possible to ship CorDapps that can both run on all nodes supporting some minimum Platform Version +of Corda as well as opt in to newer features should they happen to be available on any given node. .. list-table:: Corda Features :header-rows: 1 From 9ccbfe178c267a64da1732fb627b9dabcbefb044 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Waldemar=20=C5=BBurowski?= <45210402+wzur-r3@users.noreply.github.com> Date: Tue, 11 Feb 2020 08:40:22 +0000 Subject: [PATCH 13/14] Updating Artifactory URLs for Corda OS 4.3 (#5930) * Switching Artifactory URLs from obsolete ci-artifactory to software.r3.com --- build.gradle | 4 ++-- create-jdk8u/build.gradle | 2 +- docker/src/bash/example-mini-network.sh | 8 ++++---- docs/source/network-builder.rst | 4 ++-- docs/source/node-upgrade-notes.rst | 2 +- docs/source/testnet-explorer-corda.rst | 6 +++--- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/build.gradle b/build.gradle index 93001ccf3b..6e49f4f5f1 100644 --- a/build.gradle +++ b/build.gradle @@ -107,7 +107,7 @@ buildscript { ext.artifactory_plugin_version = constants.getProperty('artifactoryPluginVersion') ext.hikari_version = '3.3.1' ext.liquibase_version = '3.6.3' - ext.artifactory_contextUrl = 'https://ci-artifactory.corda.r3cev.com/artifactory' + ext.artifactory_contextUrl = 'https://software.r3.com/artifactory' ext.snake_yaml_version = constants.getProperty('snakeYamlVersion') ext.docker_compose_rule_version = '0.35.0' ext.selenium_version = '3.141.59' @@ -153,7 +153,7 @@ buildscript { url 'https://kotlin.bintray.com/kotlinx' } maven { - url "https://ci-artifactory.corda.r3cev.com/artifactory/corda-dependencies-dev" + url "$artifactory_contextUrl/corda-dependencies-dev" } maven { url "$artifactory_contextUrl/corda-releases" diff --git a/create-jdk8u/build.gradle b/create-jdk8u/build.gradle index 7c03dcbe3e..809bfdadc5 100644 --- a/create-jdk8u/build.gradle +++ b/create-jdk8u/build.gradle @@ -3,7 +3,7 @@ buildscript { file("../constants.properties").withInputStream { constants.load(it) } ext { - artifactory_contextUrl = 'https://ci-artifactory.corda.r3cev.com/artifactory' + artifactory_contextUrl = 'https://software.r3.com/artifactory' artifactory_plugin_version = constants.getProperty('artifactoryPluginVersion') proguard_version = constants.getProperty("proguardVersion") } diff --git a/docker/src/bash/example-mini-network.sh b/docker/src/bash/example-mini-network.sh index 33da0fbf25..569ea52e25 100755 --- a/docker/src/bash/example-mini-network.sh +++ b/docker/src/bash/example-mini-network.sh @@ -7,9 +7,9 @@ DOCKER_IMAGE_VERSION="corda-zulu-4.3-SNAPSHOT" mkdir cordapps rm -f cordapps/* -wget -O cordapps/finance-contracts.jar 
https://ci-artifactory.corda.r3cev.com/artifactory/list/corda-dev/net/corda/corda-finance-contracts/${CORDAPP_VERSION}/corda-finance-contracts-${CORDAPP_VERSION}.jar -wget -O cordapps/finance-workflows.jar https://ci-artifactory.corda.r3cev.com/artifactory/list/corda-dev/net/corda/corda-finance-workflows/${CORDAPP_VERSION}/corda-finance-workflows-${CORDAPP_VERSION}.jar -wget -O cordapps/confidential-identities.jar https://ci-artifactory.corda.r3cev.com/artifactory/list/corda-dev/net/corda/corda-confidential-identities/${CORDAPP_VERSION}/corda-confidential-identities-${CORDAPP_VERSION}.jar +wget -O cordapps/finance-contracts.jar https://software.r3.com/artifactory/list/corda-dev/net/corda/corda-finance-contracts/${CORDAPP_VERSION}/corda-finance-contracts-${CORDAPP_VERSION}.jar +wget -O cordapps/finance-workflows.jar https://software.r3.com/artifactory/list/corda-dev/net/corda/corda-finance-workflows/${CORDAPP_VERSION}/corda-finance-workflows-${CORDAPP_VERSION}.jar +wget -O cordapps/confidential-identities.jar https://software.r3.com/artifactory/list/corda-dev/net/corda/corda-confidential-identities/${CORDAPP_VERSION}/corda-confidential-identities-${CORDAPP_VERSION}.jar rm keystore @@ -83,4 +83,4 @@ do --name ${NODE} \ --network="${NETWORK_NAME}" \ corda/${DOCKER_IMAGE_VERSION}:latest config-generator --generic -done \ No newline at end of file +done diff --git a/docs/source/network-builder.rst b/docs/source/network-builder.rst index 1629920948..168888ee21 100644 --- a/docs/source/network-builder.rst +++ b/docs/source/network-builder.rst @@ -16,7 +16,7 @@ Unlike the official image, a `node.conf` file and CorDapps are embedded into the More backends may be added in future. The tool is open source, so contributions to add more destinations for the containers are welcome! -`Download the Corda Network Builder `_. +`Download the Corda Network Builder `_. .. _pre-requisites: @@ -195,4 +195,4 @@ node has been started correctly, run the following in the previously connected S Shutting down the nodes ----------------------- -Run ``docker kill $(docker ps -q)`` to kill all running Docker processes. \ No newline at end of file +Run ``docker kill $(docker ps -q)`` to kill all running Docker processes. diff --git a/docs/source/node-upgrade-notes.rst b/docs/source/node-upgrade-notes.rst index d6c0da05b4..8830eb9bef 100644 --- a/docs/source/node-upgrade-notes.rst +++ b/docs/source/node-upgrade-notes.rst @@ -50,7 +50,7 @@ for further information. Step 4. Replace ``corda.jar`` with the new version -------------------------------------------------- -Download the latest version of Corda from `our Artifactory site `_. +Download the latest version of Corda from `our Artifactory site `_. Make sure it's available on your path, and that you've read the :doc:`release-notes`, in particular to discover what version of Java this node requires. diff --git a/docs/source/testnet-explorer-corda.rst b/docs/source/testnet-explorer-corda.rst index e0fdbc2fd4..e62452253f 100644 --- a/docs/source/testnet-explorer-corda.rst +++ b/docs/source/testnet-explorer-corda.rst @@ -34,8 +34,8 @@ couple of resources. .. 
code-block:: bash - wget https://ci-artifactory.corda.r3cev.com/artifactory/corda-releases/net/corda/corda-finance-contracts/|corda_version|/corda-finance-contracts-|corda_version|.jar - wget https://ci-artifactory.corda.r3cev.com/artifactory/corda-releases/net/corda/corda-finance-workflows/|corda_version|/corda-finance-workflows-|corda_version|.jar + wget https://software.r3.com/artifactory/corda-releases/net/corda/corda-finance-contracts/|corda_version|/corda-finance-contracts-|corda_version|.jar + wget https://software.r3.com/artifactory/corda-releases/net/corda/corda-finance-workflows/|corda_version|/corda-finance-workflows-|corda_version|.jar This is required to run some flows to check your connections, and to issue/transfer cash to counterparties. Copy it to the Corda installation location: @@ -70,7 +70,7 @@ couple of resources. .. code:: bash - http://ci-artifactory.corda.r3cev.com/artifactory/corda-releases/net/corda/corda-tools-explorer/|corda_version|-corda/corda-tools-explorer-|corda_version|-corda.jar + https://software.r3.com/artifactory/corda-releases/net/corda/corda-tools-explorer/|corda_version|-corda/corda-tools-explorer-|corda_version|-corda.jar .. warning:: This Node Explorer is incompatible with the Corda Enterprise distribution and vice versa as they currently use different serialisation schemes (Kryo vs AMQP). From 42eca48a02934cb2a05eae2566a58ac5afa0545d Mon Sep 17 00:00:00 2001 From: Razvan Codreanu <52859362+Schife@users.noreply.github.com> Date: Wed, 12 Feb 2020 13:58:30 +0000 Subject: [PATCH 14/14] TM-197 Build stability changes (#5947) * TM-197 switching 4.3 to use local k8s instances and also make the maximum duration of builds 3 hours, fix 1 test and ignore 2 flaky ones * update to use local-k8s version of the plugin Co-authored-by: Stefano Franz --- .ci/dev/integration/Jenkinsfile | 7 +++++-- .ci/dev/nightly-regression/Jenkinsfile | 3 ++- .ci/dev/on-demand-tests/Jenkinsfile | 2 +- .ci/dev/regression/Jenkinsfile | 3 ++- .ci/dev/smoke/Jenkinsfile | 9 ++++++--- .ci/dev/unit/Jenkinsfile | 7 +++++-- Jenkinsfile | 7 +++++-- build.gradle | 16 ++++++++-------- .../coretests/crypto/X509NameConstraintsTest.kt | 2 ++ .../distributed/DistributedServiceTests.kt | 3 +++ 10 files changed, 39 insertions(+), 20 deletions(-) diff --git a/.ci/dev/integration/Jenkinsfile b/.ci/dev/integration/Jenkinsfile index de89aec99e..1e5d032e8d 100644 --- a/.ci/dev/integration/Jenkinsfile +++ b/.ci/dev/integration/Jenkinsfile @@ -5,8 +5,11 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) pipeline { - agent { label 'k8s' } - options { timestamps() } + agent { label 'local-k8s' } + options { + timestamps() + timeout(time: 3, unit: 'HOURS') + } environment { DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}" diff --git a/.ci/dev/nightly-regression/Jenkinsfile b/.ci/dev/nightly-regression/Jenkinsfile index db140bece0..de26a41c90 100644 --- a/.ci/dev/nightly-regression/Jenkinsfile +++ b/.ci/dev/nightly-regression/Jenkinsfile @@ -4,11 +4,12 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) pipeline { - agent { label 'k8s' } + agent { label 'local-k8s' } options { timestamps() overrideIndexTriggers(false) buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7')) + timeout(time: 3, unit: 'HOURS') } triggers { pollSCM ignorePostCommitHooks: true, scmpoll_spec: 
'@midnight' diff --git a/.ci/dev/on-demand-tests/Jenkinsfile b/.ci/dev/on-demand-tests/Jenkinsfile index 25127ef133..f59d3d67d0 100644 --- a/.ci/dev/on-demand-tests/Jenkinsfile +++ b/.ci/dev/on-demand-tests/Jenkinsfile @@ -3,4 +3,4 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) -onDemandTestPipeline('k8s', '.ci/dev/on-demand-tests/commentMappings.yml') +onDemandTestPipeline('local-k8s', '.ci/dev/on-demand-tests/commentMappings.yml') diff --git a/.ci/dev/regression/Jenkinsfile b/.ci/dev/regression/Jenkinsfile index d08e37967c..ed550bd401 100644 --- a/.ci/dev/regression/Jenkinsfile +++ b/.ci/dev/regression/Jenkinsfile @@ -4,10 +4,11 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) pipeline { - agent { label 'gke' } + agent { label 'local-k8s' } options { timestamps() buildDiscarder(logRotator(daysToKeepStr: '7', artifactDaysToKeepStr: '7')) + timeout(time: 3, unit: 'HOURS') } environment { diff --git a/.ci/dev/smoke/Jenkinsfile b/.ci/dev/smoke/Jenkinsfile index f24c97a898..05aec41e59 100644 --- a/.ci/dev/smoke/Jenkinsfile +++ b/.ci/dev/smoke/Jenkinsfile @@ -4,9 +4,12 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) pipeline { - agent { label 'k8s' } - options { timestamps() - overrideIndexTriggers(false) } + agent { label 'local-k8s' } + options { + timestamps() + overrideIndexTriggers(false) + timeout(time: 3, unit: 'HOURS') + } triggers { issueCommentTrigger('.*smoke tests.*') diff --git a/.ci/dev/unit/Jenkinsfile b/.ci/dev/unit/Jenkinsfile index 736e4bf235..98f43b4428 100644 --- a/.ci/dev/unit/Jenkinsfile +++ b/.ci/dev/unit/Jenkinsfile @@ -5,8 +5,11 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) pipeline { - agent { label 'k8s' } - options { timestamps() } + agent { label 'local-k8s' } + options { + timestamps() + timeout(time: 3, unit: 'HOURS') + } environment { DOCKER_TAG_TO_USE = "${UUID.randomUUID().toString().toLowerCase().subSequence(0, 12)}" diff --git a/Jenkinsfile b/Jenkinsfile index 0c5b0f188f..b81f50ed61 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -5,8 +5,11 @@ import static com.r3.build.BuildControl.killAllExistingBuildsForJob killAllExistingBuildsForJob(env.JOB_NAME, env.BUILD_NUMBER.toInteger()) pipeline { - agent { label 'k8s' } - options { timestamps() } + agent { label 'local-k8s' } + options { + timestamps() + timeout(time: 3, unit: 'HOURS') + } environment { DOCKER_TAG_TO_USE = "${env.GIT_COMMIT.subSequence(0, 8)}" diff --git a/build.gradle b/build.gradle index 6e49f4f5f1..e1b3594f59 100644 --- a/build.gradle +++ b/build.gradle @@ -179,7 +179,7 @@ buildscript { // Capsule gradle plugin forked and maintained locally to support Gradle 5.x // See https://github.com/corda/gradle-capsule-plugin classpath "us.kirchmeier:gradle-capsule-plugin:1.0.4_r3" - classpath group: "com.r3.testing", name: "gradle-distributed-testing-plugin", version: "1.2-SNAPSHOT", changing: true + classpath group: "com.r3.testing", name: "gradle-distributed-testing-plugin", version: "1.2-LOCAL-K8S-SHARED-CACHE-SNAPSHOT", changing: true classpath "com.bmuschko:gradle-docker-plugin:5.0.0" } } @@ -620,7 +620,7 @@ task allParallelIntegrationTest(type: ParallelTestGroup) { testGroups "integrationTest" numberOfShards 10 streamOutput 
false - coresPerFork 5 + coresPerFork 2 memoryInGbPerFork 12 distribute DistributeTestsBy.METHOD nodeTaints "big" @@ -630,7 +630,7 @@ task allParallelUnitTest(type: ParallelTestGroup) { testGroups "test" numberOfShards 10 streamOutput false - coresPerFork 3 + coresPerFork 2 memoryInGbPerFork 12 distribute DistributeTestsBy.CLASS nodeTaints "small" @@ -639,25 +639,25 @@ task allParallelUnitAndIntegrationTest(type: ParallelTestGroup) { testGroups "test", "integrationTest" numberOfShards 15 streamOutput false - coresPerFork 6 + coresPerFork 2 memoryInGbPerFork 10 distribute DistributeTestsBy.METHOD nodeTaints "big" } task parallelRegressionTest(type: ParallelTestGroup) { testGroups "test", "integrationTest", "slowIntegrationTest", "smokeTest" - numberOfShards 6 + numberOfShards 10 streamOutput false - coresPerFork 6 + coresPerFork 2 memoryInGbPerFork 10 distribute DistributeTestsBy.METHOD nodeTaints "big" } task allParallelSmokeTest(type: ParallelTestGroup) { testGroups "slowIntegrationTest", "smokeTest" - numberOfShards 4 + numberOfShards 10 streamOutput false - coresPerFork 6 + coresPerFork 2 memoryInGbPerFork 10 distribute DistributeTestsBy.CLASS nodeTaints "big" diff --git a/core-tests/src/test/kotlin/net/corda/coretests/crypto/X509NameConstraintsTest.kt b/core-tests/src/test/kotlin/net/corda/coretests/crypto/X509NameConstraintsTest.kt index cb285c5aff..c355ef8567 100644 --- a/core-tests/src/test/kotlin/net/corda/coretests/crypto/X509NameConstraintsTest.kt +++ b/core-tests/src/test/kotlin/net/corda/coretests/crypto/X509NameConstraintsTest.kt @@ -12,6 +12,7 @@ import org.bouncycastle.asn1.x509.GeneralSubtree import org.bouncycastle.asn1.x509.NameConstraints import org.bouncycastle.jce.provider.BouncyCastleProvider import org.junit.Test +import java.security.Security import java.security.UnrecoverableKeyException import java.security.cert.CertPathValidator import java.security.cert.CertPathValidatorException @@ -94,6 +95,7 @@ class X509NameConstraintsTest { @Test fun `x500 name with correct cn and extra attribute`() { + Security.addProvider(BouncyCastleProvider()) val acceptableNames = listOf("CN=Bank A TLS, UID=", "O=Bank A") .map { GeneralSubtree(GeneralName(X500Name(it))) }.toTypedArray() diff --git a/node/src/integration-test-slow/kotlin/net/corda/node/services/distributed/DistributedServiceTests.kt b/node/src/integration-test-slow/kotlin/net/corda/node/services/distributed/DistributedServiceTests.kt index 82eaec0d59..c028d710ce 100644 --- a/node/src/integration-test-slow/kotlin/net/corda/node/services/distributed/DistributedServiceTests.kt +++ b/node/src/integration-test-slow/kotlin/net/corda/node/services/distributed/DistributedServiceTests.kt @@ -25,6 +25,7 @@ import net.corda.testing.node.internal.DummyClusterSpec import net.corda.testing.node.internal.FINANCE_CORDAPPS import net.corda.testing.node.internal.cordappWithPackages import org.assertj.core.api.Assertions.assertThat +import org.junit.Ignore import org.junit.Test import rx.Observable import java.util.* @@ -81,6 +82,7 @@ class DistributedServiceTests { } // TODO This should be in RaftNotaryServiceTests + @Ignore @Test fun `cluster survives if a notary is killed`() { setup { @@ -119,6 +121,7 @@ class DistributedServiceTests { // TODO Use a dummy distributed service rather than a Raft Notary Service as this test is only about Artemis' ability // to handle distributed services + @Ignore @Test fun `requests are distributed evenly amongst the nodes`() { setup {
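
Patches 07 and 08 above gate the regression summary email on a calendar-date comparison between the current build and the previous one, so at most one email goes out per day. That guard can be exercised outside Jenkins; the sketch below is a minimal plain-Groovy rendering of the same idea, assuming long millisecond timestamps stand in for `currentBuild.timeInMillis` and `currentBuild.previousBuild.timeInMillis` (the `currentBuild` object and the `emailext` step only exist inside a running pipeline, so they are not reproduced here — this is an illustration of the date logic, not the committed Jenkinsfile).

```groovy
// Minimal sketch of the once-per-day guard from the regression Jenkinsfile.
// Timestamps are plain longs here so the logic can be run with the groovy CLI;
// in the pipeline they come from currentBuild and currentBuild.previousBuild.
boolean isFirstBuildOfDay(Long previousBuildMillis, long currentBuildMillis) {
    // No previous build (first ever run) counts as the first build of the day.
    Date prevDate = new Date(previousBuildMillis ?: 0L).clearTime()
    Date currDate = new Date(currentBuildMillis).clearTime()
    return prevDate != currDate
}

def lateYesterday = Date.parse('yyyy-MM-dd HH:mm', '2020-01-15 23:50').time
def earlyToday    = Date.parse('yyyy-MM-dd HH:mm', '2020-01-16 00:10').time

assert isFirstBuildOfDay(lateYesterday, earlyToday)        // date changed -> send the email
assert !isFirstBuildOfDay(earlyToday, earlyToday + 60000)  // same date    -> suppress it
assert isFirstBuildOfDay(null, earlyToday)                 // no previous build -> send it
```

In the pipeline, the same comparison selects between the `emailext` call and the "Already sent summary email today, suppressing" log line.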