From 01c1e4bc17446748d954ad0e08bf703139223910 Mon Sep 17 00:00:00 2001
From: Stefano Franz
Date: Fri, 15 Nov 2019 15:14:01 +0000
Subject: [PATCH] attempt to make preAllocation job deletion more certain
 (#5708)

fix issue with deallocate during build phase

fix test report URL
---
 .ci/dev/smoke/Jenkinsfile                   |  4 +--
 .../corda/testing/DistributedTesting.groovy |  4 ++-
 .../net/corda/testing/PodAllocator.java     | 29 ++++++++++---------
 3 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/.ci/dev/smoke/Jenkinsfile b/.ci/dev/smoke/Jenkinsfile
index 0c93fd2b79..d5332b2508 100644
--- a/.ci/dev/smoke/Jenkinsfile
+++ b/.ci/dev/smoke/Jenkinsfile
@@ -60,7 +60,7 @@ pipeline {
                 pullRequest.createStatus(status: 'success',
                         context: 'continuous-integration/jenkins/pr-merge/smokeTest',
                         description: 'Smoke Tests Passed',
-                        targetUrl: "${env.JOB_URL}testResults")
+                        targetUrl: "${env.BUILD_URL}testResults")
             }
         }
     }
@@ -71,7 +71,7 @@ pipeline {
                 pullRequest.createStatus(status: 'failure',
                         context: 'continuous-integration/jenkins/pr-merge/smokeTest',
                         description: 'Smoke Tests Failed',
-                        targetUrl: "${env.JOB_URL}testResults")
+                        targetUrl: "${env.BUILD_URL}testResults")
             }
         }
     }
diff --git a/buildSrc/src/main/groovy/net/corda/testing/DistributedTesting.groovy b/buildSrc/src/main/groovy/net/corda/testing/DistributedTesting.groovy
index 2ce623f4a8..47a5032c29 100644
--- a/buildSrc/src/main/groovy/net/corda/testing/DistributedTesting.groovy
+++ b/buildSrc/src/main/groovy/net/corda/testing/DistributedTesting.groovy
@@ -97,6 +97,7 @@ class DistributedTesting implements Plugin<Project> {
             //modify the image building task to depend on the preAllocate task (if specified on the command line) - this prevents gradle running out of order
             if (preAllocateTask.name in requestedTaskNames) {
                 imageBuildTask.dependsOn preAllocateTask
+                imagePushTask.finalizedBy(deAllocateTask)
             }
 
             def userDefinedParallelTask = project.rootProject.tasks.create("userDefined" + testGrouping.getName().capitalize(), KubesTest) {
@@ -171,7 +172,8 @@ class DistributedTesting implements Plugin<Project> {
         Task deAllocateTask = project.rootProject.tasks.create("deAllocateFor" + testGrouping.getName().capitalize()) {
             group = GRADLE_GROUP
             doFirst {
-                String dockerTag = System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_RUNNING_PROPERTY)
+                String dockerTag = System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_RUNNING_PROPERTY) ?:
+                        System.getProperty(ImageBuilding.PROVIDE_TAG_FOR_BUILDING_PROPERTY)
                 if (dockerTag == null) {
                     throw new GradleException("pre allocation cannot be used without a stable docker tag - please provide one using -D" + ImageBuilding.PROVIDE_TAG_FOR_RUNNING_PROPERTY)
                 }
diff --git a/buildSrc/src/main/groovy/net/corda/testing/PodAllocator.java b/buildSrc/src/main/groovy/net/corda/testing/PodAllocator.java
index 7dc64678a8..995868e1e8 100644
--- a/buildSrc/src/main/groovy/net/corda/testing/PodAllocator.java
+++ b/buildSrc/src/main/groovy/net/corda/testing/PodAllocator.java
@@ -1,6 +1,5 @@
 package net.corda.testing;
 
-import io.fabric8.kubernetes.api.model.Pod;
 import io.fabric8.kubernetes.api.model.Quantity;
 import io.fabric8.kubernetes.api.model.batch.Job;
 import io.fabric8.kubernetes.api.model.batch.JobBuilder;
@@ -39,36 +38,38 @@ public class PodAllocator {
 
     public void allocatePods(Integer number, Integer coresPerPod, Integer memoryPerPod, String prefix) {
-        Config config = new ConfigBuilder()
-                .withConnectionTimeout(CONNECTION_TIMEOUT)
-                .withRequestTimeout(CONNECTION_TIMEOUT)
-                .withRollingTimeout(CONNECTION_TIMEOUT)
-                .withWebsocketTimeout(CONNECTION_TIMEOUT)
-                .withWebsocketPingInterval(CONNECTION_TIMEOUT)
-                .build();
-
+        Config config = getConfig();
         KubernetesClient client = new DefaultKubernetesClient(config);
         List<Job> podsToRequest = IntStream.range(0, number).mapToObj(i -> buildJob("pa-" + prefix + i, coresPerPod, memoryPerPod)).collect(Collectors.toList());
-        podsToRequest.forEach(requestedJob -> {
+        List<Job> createdJobs = podsToRequest.stream().map(requestedJob -> {
             String msg = "PreAllocating " + requestedJob.getMetadata().getName();
             if (logger instanceof org.gradle.api.logging.Logger) {
                 ((org.gradle.api.logging.Logger) logger).quiet(msg);
             } else {
                 logger.info(msg);
             }
-            client.batch().jobs().inNamespace(KubesTest.NAMESPACE).create(requestedJob);
-        });
+            return client.batch().jobs().inNamespace(KubesTest.NAMESPACE).create(requestedJob);
+        }).collect(Collectors.toList());
+
+        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+            KubernetesClient tearDownClient = new DefaultKubernetesClient(getConfig());
+            tearDownClient.batch().jobs().delete(createdJobs);
+        }));
     }
 
-    public void tearDownPods(String prefix) {
-        io.fabric8.kubernetes.client.Config config = new io.fabric8.kubernetes.client.ConfigBuilder()
+    private Config getConfig() {
+        return new ConfigBuilder()
                 .withConnectionTimeout(CONNECTION_TIMEOUT)
                 .withRequestTimeout(CONNECTION_TIMEOUT)
                 .withRollingTimeout(CONNECTION_TIMEOUT)
                 .withWebsocketTimeout(CONNECTION_TIMEOUT)
                 .withWebsocketPingInterval(CONNECTION_TIMEOUT)
                 .build();
+    }
+
+    public void tearDownPods(String prefix) {
+        io.fabric8.kubernetes.client.Config config = getConfig();
         KubernetesClient client = new DefaultKubernetesClient(config);
         Stream<Job> jobsToDelete = client.batch().jobs().inNamespace(KubesTest.NAMESPACE).list()
                 .getItems()
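
Note on the mechanism: the substantive change in PodAllocator.allocatePods is that the created Jobs are now collected and a JVM shutdown hook is registered to delete them, so cleanup runs even when the Gradle build is aborted before the deAllocate task fires (the finalizedBy wiring covers the normal path). Below is a minimal standalone sketch of that pattern against the same fabric8 API the patch uses; the class name ShutdownHookExample and the method registerTearDown are illustrative, not part of the patch.

    import io.fabric8.kubernetes.api.model.batch.Job;
    import io.fabric8.kubernetes.client.DefaultKubernetesClient;
    import io.fabric8.kubernetes.client.KubernetesClient;

    import java.util.List;

    public class ShutdownHookExample {

        // Illustrative sketch, not part of the patch: register a hook that
        // deletes the given Jobs when the JVM exits, whether the exit is
        // normal or triggered by SIGTERM/Ctrl-C.
        public static void registerTearDown(List<Job> createdJobs) {
            Runtime.getRuntime().addShutdownHook(new Thread(() -> {
                // Build a fresh client inside the hook: any client created
                // during the build may already be closed when the hook runs.
                try (KubernetesClient tearDownClient = new DefaultKubernetesClient()) {
                    tearDownClient.batch().jobs().delete(createdJobs);
                }
            }));
        }
    }

One caveat: shutdown hooks do not run on SIGKILL or a crashed node, so this makes deletion more certain rather than guaranteed, consistent with the commit's "attempt to make ... more certain" wording.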